commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
ea027e70f94d351fade02a3110135e031b9f52c5
|
Update botcmd import.
|
dieHardBot.py
|
dieHardBot.py
|
#!/usr/bin/env python
"""A bot which will respond to various Die Hard character name commands and
mentions and respond with a random line spoken by that character in the film.
"""
from errbot.botplugin import BotPlugin
from errbot.jabberbot import botcmd
from dieHard import DieHard
def generate(character):
f = lambda self, mess, args: "(%s) %s" % (character,
self.diehard.get_random(character))
f.__name__ = character
f.__doc__ = "Get a random quote from %s." % character.title()
return f
class DieHardBotBuilder(type):
def __new__(mcs, name, bases, classDict):
newClassDict = dict(classDict.items() +
[(character, botcmd(generate(character)))
for character in DieHard.CHARACTERS])
return super(DieHardBotBuilder, mcs).__new__(mcs, name, bases,
newClassDict)
class DieHardBot(BotPlugin):
__metaclass__ = DieHardBotBuilder
def __init__(self):
super(BotPlugin, self).__init__()
self.diehard = DieHard()
def callback_message(self, conn, mess):
"""Listen for Die Hard mentions and interject random lines from those
characters who were mentioned.
"""
message = ""
for character in DieHard.CHARACTERS:
if mess.getBody().find("(%s)" % character) != -1:
message = "(%s) %s" % (character,
self.diehard.get_random(character))
break
if message:
self.send(mess.getFrom(), message, message_type=mess.getType())
|
Python
| 0
|
@@ -231,18 +231,8 @@
rbot
-.jabberbot
imp
|
ca90b06be21728f83a04cd9425e11e9fa0f4d684
|
Remove reference to strong, since its not used
|
dirty_bits.py
|
dirty_bits.py
|
from django.db.models import get_models, ManyToManyField
from django.db.models.signals import post_init, post_save
from threading import Lock
hash_fnc = hash
REGISTRY_LOCK = Lock()
REGISTRY = set()
NEW_MODEL_HASH = None
def register_all(strict=False):
models = get_models()
for model in models:
register(model, strict)
def register(cls, strict=False):
with REGISTRY_LOCK:
if cls in REGISTRY:
return
cls.__strict_dirty_checking = strict
REGISTRY.add(cls)
def _init_hash(sender, instance):
if sender in REGISTRY:
instance.__dirty_hash, instance.__old_values = cls._get_hash(
instance,
instance.__strict_dirty_checking
)
else:
instance.__dirty_hash, instance.__old_values = NEW_MODEL_HASH, None
def _convert_value(field, instance):
# Ignoring many to many since django calls save
# Trying to track this relationship causes performance issues
if isinstance(field, ManyToManyField):
return None
else:
return field.value_to_string(instance)
def _get_hash(instance, strong=False):
model_key_values = tuple(
(
(field.name, _convert_value(field, instance)) for field in
(instance._meta.fields + instance._meta.many_to_many)
)
)
if not instance.pk:
return NEW_MODEL_HASH, None
return hash_fnc(model_key_values), model_key_values
def __compute_hash(model_key_values):
return hash(model_key_values)
def is_dirty(self):
if self.__dirty_hash == NEW_MODEL_HASH:
# initial state of a model is dirty
return True
new_hash, new_values = cls._get_hash(self)
if not self.__strict_dirty_checking:
return new_hash != self.__dirty_hash
else:
# if the hashes are equal and the tuples are equal then its not dirty
return new_hash != self.__dirty_hash or self.__old_values != new_values
cls._init_hash = _init_hash
cls._get_hash = _get_hash
cls.is_dirty = is_dirty
def _post_init(sender, instance, **kwargs):
_init_hash(sender, instance)
def _post_save(sender, instance, **kwargs):
_init_hash(sender, instance)
post_save.connect(_post_save, sender=cls, weak=False)
post_init.connect(_post_init, sender=cls, weak=False)
|
Python
| 0.000003
|
@@ -1173,22 +1173,8 @@
ance
-, strong=False
):%0A
|
7b7ff6e8cea6c0811310efb5407eeffe49044992
|
clean up last_in_suite jobs
|
teuthology/queue.py
|
teuthology/queue.py
|
import argparse
import logging
import os
import subprocess
import sys
import tempfile
import yaml
import beanstalkc
from teuthology import safepath
log = logging.getLogger(__name__)
def connect(ctx):
host = ctx.teuthology_config['queue_host']
port = ctx.teuthology_config['queue_port']
return beanstalkc.Connection(host=host, port=port)
def worker():
parser = argparse.ArgumentParser(description="""
Grab jobs from a beanstalk queue and run the teuthology tests they
describe. One job is run at a time.
""")
parser.add_argument(
'-v', '--verbose',
action='store_true', default=None,
help='be more verbose',
)
parser.add_argument(
'--archive-dir',
metavar='DIR',
help='path under which to archive results',
required=True,
)
parser.add_argument(
'-l', '--log-dir',
help='path in which to store logs',
required=True,
)
ctx = parser.parse_args()
loglevel = logging.INFO
if ctx.verbose:
loglevel = logging.DEBUG
logging.basicConfig(
level=loglevel,
filename=os.path.join(ctx.log_dir, 'worker.{pid}'.format(pid=os.getpid())),
format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S',
)
if not os.path.isdir(ctx.archive_dir):
sys.exit("{prog}: archive directory must exist: {path}".format(
prog=os.path.basename(sys.argv[0]),
path=ctx.archive_dir,
))
from teuthology.misc import read_config
read_config(ctx)
beanstalk = connect(ctx)
beanstalk.watch('teuthology')
beanstalk.ignore('default')
while True:
job = beanstalk.reserve(timeout=60)
if job is None:
continue
# bury the job so it won't be re-run if it fails
job.bury()
log.debug('Reserved job %d', job.jid)
log.debug('Config is: %s', job.body)
job_config = yaml.safe_load(job.body)
safe_archive = safepath.munge(job_config['name'])
if job_config.get('last_in_suite', False):
log.debug('Generating coverage for %s', job_config['name'])
args = [
os.path.join(os.path.dirname(sys.argv[0]), 'teuthology-results'),
'--timeout',
str(job_config.get('results_timeout', 21600)),
'--email',
job_config['email'],
'--archive-dir',
os.path.join(ctx.archive_dir, safe_archive),
'--name',
job_config['name'],
]
if job_config.get('email_on_success', False):
args.append('--email-on-success')
subprocess.Popen(args=args)
else:
log.debug('Creating archive dir...')
safepath.makedirs(ctx.archive_dir, safe_archive)
archive_path = os.path.join(ctx.archive_dir, safe_archive, str(job.jid))
log.info('Running job %d', job.jid)
run_job(job_config, archive_path)
job.delete()
def run_job(job_config, archive_path):
arg = [
os.path.join(os.path.dirname(sys.argv[0]), 'teuthology'),
]
if job_config['verbose']:
arg.append('-v')
arg.extend([
'--lock',
'--block',
'--keep-locked-on-error',
'--owner', job_config['owner'],
'--archive', archive_path,
])
if job_config['description'] is not None:
arg.extend(['--description', job_config['description']])
arg.append('--')
tmp_fp, tmp_path = tempfile.mkstemp()
try:
os.write(tmp_fp, yaml.safe_dump(job_config['config']))
arg.append(tmp_path)
subprocess.check_call(
args=arg,
close_fds=True,
)
except subprocess.CalledProcessError as e:
log.exception(e)
else:
log.info('Success!')
finally:
os.close(tmp_fp)
os.unlink(tmp_path)
|
Python
| 0
|
@@ -3094,28 +3094,24 @@
th)%0A
-
job.delete()
|
74286f4d631b09d46a0c9df995aa21e608b99dc2
|
Update regex_utils
|
text/regex_utils.py
|
text/regex_utils.py
|
# coding=utf-8
import re
def parse_line(regex , line):
if line == None:
return None
if regex == None:
return line
items = []
pattern = re.compile(regex)
match = pattern.match(line)
if match:
items = match.groups()
return items
def check_line(regex, line):
if line == None:
return False
if regex == None:
return False
pattern = re.compile(regex)
match = pattern.match(line)
if match:
return True
else:
return False
def sub(regex, repl, line, count = 0):
if line == None:
return None
if regex == None or repl == None:
return line
return re.sub(regex, repl, line, count)
def split(regex, line):
if line == None:
return None
if regex == None:
return line
return re.split(regex, line)
|
Python
| 0.000002
|
@@ -539,16 +539,140 @@
rn False
+%0A %0Adef match(regex, line):%0A return check_line(regex, line)%0A%0Adef group(regex, line):%0A return parse_line(regex, line)
%0A%0Adef su
|
c679fcce6e48a445c6af5545977c0a286b4204c4
|
Fix web.py line reading with empty page id
|
web.py
|
web.py
|
import socket
import threading
import re
import json
from collections import defaultdict
from html import escape as escape_html
from urllib.parse import quote_plus
class _Html(object):
def __init__(self, template, values):
self.value = template.format(*map(escape_html, values))
def __repr__(self):
return repr(self.value)
all_pages = {}
all_sessions = {}
class _Interactive(object):
socket_writer = None
def on_open(self):
pass
def on_close(self):
pass
def eval(self, message, target=None):
target = target or self.id
if '\n' in message:
line = 'send {} eval(decodeURI("{}"))\n'.format(target, quote_plus(message))
else:
line = 'send {} {}\n'.format(target, message)
_Interactive.socket_writer.write(line)
_Interactive.socket_writer.flush()
def call(self, method, *args, target=None):
# Python' and Javascript's quoting rules are close enough that we can
# use 'repr' to generate properly escaped characters.
exp = '{}({})'.format(method, ', '.join(map(repr, args)))
self.eval(exp, target=target)
def set(self, element_id, value, target=None):
if isinstance(value, _Html):
setter = 'setRaw'
else:
setter = 'set'
self.call(setter, element_id, value, target=target)
def html(self, template, *values):
return _Html(template, values)
class SessionBase(_Interactive):
def __init__(self, id, pages):
self.id = id
self.pages = pages
class PageBase(_Interactive):
def __init__(self, id, session):
self.id = id
self.session = session
def setup(PageCls=PageBase, SessionCls=SessionBase, host='localhost', port=8001, auto_destroy_sessions=False):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('localhost', 8001))
reader = s.makefile('r', encoding='utf-8')
writer = s.makefile('w', encoding='utf-8')
_Interactive.socket_writer = writer
def subroutine():
while True:
line = reader.readline()
event, id, params = re.match(r'(\S+) (\S+) (.*)', line).groups()
if event == 'connected':
session_id = params
if session_id in all_sessions:
session = all_sessions[params]
else:
session = SessionCls(session_id, {})
all_sessions[session_id] = session
session.on_open()
page = PageCls(id, session)
session.pages[id] = page
all_pages[id] = page
page.on_open()
elif event == 'disconnected':
session = all_pages[id].session
del session.pages[id]
del all_pages[id]
page.on_close()
if auto_destroy_sessions and len(session.pages) == 0:
del all_sessions[session.id]
session.on_close()
elif event == 'call':
call = json.loads(params)
method = call['method']
# Nope, not falling for that.
if method in dir(_Interactive) or method.startswith('_'):
print('Somebody tried to call', method)
continue
try:
getattr(all_pages[id], method)(*call['params'])
except AttributeError:
getattr(all_pages[id].session, method)(*call['params'])
threading.Thread(target=subroutine).start()
|
Python
| 0
|
@@ -2163,17 +2163,17 @@
%5CS+) (%5CS
-+
+*
) (.*)',
|
462171f13341f7987c84a148d05d44d43f4c7436
|
Use spawn_id in marker key
|
web.py
|
web.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
import argparse
import json
import requests
from flask import Flask, render_template
from flask_googlemaps import GoogleMaps
from flask_googlemaps import Map
from flask_googlemaps import icons
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import config as app_config
import db
import utils
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
with open('credentials.json') as f:
credentials = json.load(f)
with open('locales/pokemon.en.json') as f:
pokemon_names = json.load(f)
GOOGLEMAPS_KEY = credentials.get('gmaps_key', None)
AUTO_REFRESH = 45 # refresh map every X s
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'-H',
'--host',
help='Set web server listening host',
default='127.0.0.1'
)
parser.add_argument(
'-P',
'--port',
type=int,
help='Set web server listening port',
default=5000
)
parser.add_argument(
'-d', '--debug', help='Debug Mode', action='store_true'
)
parser.set_defaults(DEBUG=True)
return parser.parse_args()
def create_app():
app = Flask(__name__, template_folder='templates')
GoogleMaps(app, key=GOOGLEMAPS_KEY)
return app
app = create_app()
@app.route('/data')
def data():
"""Gets all the PokeMarkers via REST"""
return json.dumps(get_pokemarkers())
@app.route('/config')
def config():
"""Gets the settings for the Google Maps via REST"""
map_center = utils.get_map_center()
return json.dumps({
'lat': map_center[0],
'lng': map_center[1],
'zoom': 15,
'identifier': 'fullmap'
})
@app.route('/')
def fullmap():
return render_template(
'map.html',
key=GOOGLEMAPS_KEY,
fullmap=get_map(),
auto_refresh=AUTO_REFRESH * 1000
)
def get_pokemarkers():
markers = []
workers = app_config.GRID[0] * app_config.GRID[1]
for i in range(workers):
coords = utils.get_start_coords(i)
markers.append({
'icon': icons.dots.red,
'lat': coords[0],
'lng': coords[1],
'infobox': "Worker %d" % i,
'type': 'custom',
'key': 'start-position-%d' % i,
'disappear_time': -1
})
session = db.Session()
pokemons = db.get_sightings(session)
session.close()
for pokemon in pokemons:
name = pokemon_names[str(pokemon.pokemon_id)]
datestr = datetime.fromtimestamp(pokemon.expire_timestamp)
dateoutput = datestr.strftime("%H:%M:%S")
LABEL_TMPL = u'''
<div><b>{name}</b><span> - </span><small><a href='http://www.pokemon.com/us/pokedex/{id}' target='_blank' title='View in Pokedex'>#{id}</a></small></div>
<div>Disappears at - {disappear_time_formatted} <span class='label-countdown' disappears-at='{disappear_time}'></span></div>
<div><a href='https://www.google.com/maps/dir/Current+Location/{lat},{lng}' target='_blank' title='View in Maps'>Get Directions</a></div>
'''
label = LABEL_TMPL.format(
id=pokemon.pokemon_id,
name=name,
disappear_time=pokemon.expire_timestamp,
disappear_time_formatted=dateoutput,
lat=pokemon.lat,
lng=pokemon.lon,
)
# NOTE: `infobox` field doesn't render multiple line string in frontend
label = label.replace('\n', '')
markers.append({
'type': 'pokemon',
'name': name,
'key': pokemon.pokemon_id,
'disappear_time': pokemon.expire_timestamp,
'icon': 'static/icons/%d.png' % pokemon.pokemon_id,
'lat': pokemon.lat,
'lng': pokemon.lon,
'infobox': label
})
return markers
def get_map():
map_center = utils.get_map_center()
fullmap = Map(
identifier='fullmap2',
style='height:100%;width:100%;top:0;left:0;position:absolute;z-index:200;',
lat=map_center[0],
lng=map_center[1],
markers=get_pokemarkers(),
zoom='15',
)
return fullmap
if __name__ == '__main__':
args = get_args()
app.run(debug=True, threaded=True, host=args.host, port=args.port)
|
Python
| 0.000001
|
@@ -3583,16 +3583,31 @@
'key':
+'%7B%7D-%7B%7D'.format(
pokemon.
@@ -3609,32 +3609,51 @@
emon.pokemon_id,
+ pokemon.spawn_id),
%0A 'di
|
e5fb37ad00eff773640c2c71b2c8b15dda5a8015
|
Refactor web.py
|
web.py
|
web.py
|
import json
import os
import logging
from flask import Flask, render_template, request, jsonify, redirect, url_for, session
from flask_sslify import SSLify
from flask_oauth import OAuth
from urllib2 import Request, urlopen, URLError
from mongo_agent import MongoAgent
# Initialize logging and setting to INFO level
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', level=logging.INFO)
# Google Console API values
GOOGLE_CLIENT_ID = os.environ['GOOGLE_CLIENT_ID']
GOOGLE_CLIENT_SECRET = os.environ['GOOGLE_CLIENT_SECRET']
REDIRECT_URI = '/gCallback'
SECRET_KEY = os.environ['SECRET_KEY']
DEBUG = False
app = Flask(__name__)
#sslify = SSLify(app)
app.secret_key = SECRET_KEY
port = int(os.getenv("PORT"))
app.context = ('server.crt', 'server.key')
# Google OAuth authentication
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key=GOOGLE_CLIENT_ID,
consumer_secret=GOOGLE_CLIENT_SECRET)
# Mongo agent
mongo = MongoAgent(os.environ['MONGO_CONN'], os.environ['MONGO_DB'])
@app.route("/")
def index():
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
access_token = access_token[0]
headers = {'Authorization': 'OAuth ' + access_token}
req = Request('https://www.googleapis.com/oauth2/v1/userinfo', None, headers)
try:
res = urlopen(req)
except URLError as e:
if e.code == 401:
# Unauthorized - bad token
session.pop('access_token', None)
return redirect(url_for('login'))
return res.read()
google_user_info = json.loads(res.read())
email = google_user_info['email']
session['email'] = email
return render_template('index.html', email=email, token=access_token, picture=google_user_info['picture'])
@app.route('/login')
def login():
callback = url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route(REDIRECT_URI)
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
return redirect(url_for('index'))
@google.tokengetter
def get_access_token():
return session.get('access_token')
@app.route('/get_user_data')
def get_user_data():
email = session.get('email')
data = mongo.get_dashboard_data(email)
return jsonify(user=data['dash_data'])
@app.route('/get_metrics_data')
def get_metrics_data():
email = session.get('email')
data = mongo.get_metrics_data(email)
return jsonify(metrics=data['metrics_data'])
if __name__ == "__main__":
app.run(host='0.0.0.0', port=port)
|
Python
| 0
|
@@ -76,17 +76,8 @@
ate,
- request,
jso
@@ -399,31 +399,27 @@
%0A%0A#
-Google Console API valu
+Environment variabl
es%0AG
@@ -529,36 +529,8 @@
T'%5D%0A
-REDIRECT_URI = '/gCallback'%0A
SECR
@@ -567,66 +567,177 @@
Y'%5D%0A
-DEBUG = False%0A%0Aapp = Flask(__name__)%0A%0A#sslify = SSLify(app
+ENVIRONMENT_TYPE = os.environ%5B'ENVIRONMENT_TYPE'%5D%0AMONGO_DB = os.environ%5B'MONGO_DB'%5D%0AMONGO_CONN = os.environ%5B'MONGO_CONN'%5D%0A%0A# Flask app configuration%0Aapp = Flask(__name__
)%0Aap
@@ -762,16 +762,20 @@
RET_KEY%0A
+app.
port = i
@@ -796,16 +796,33 @@
PORT%22))%0A
+app.debug = True%0A
app.cont
@@ -863,35 +863,243 @@
%0A%0A#
-Google OAuth authentication
+Initialize Mongo agent%0Amongo = MongoAgent(MONGO_CONN, MONGO_DB)%0A%0A# Apply production configuration%0Aif ENVIRONMENT_TYPE == 'prod':%0A SSLify(app)%0A app.host = '0.0.0.0'%0A app.debug = False%0A%0A# Google OAuth%0AREDIRECT_URI = '/gCallback'
%0Aoau
@@ -1887,92 +1887,8 @@
T)%0A%0A
-# Mongo agent%0Amongo = MongoAgent(os.environ%5B'MONGO_CONN'%5D, os.environ%5B'MONGO_DB'%5D)%0A%0A
%0A@ap
@@ -2265,45 +2265,10 @@
rror
- as e:%0A if e.code == 401:%0A
+:%0A
@@ -2306,20 +2306,16 @@
-
session.
@@ -2344,28 +2344,24 @@
ne)%0A
-
return redir
@@ -2385,34 +2385,8 @@
n'))
-%0A return res.read()
%0A%0A
@@ -3430,31 +3430,6 @@
run(
-host='0.0.0.0', port=port
)%0A
|
f900f24320d3e5c715bd7e72b60deb8c5cc2f00a
|
Move selection of compressed or indented into modifiable section
|
wf3.py
|
wf3.py
|
### Do not edit the lines at the top and bottom of this file.
### Edit the workflow description between START and FINISH comments
from balcaza.t2types import *
from balcaza.t2activity import *
from balcaza.t2flow import *
### START editing your workflow below here
#
# This example creates a simple nested workflow. First, create the inner nested workflow:
inner = Workflow('eigenanalysis')
# Refer to workflow input ports using <wflow>.input.<portname>
# output ports usig <wflow>.output.<portname>
inner.input.speciesName = String
inner.input.speciesName.description = """Species name
Controls the title of the bar plot that will be generated with the analysis. As an example, it can be the name of the species or the name of the place where the research has been conducted, between others.
"""
inner.input.speciesName.example = "Gentiana pneumonanthe"
inner.input.stageMatrix = RExpression
rserve = RServer()
# Create a reusable activity and assign it to a workflow task, using
# <wflow>.task.<taskname> = <activity>
inner.task.CalculatePlotSize = rserve.runScript(
"plot_size <- 128 + 32 * dim(stage_matrix)[1]",
inputs = dict(stage_matrix=RExpression),
outputs = dict(plot_size=Integer)
)
# Task input ports are <wflow>.task.<taskname>.input.<portname>
# Connect ports using the >> operator
inner.input.stageMatrix >> inner.task.CalculatePlotSize.input.stage_matrix
inner.task.ProjectionMatrix = rserve.runFile(
"projectionMatrix.R",
inputs=dict(plot_title=String, stage_matrix=RExpression, plot_size=Integer),
outputs=dict(plot_image=PNGImage)
)
inner.task.ProjectionMatrix.description = 'Create a projection matrix'
# There is a handy shortcut for text constant inputs
"Projection Matrix" >> inner.task.ProjectionMatrix.input.plot_title
inner.input.stageMatrix >> inner.task.ProjectionMatrix.input.stage_matrix
inner.task.CalculatePlotSize.output.plot_size >> inner.task.ProjectionMatrix.input.plot_size
inner.output.projectionMatrix = PNGImage
inner.output.projectionMatrix.description = 'Plot of results'
inner.task.ProjectionMatrix.output.plot_image >> inner.output.projectionMatrix
# Create another workflow
outer = Workflow('Eigenanalysis')
# and add the nested workflow (treat the nested workflow just like any other acivity)
outer.task.Eigenanalysis = inner
# Assign to variables, to make things more reusable (to more readable)
desc = """Species name
Controls the title of the bar plot that will be generated with the analysis. As an example, it can be the name of the species or the name of the place where the research has been conducted, between others.
"""
example = "Gentiana pneumonanthe"
outer.input.speciesName = String
sn = outer.input.speciesName
sn.description = desc
sn.example = example
outer.input.stageMatrixFile = TextFile
outer.input.stageMatrixFile.description = """The stage matrix file input port:
Here comes the stage matrix without the stage names (as you see in the example). It should be provied as a txt-file.
Example from:
J. Gerard B. Oostermeijer; M.L. Brugman; E.R. de Boer; H.C.M. Den Nijs. 1996. Temporal and Spatial Variation in the Demography of Gentiana pneumonanthe, a Rare Perennial Herb. The Journal of Ecology, Vol. 84(2): 153-166.
"""
outer.input.stageMatrixFile.example = """0.0000 0.0000 0.0000 7.6660 0.0000
0.0579 0.0100 0.0000 8.5238 0.0000
0.4637 0.8300 0.9009 0.2857 0.8604
0.0000 0.0400 0.0090 0.6190 0.1162
0.0000 0.0300 0.0180 0.0000 0.0232"""
# List types must identify what the list contains
outer.input.stages = List(String)
rshell = rserve.runFile(
"readMatrix.R",
inputs = dict(stage_matrix_file=TextFile, stages=Vector(String)),
outputs = dict(stage_matrix=RExpression)
)
outer.task.ReadMatrix = rshell
outer.input.stageMatrixFile >> outer.task.ReadMatrix.input.stage_matrix_file
outer.input.stages >> outer.task.ReadMatrix.input.stages
outer.task.ReadMatrix.output.stage_matrix >> outer.task.Eigenanalysis.input.stageMatrix # not inner.input.stageMatrix !
outer.input.speciesName >> outer.task.Eigenanalysis.input.speciesName
outer.output.projectionMatrix = PNGImage
outer.task.Eigenanalysis.output.projectionMatrix >> outer.output.projectionMatrix
outer.author = 'Maria and Jon'
outer.description = 'Hello'
outer.title = 'Workflow 34'
# FINISH your workflow above here, and do not change the lines below, except to select
# one of the export lines
import sys
import maximal.XMLExport as XMLExport
# You can output the workflow indented for readability or compressed for space
#outer.exportXML(XMLExport.XMLExporter(XMLExport.XMLIndenter(sys.stdout)))
outer.exportXML(XMLExport.XMLExporter(XMLExport.XMLCompressor(sys.stdout)))
|
Python
| 0
|
@@ -4257,16 +4257,165 @@
ow 34'%0A%0A
+# Set compressed = True to create a smaller workflow file%0A# Set compressed = False to create a workflow indented for readability%0A%0Acompressed = True%0A%0A
# FINISH
@@ -4478,52 +4478,8 @@
elow
-, except to select%0A# one of the export lines
%0A%0Aim
@@ -4530,89 +4530,24 @@
rt%0A%0A
-# You can output the workflow indented for readability or compressed for space%0A%0A#
+if compressed:%0A%09
oute
@@ -4597,15 +4597,17 @@
.XML
-Indente
+Compresso
r(sy
@@ -4618,16 +4618,23 @@
dout)))%0A
+else:%0A%09
outer.ex
@@ -4672,33 +4672,31 @@
MLExport.XML
-Compresso
+Indente
r(sys.stdout
@@ -4700,9 +4700,8 @@
out)))%0A%0A
-%0A
|
c98783bad0d249a203fcf1479b839182d967d3cc
|
Make python test_main behave as list-query
|
Trie.py
|
Trie.py
|
#! /usr/bin/env python
# vim: set encoding=utf-8
"""
This module provides access to libtrie shared object. Using it should be faster
than spawning a process and communicating with it.
This Python interface only allows for querying the trie. It is not possible to
create new tries via Python.
"""
from ctypes import cdll, c_char_p, c_void_p, cast
import ctypes.util
import os
LIBPATH = os.path.dirname(os.path.abspath(__file__)) + '/libtrie.so'
LIBTRIE = cdll.LoadLibrary(LIBPATH)
LIBTRIE.trie_load.argtypes = [c_char_p]
LIBTRIE.trie_load.restype = c_void_p
LIBTRIE.trie_lookup.argtypes = [c_void_p, c_char_p]
LIBTRIE.trie_lookup.restype = c_void_p
LIBTRIE.trie_get_last_error.restype = c_char_p
LIBC = ctypes.CDLL(ctypes.util.find_library('c'))
class Trie(object):
"""
Trie class encapsulates the underlying trie structure. It is created from
file and only provides means to query a key. There are no modifications
possible.
"""
def __init__(self, filename, encoding='utf8'):
"""
Create new trie from given file. The optional `encoding` parameter
specifies how to encode keys before looking them pu.
"""
self.encoding = encoding
self.free_func = LIBTRIE.trie_free
self.ptr = LIBTRIE.trie_load(filename)
if not self.ptr:
err = LIBTRIE.trie_get_last_error()
raise IOError(str(err))
def __del__(self):
if self and self.ptr:
self.free_func(self.ptr)
def lookup(self, key):
"""
Check that `key` is present in the trie. If so, return list of strings
that are associated with this key. Otherwise return empty list.
The key should be a unicode object.
"""
res = LIBTRIE.trie_lookup(self.ptr, key.encode(self.encoding))
if res:
result = cast(res, c_char_p).value.decode(self.encoding)
LIBC.free(res)
return [s for s in result.split('\n')]
else:
return []
def test_main():
"""
This function creates a storage backed by a file and tests it by retrieving
a couple of records.
"""
import sys
t = Trie('prijmeni6.trie')
for name in sys.stdin:
name = name.strip().decode('utf8')
print '\n'.join(t.lookup(name)).encode('utf8')
if __name__ == '__main__':
test_main()
|
Python
| 0.000634
|
@@ -2165,33 +2165,148 @@
-t = T
+if len(sys.argv) != 2:%0A sys.stderr.w
ri
+t
e('
-prijmeni6.trie'
+Need one command line argument - trie file%5Cn')%0A sys.exit(1)%0A t = Trie(sys.argv%5B1%5D
)%0A%0A
@@ -2374,16 +2374,65 @@
'utf8')%0A
+ res = t.lookup(name)%0A if res:%0A
@@ -2447,30 +2447,19 @@
n'.join(
-t.lookup(name)
+res
).encode
@@ -2466,16 +2466,60 @@
('utf8')
+%0A else:%0A print 'Not found'
%0A%0Aif __n
|
f49fc187bc397a56f03217c88fa06b7ef1704b41
|
Add docstring for `is_landscape()`
|
Util.py
|
Util.py
|
"""Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
"""Test whether or the the filename ends with '.pdf'."""
return fnmatch(filename, '*.pdf')
def all_pdf_files_in_directory(path):
"""Return a list of of PDF files in a directory."""
return [filename for filename in os.listdir(path) if pdf_file(filename)]
def concat_pdf_pages(files):
"""A generator that yields one PDF page a time for all pages in the PDF files."""
for input_file in files:
for page in PdfFileReader(input_file).pages:
yield page
def split_on_condition(iterable, predicate):
"""Split a iterable into chunks, where the first item in the chunk will be the
evaluate to True with predicate function, and the rest of the items in the chunk
evaluates to False."""
it = iter(iterable)
# Initialize the chunk list with an item
# StopIteration will be thrown if there are no further items in the iterator
chunk = [it.next()]
while True:
try:
item = it.next()
if predicate(item):
# If the next item should be in a new chunk then return the current chunk
yield chunk
# Then rest the chunk list
chunk = [item]
else:
# Simply append the item to current chunk if it doesn't match the predicate
chunk.append(item)
except StopIteration:
# If the end of the iterator is reached then simply return the current chunk
yield chunk
break
def is_landscape(page):
box = page.mediaBox
return box.getWidth() > box.getHeight()
|
Python
| 0.000001
|
@@ -1652,24 +1652,91 @@
cape(page):%0A
+ %22%22%22Check whether or not a page is in landscape orientation.%22%22%22%0A
box = pa
|
041ff0805acde88fe6cb797227df9abc4c6b8d96
|
Use cache for rendering posts in /blog.
|
blog.py
|
blog.py
|
import webapp2
import os
import jinja2
import json
import helper
from google.appengine.ext import db
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
autoescape=True,
extensions=['jinja2.ext.autoescape'])
def render_str(template, **kw):
t = JINJA_ENVIRONMENT.get_template(template)
return t.render(kw)
class BaseHandler(webapp2.RequestHandler):
def render(self, template, **kw):
self.response.write(render_str(template, **kw))
def set_cookie(self, user):
cookie = helper.make_cookie(user)
self.response.headers.add_header(
"Set-Cookie",
"user={}; Path=/".format(cookie))
def logout(self):
self.response.headers.add_header("Set-Cookie", "user=; Path=/")
# Unit 1
class MainPage(BaseHandler):
def get(self):
self.render('index.html')
# Unit 2
class Rot13Handler(BaseHandler):
def get(self):
self.render('unit2/rot13.html')
def post(self):
texts = self.request.get('text')
self.render('unit2/rot13.html', texts=helper.rot13(texts))
class SignupHandler(BaseHandler):
def get(self):
self.render('unit2/signup.html')
def post(self):
username = self.request.get('username')
password = self.request.get('password')
verify = self.request.get('verify')
email = self.request.get('email')
errors = helper.signup_errors(username, password, verify, email)
u = db.GqlQuery("SELECT * from User "
"WHERE username =:1", username)
used_name = "This username has already been used." if u.get() else ""
if any(errors) or used_name:
self.render('unit2/signup.html',
username=username,
email=email,
username_error=errors[0] or used_name,
password_error=errors[1],
verify_error=errors[2],
email_error=errors[3])
else:
salt = helper.make_salt()
hashed_pass = helper.hash_pass(password, salt)
user = User(username=username, password=hashed_pass,
salt=salt, email=email)
user.put()
self.set_cookie(username)
self.redirect('/blog/welcome')
class WelcomeHandler(BaseHandler):
def get(self):
ck = self.request.cookies.get("user")
if helper.valid_cookie(ck):
self.render("/unit2/welcome.html", username=ck.split("|")[0])
else:
self.redirect("/blog/signup")
# Unit 3
class Posts(db.Model):
subject = db.StringProperty(required=True)
content = db.TextProperty(required=True)
created = db.DateProperty(auto_now_add=True)
class BlogHandler(BaseHandler):
def get(self):
posts = db.GqlQuery("SELECT * FROM Posts "
"ORDER BY created DESC "
"LIMIT 10")
self.render("unit3/index.html",
posts=posts)
class NewPostHandler(BaseHandler):
def render_post(self, subject="", content="", error=""):
self.render("unit3/newpost.html",
subject=subject,
content=content,
error=error)
def get(self):
self.render_post()
def post(self):
subject = self.request.get("subject")
content = self.request.get("content")
if subject and content:
p = Posts(subject=subject, content=content)
p.put()
i = p.key().id()
self.redirect("/blog/{}".format(i))
else:
error = "Please input both subject and content!"
self.render_post(subject, content, error)
class PermanentPost(BaseHandler):
    """Permalink page for a single post at /blog/<id>."""

    def get(self, blog_id):
        """Render the post whose datastore id is *blog_id* (digits only,
        guaranteed by the route regex), or a 404 page if it does not exist."""
        post = Posts.get_by_id(long(blog_id))
        if post:
            self.render("unit3/permanent.html",
                        post=post)
        else:
            # BUG FIX: a nonexistent post previously returned an implicit
            # 200; report 404 so clients and crawlers see the right status.
            self.response.set_status(404)
            self.response.write("This page doesn't exist!")
# Unit 4
class User(db.Model):
    """Datastore entity for a registered blog user."""
    # Login name; uniqueness is checked at signup time, not by the datastore.
    username = db.StringProperty(required=True)
    # Salted hash of the password (helper.hash_pass), never the plaintext.
    password = db.StringProperty(required=True)
    # Per-user random salt used when hashing the password.
    salt = db.StringProperty(required=True)
    # Optional contact address.
    email = db.StringProperty()
class LoginHandler(BaseHandler):
    """Authenticates an existing user at /blog/login."""

    def get(self):
        self.render("/unit4/login.html")

    def post(self):
        username = self.request.get('username')
        password = self.request.get('password')

        user = db.GqlQuery("SELECT * from User "
                           "WHERE username =:1", username).get()
        if user:
            # Datastore properties come back as unicode; the hashing helper
            # works on byte strings, hence the ascii encode.
            salt = user.salt.encode("ascii")
            hashed_pwd = user.password.encode("ascii")
            if helper.valid_pass(password, salt, hashed_pwd):
                self.set_cookie(username)
                self.redirect("/blog/welcome")
                # BUG FIX: without this return, the "Invalid login!" page
                # below was also rendered after a successful redirect.
                return
        self.render("/unit4/login.html", error="Invalid login!")
class Logout(BaseHandler):
    """Clears the session cookie and returns the user to the signup page."""

    def get(self):
        self.logout()
        self.redirect("/blog/signup")
# Unit 5
class BlogJson(BaseHandler):
    """JSON feed of every post, served at /blog/.json."""

    def get(self):
        self.response.headers['Content-Type'] = ('application/json; '
                                                 'charset=UTF-8')
        payload = []
        for post in Posts.all():
            payload.append({"content": post.content,
                            "created": post.created.strftime('%c'),
                            "subject": post.subject})
        self.response.write(json.dumps(payload))
class PermanentJson(BaseHandler):
    """JSON representation of a single post at /blog/<id>.json."""

    def get(self, blog_id):
        """Write the post with datastore id *blog_id* as JSON, or a 404."""
        # NOTE(review): unclear why the captured id could contain a comma;
        # the leading-digits split is preserved as-is — confirm with callers.
        blog_id = blog_id.split(',')[0]
        post = Posts.get_by_id(long(blog_id))
        if post:
            self.response.headers['Content-Type'] = ('application/json; '
                                                     'charset=UTF-8')
            p = {"content": post.content,
                 "created": post.created.strftime('%c'),
                 "subject": post.subject}
            self.response.write(json.dumps(p))
        else:
            # BUG FIX: a missing post previously returned an implicit 200;
            # report 404 so API clients can detect the missing resource.
            self.response.set_status(404)
            self.response.write("This page doesn't exist!")
# URL routing table: maps each request path (regex) to its handler class.
# Capture groups (e.g. the post id digits) are passed to the handler's get().
application = webapp2.WSGIApplication([
    ('/', MainPage),
    ('/rot13', Rot13Handler),
    ('/blog/signup', SignupHandler),
    ('/blog/welcome', WelcomeHandler),
    ('/blog', BlogHandler),
    ('/blog/.json', BlogJson),
    ('/blog/newpost', NewPostHandler),
    ('/blog/([0-9]+)', PermanentPost),
    ('/blog/([0-9]+).json', PermanentJson),
    ('/blog/login', LoginHandler),
    ('/blog/logout', Logout)
], debug=True)  # debug=True renders stack traces in the browser; dev only.
|
Python
| 0
|
@@ -2821,57 +2821,79 @@
)%0A%0A%0A
-class BlogHandler(BaseHandler):%0A def get(self)
+CACHE = %7B%7D%0A%0A%0Adef update_blog():%0A key = %22top%22%0A if key not in CACHE
:%0A
@@ -2994,16 +2994,16 @@
DESC %22%0A
-
@@ -3034,16 +3034,176 @@
IT 10%22)%0A
+ posts = list(posts)%0A CACHE%5Bkey%5D = posts%0A return CACHE%5Bkey%5D%0A%0A%0Aclass BlogHandler(BaseHandler):%0A def get(self):%0A posts = update_blog()%0A
@@ -3778,24 +3778,50 @@
p.put()
+%0A CACHE.clear()
%0A%0A
|
92a57e512e4437b781d7db76587d27092033a49a
|
remove dead code
|
chart-02-ols-median-of-root-median-squared-errors.py
|
chart-02-ols-median-of-root-median-squared-errors.py
|
# create files for chart-02-ols-median-of-root-mdian-squared-errors
# with these choices
# metric in median-root-median-squared-errors
# model in ols
# ndays in 30 60 ... 360
# predictors in act actlog ct ctlog
# responses in price logprice
# usetax in yes no
# year in 2008
# invocations and files created
# python chart-02X.py makefile -> src/chart-02X.makefile
# python chart-02X.py data -> data/working/chart-02X.data
# python chart-02X.py txt -> data/working/chart-02X.txt
# python chart-02X.py txtY -> data/working/chart-02X-Y.txt
import sys
from Bunch import Bunch
from chart_02_template import chart
def main():
def median_value(x):
cvresult = CvResult(x)
specs = Bunch(metric='median-of-root-median-squared-errors',
title='Median of Root Median Squared Errors',
model='ols',
training_periods=['30', '60', '90', '120', '150', '180',
'210', '240', '270', '300', '330', '360'],
feature_sets=['act', 'actlog', 'ct', 'ctlog'],
responses=['price', 'logprice'],
usetax=['yes', 'no'],
year='2008')
chart(specs=specs,
argv=sys.argv)
if __name__ == '__main__':
main()
|
Python
| 0.999454
|
@@ -671,65 +671,8 @@
():%0A
- def median_value(x):%0A cvresult = CvResult(x)%0A%0A
@@ -1101,48 +1101,8 @@
'%5D,%0A
- usetax=%5B'yes', 'no'%5D,%0A
|
4c425af1be1d4a4d4b424db5edf27f24b768f396
|
Save the IP address in a file, if the IP address hasn't
|
slackip.py
|
slackip.py
|
#!/usr/bin/env python
"""
## slackip.py
##
## I am behind a NAT get my global IPv4 address and write it to a slack channel
##
## https://github.com/nerby/slackIP
##
## Atanu Ghosh
## <atanu@acm.org>
## 2017-03-05
"""
from __future__ import print_function
import getopt
import httplib
import os
import sys
IPHOST = "ipinfo.io"
CHANNEL = """
{"channel": "#pandabot",
"username": "webhookbot",
"text": "IP address of host %s is %s",
"icon_emoji": ":ghost:"}
"""
def get_my_ip():
"""
Get my IPv4 external address
"""
conn = httplib.HTTPConnection(IPHOST)
conn.request("GET", "/ip")
response = conn.getresponse()
return response.status, response.reason, response.read()
def write_to_slack_channel(host, url, hostname, ipaddress):
"""
Write to slack channel
curl -X POST --data-urlencode 'payload={"channel": "#pandabot", "username": "webhookbot", "text": "This is posted to #pandabot and comes from a bot named webhookbot.", "icon_emoji": ":ghost:"}' https://hooks.slack.com/services/??? # pylint: disable=locally-disabled, line-too-long
"""
conn = httplib.HTTPSConnection(host)
conn.request("POST", url, CHANNEL % (hostname, ipaddress))
response = conn.getresponse()
ret = response.read()
conn.close()
if response.reason != "OK":
print(response.reason, ret, file=sys.stderr)
return False
return True
USAGE =\
"""\
usage: %s
\t -h --host
\t -u --url
"""
def main():
"""
Main function
"""
def usage():
"""
Usage message
"""
print(USAGE % sys.argv[0], end='', file=sys.stderr)
try:
opts, _ = getopt.getopt(sys.argv[1:], "h:u:",
["host=", "url="])
except getopt.GetoptError:
usage()
sys.exit(1)
url = None
host = None
for option, arg in opts:
if option in ("-h", "--host"):
host = arg
if option in ("-u", "--url"):
url = arg
if not url or not host:
print("The host and URL must be provided", file=sys.stderr)
return
status, reason, ipaddress = get_my_ip()
if status != 200:
print("Problem", reason, file=sys.stderr)
return
print(ipaddress)
write_to_slack_channel(host, url, os.uname()[1], ipaddress)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -690,16 +690,21 @@
e.read()
+%5B:-1%5D
%0A%0Adef wr
@@ -1380,18 +1380,514 @@
-return Tru
+print(%22Sucessfully wrote to slack channel%22)%0A%0A return True%0A%0Adef save_ip_address_to_file(filename, ipaddress):%0A %22%22%22%0A Save my IP address to a file%0A %22%22%22%0A%0A save = open(filename, 'w')%0A save.write(ipaddress)%0A save.close()%0A%0Adef read_ip_address_from_file(filename):%0A %22%22%22%0A Read my saved IP address from file%0A %22%22%22%0A%0A try:%0A with open(filename, %22r%22) as saved:%0A for line in saved:%0A return True, line%0A except IOError:%0A pass%0A%0A return False, Non
e%0A%0AU
@@ -1934,16 +1934,29 @@
u --url%0A
+%5Ct -s --save%0A
%22%22%22%0A%0Adef
@@ -2183,16 +2183,18 @@
%5D, %22h:u:
+s:
%22,%0A
@@ -2236,16 +2236,25 @@
, %22url=%22
+, %22save=%22
%5D)%0A e
@@ -2347,16 +2347,32 @@
t = None
+%0A save = None
%0A%0A fo
@@ -2520,119 +2520,69 @@
arg%0A
-%0A
+
if
-not url or not host:%0A print(%22The host and URL must be provided%22, file=sys.stderr)%0A return
+option in (%22-s%22, %22--save%22):%0A save = arg
%0A%0A
@@ -2721,26 +2721,695 @@
print(
-ipaddress)
+%22Global IP address%22, ipaddress)%0A%0A if not url or not host:%0A print(%22The host and URL must be provided%22, file=sys.stderr)%0A return%0A%0A if save:%0A status, saved_ipaddress = read_ip_address_from_file(save)%0A if not status:%0A print(%22No saved IP address in%22, save)%0A save_ip_address_to_file(save, ipaddress)%0A else:%0A if ipaddress != saved_ipaddress:%0A print(%22Saved IP address does not match current IP address%22,%0A saved_ipaddress, save)%0A save_ip_address_to_file(save, ipaddress)%0A else:%0A print(%22IP address has not changed%22)%0A return
%0A%0A wr
|
3809d9a277412ef7c53905ecdcae55d537e08c95
|
Fix whitespace in tests file
|
travis_solo_tests.py
|
travis_solo_tests.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from nose.tools import eq_, ok_
from travis_solo import Configuration, Loader, Step
class TestLoader(object):
def setup(self):
self.loader = Loader()
def test_loading_steps(self):
settings = dict(
before_install=['do before install',],
install='pip install .',
script='nosetests',
after_script=['a', 'b'],
)
steps = self.loader.load_steps(settings)
eq_(steps, (
Step('before_install', ('do before install',)),
Step('install', ('pip install .',)),
Step('script', ('nosetests',)),
Step('after_script', ('a', 'b'), can_fail=True),
))
def test_loading_configurations(self):
settings = dict(
language='python',
python=['2.7', '3.3'],
env=['A=a', 'A=b'],
matrix=dict(
include=[
dict(
python='2.7',
env='A=c',
),
],
exclude=[
dict(
python='3.3',
env='A=a',
),
],
)
)
configurations = self.loader.load_configurations(settings)
eq_(configurations, (
Configuration(python='2.7', variables={'A': 'a'}),
Configuration(python='2.7', variables={'A': 'b'}),
Configuration(python='3.3', variables={'A': 'b'}),
Configuration(python='2.7', variables={'A': 'c'}),
))
|
Python
| 0.001412
|
@@ -213,20 +213,17 @@
bject):%0A
-
+%09
def setu
@@ -231,24 +231,18 @@
(self):%0A
-
+%09%09
self.loa
@@ -257,20 +257,17 @@
ader()%0A%0A
-
+%09
def test
@@ -284,32 +284,26 @@
teps(self):%0A
-
+%09%09
settings = d
@@ -307,28 +307,19 @@
= dict(%0A
-
+%09%09%09
before_i
@@ -349,28 +349,19 @@
all',%5D,%0A
-
+%09%09%09
install=
@@ -377,28 +377,19 @@
all .',%0A
-
+%09%09%09
script='
@@ -400,28 +400,19 @@
tests',%0A
-
+%09%09%09
after_sc
@@ -428,34 +428,22 @@
, 'b'%5D,%0A
- )%0A
+%09%09)%0A%09%09
steps =
@@ -475,24 +475,18 @@
ttings)%0A
-
+%09%09
eq_(step
@@ -490,28 +490,19 @@
teps, (%0A
-
+%09%09%09
Step('be
@@ -537,36 +537,27 @@
nstall',)),%0A
-
+%09%09%09
Step('instal
@@ -577,36 +577,27 @@
tall .',)),%0A
-
+%09%09%09
Step('script
@@ -616,28 +616,19 @@
ts',)),%0A
-
+%09%09%09
Step('af
@@ -672,24 +672,15 @@
e),%0A
- ))%0A%0A
+%09%09))%0A%0A%09
def
@@ -714,24 +714,18 @@
(self):%0A
-
+%09%09
settings
@@ -733,28 +733,19 @@
= dict(%0A
-
+%09%09%09
language
@@ -755,28 +755,19 @@
ython',%0A
-
+%09%09%09
python=%5B
@@ -781,28 +781,19 @@
'3.3'%5D,%0A
-
+%09%09%09
env=%5B'A=
@@ -804,28 +804,19 @@
'A=b'%5D,%0A
-
+%09%09%09
matrix=d
@@ -824,375 +824,174 @@
ct(%0A
- include=%5B%0A dict(%0A python='2.7',%0A env='A=c',%0A ),%0A %5D,%0A exclude=%5B%0A dict(%0A python='3.3',%0A env='A=a',%0A ),%0A %5D,%0A )%0A )%0A%0A
+%09%09%09%09include=%5B%0A%09%09%09%09%09dict(%0A%09%09%09%09%09%09python='2.7',%0A%09%09%09%09%09%09env='A=c',%0A%09%09%09%09%09),%0A%09%09%09%09%5D,%0A%09%09%09%09exclude=%5B%0A%09%09%09%09%09dict(%0A%09%09%09%09%09%09python='3.3',%0A%09%09%09%09%09%09env='A=a',%0A%09%09%09%09%09),%0A%09%09%09%09%5D,%0A%09%09%09)%0A%09%09)%0A%0A%09%09
conf
@@ -1046,24 +1046,18 @@
tings)%0A%0A
-
+%09%09
eq_(conf
@@ -1070,28 +1070,19 @@
ions, (%0A
-
+%09%09%09
Configur
@@ -1120,36 +1120,27 @@
'A': 'a'%7D),%0A
-
+%09%09%09
Configuratio
@@ -1174,36 +1174,27 @@
'A': 'b'%7D),%0A
-
+%09%09%09
Configuratio
@@ -1236,20 +1236,11 @@
%7D),%0A
-
+%09%09%09
Conf
@@ -1290,15 +1290,9 @@
%7D),%0A
-
+%09%09
))%0A
|
27aff0f597d16ed73a976397a14b9cd1f7cf2c4a
|
Add support for sendback paths
|
troposphere/views.py
|
troposphere/views.py
|
import logging
from datetime import datetime
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.conf import settings
from django.core.urlresolvers import reverse
from troposphere.cas import CASClient, InvalidTicket
from troposphere.oauth import OAuthClient, Unauthorized
import troposphere.messages as messages
logger = logging.getLogger(__name__)
def get_cas_client(request):
return CASClient(settings.CAS_SERVER,
request.build_absolute_uri(reverse('cas_service')))
key = open(settings.OAUTH_PRIVATE_KEY_PATH, 'r').read()
oauth_client = OAuthClient(settings.OAUTH_SERVER,
key,
settings.OAUTH_ISS,
settings.OAUTH_SCOPE)
def root(request):
return redirect('application')
def application(request):
records, disabled_login = get_maintenance()
if disabled_login:
return redirect('maintenance')
response = None
for msg in messages.get_messages(request):
if isinstance(msg, dict) and 'login_check' in msg:
if 'access_token' in msg.keys():
token = msg['access_token']
response = render(request, 'application.html', {
'access_token': token['value'],
'expires': token['expires']})
else:
response = render(request, 'application.html')
if response:
return response
messages.add_message(request, 'gatewayed')
return redirect(get_cas_client(request).get_login_endpoint(gateway=True))
def get_maintenance():
"""
Returns a list of maintenance records along with a boolean to indicate
whether or not login should be disabled
"""
return ([], False)
def maintenance(request):
return HttpResponse("We're undergoing maintenance", status=503)
def login(request):
return redirect(get_cas_client(request).get_login_endpoint())
def logout(request):
"""
TODO: Destroy OAuth token
"""
if 'cas' in request.GET:
root_url = request.build_absolute_uri(reverse('application'))
return redirect(get_cas_client(request).get_logout_endpoint(root_url))
return redirect('application')
def gateway_request(request):
"""
Returns true iff the preceeding request was an attempt to log in the use
into CAS with gateway=true
https://wiki.jasig.org/display/CAS/gateway
"""
return any(m == 'gatewayed' for m in messages.get_messages(request))
def cas_service(request):
gatewayed = gateway_request(request)
ticket = request.GET.get('ticket', None)
if not ticket:
logger.info("No Ticket received in GET string")
messages.add_message(request, {'login_check': True})
return redirect('application')
# Authenticate request with CAS
try:
user = get_cas_client(request).validate_ticket(ticket)
except InvalidTicket:
messages.add_message(request, {'login_check': True})
return redirect('application')
logger.debug(user + " successfully authenticated against CAS")
# Authorize request with Groupy OAuth
try:
token, expires = oauth_client.generate_access_token(user)
logger.debug("TOKEN: " + token)
expires = int((expires - datetime.utcfromtimestamp(0)).total_seconds())
messages.add_message(request, {'login_check': True,
'access_token': {'value': token,
'expires': expires}})
return redirect('application')
except Unauthorized:
if gatewayed:
messages.add_message(request, {'login_check': True})
return redirect('application')
else:
return redirect('forbidden')
return redirect('application')
def forbidden(request):
"""
View used when someone tries to log in and is an authenticated iPlant
user, but was found to be unauthorized to use Atmosphere by OAuth.
Returns HTTP status code 403 Forbidden
"""
return render(request, 'no_user.html', status=403)
|
Python
| 0
|
@@ -1494,16 +1494,81 @@
sponse%0A%0A
+ flash = %7B'gatewayed': True, 'path': request.get_full_path()%7D%0A
mess
@@ -1593,27 +1593,21 @@
equest,
-'gatewayed'
+flash
)%0A re
@@ -2374,16 +2374,54 @@
Returns
+a tuple of the form (a, b) where a is
true iff
@@ -2435,16 +2435,20 @@
eceeding
+%0A
request
@@ -2480,20 +2480,16 @@
the use
-%0A
into CA
@@ -2507,16 +2507,84 @@
way=true
+ and b%0A is the path that was originally requested on Troposphere.
%0A htt
@@ -2638,36 +2638,8 @@
%0A
- return any(m == 'gatewayed'
for
@@ -2674,16 +2674,137 @@
request)
+:%0A if isinstance(m, dict) and m.has_key('gatewayed'):%0A return (True, m%5B'path'%5D)%0A return (False, None
)%0A%0Adef c
@@ -2837,16 +2837,26 @@
atewayed
+, sendback
= gatew
@@ -2875,16 +2875,74 @@
equest)%0A
+ if sendback is None:%0A sendback = 'application'%0A
tick
@@ -3135,37 +3135,32 @@
rn redirect(
-'application'
+sendback
)%0A%0A # Aut
@@ -3369,29 +3369,24 @@
edirect(
-'application'
+sendback
)%0A%0A l
@@ -3915,37 +3915,32 @@
rn redirect(
-'application'
+sendback
)%0A except
@@ -4069,29 +4069,24 @@
edirect(
-'application'
+sendback
)%0A
@@ -4151,37 +4151,32 @@
rn redirect(
-'application'
+sendback
)%0A%0Adef forbi
|
aa632e5956abc516cee119d5ee413d7e8d9e0dc0
|
Fix execution from non-Git directories.
|
appengine/components/tools/calculate_version.py
|
appengine/components/tools/calculate_version.py
|
#!/usr/bin/env python
# Copyright 2013 The Swarming Authors. All rights reserved.
# Use of this source code is governed by the Apache v2.0 license that can be
# found in the LICENSE file.
"""Given current git checkout state return version string to use for an app."""
import getpass
import logging
import optparse
import os
import subprocess
import sys
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(ROOT_DIR, '..', 'third_party_local'))
from depot_tools import git_number
from depot_tools import git_common
def git(cmd, cwd):
return subprocess.check_output(['git'] + cmd, cwd=cwd)
def get_pseudo_revision(root, remote):
"""Returns the pseudo revision number and commit hash describing
the base upstream commit this branch is based on.
The base upstream commit hash is determined by 'git merge-base'. See the man
page for more information.
The pseudo revision is calculated by the number of commits separating the base
upstream commit from the rootest commit. The earliest commit should be a root
commit, e.g. a commit with no parent. A git tree can have multiple root
commits when git repositories are merged together. The oldest one will be
selected. The list of all root commits can be retrieved with:
git rev-list --parents HEAD | egrep "^[a-f0-9]{40}$"
Returns:
tuple of:
- pseudo revision number as a int
- upstream commit hash this branch is based of.
"""
mergebase = git(['merge-base', 'HEAD', remote], cwd=root).rstrip()
targets = git_common.parse_commitrefs(mergebase)
git_number.load_generation_numbers(targets)
git_number.finalize(targets)
return git_number.get_num(targets[0]), mergebase
def is_pristine(root, mergebase):
"""Returns True if the tree is pristine relating to mergebase."""
head = git(['rev-parse', 'HEAD'], cwd=root).rstrip()
logging.info('head: %s, mergebase: %s', head, mergebase)
if head != mergebase:
return False
# Look for local uncommitted diff.
return not (
git(['diff', '--ignore-submodules=none', mergebase], cwd=root) or
git(['diff', '--ignore-submodules', '--cached', mergebase], cwd=root))
def calculate_version(root, tag):
"""Returns a tag for a git checkout.
Uses the pseudo revision number from the upstream commit this branch is based
on, the abbreviated commit hash. Adds -tainted-<username> if the code is not
pristine and optionally adds a tag to further describe it.
"""
pseudo_revision, mergebase = get_pseudo_revision(root, 'origin/master')
pristine = is_pristine(root, mergebase)
# Trim it to 7 characters like 'git describe' does. 40 characters is
# overwhelming!
version = '%s-%s' % (pseudo_revision, mergebase[:7])
if not pristine:
version += '-tainted-%s' % getpass.getuser()
if tag:
version += '-' + tag
return version
def checkout_root(cwd):
"""Returns the root of the checkout."""
return git(['rev-parse', '--show-toplevel'], cwd).rstrip()
def main():
parser = optparse.OptionParser(description=sys.modules[__name__].__doc__)
parser.add_option('-v', '--verbose', action='store_true')
parser.add_option(
'-t', '--tag', help='Tag to attach to a tainted version')
options, args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG if options.verbose else logging.ERROR)
if args:
parser.error('Unknown arguments, %s' % args)
root = checkout_root(os.getcwd())
logging.info('Checkout root is %s', root)
print calculate_version(root, options.tag)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000027
|
@@ -263,16 +263,34 @@
pp.%22%22%22%0A%0A
+import contextlib%0A
import g
@@ -655,24 +655,157 @@
cwd=cwd)%0A%0A%0A
+@contextlib.contextmanager%0Adef chdir(path):%0A orig = os.getcwd()%0A try:%0A os.chdir(path)%0A yield%0A finally:%0A os.chdir(orig)%0A%0A%0A
def get_pseu
@@ -1685,16 +1685,38 @@
strip()%0A
+ with chdir(root):%0A
target
@@ -1758,16 +1758,18 @@
gebase)%0A
+
git_nu
@@ -1808,16 +1808,18 @@
gets)%0A
+
+
git_numb
@@ -1839,16 +1839,18 @@
argets)%0A
+
return
|
c1a5b9fcb3316ec5d8ff550be68197c583a21b7b
|
extract is_act_as_username method
|
djactasauth/backends.py
|
djactasauth/backends.py
|
# -*- coding: utf-8 -*-
from django.contrib.auth.backends import ModelBackend
from django.contrib.auth import get_user_model
class FilteredModelBackend(ModelBackend):
def get_user(self, user_id):
user = super(FilteredModelBackend, self).get_user(user_id)
return self.filter_user(user)
def authenticate(self, username=None, password=None, **kwargs):
user = super(FilteredModelBackend, self).authenticate(
username=username, password=password, **kwargs)
return self.filter_user(user)
def filter_user(self, user):
if not user:
return user
filters = getattr(self, 'filter_kwargs', None)
if filters:
qs = type(user)._default_manager.filter(
pk=user.pk).filter(**filters)
if not qs.exists():
return None
return user
class ActAsModelBackend(FilteredModelBackend):
sepchar = '/'
def authenticate(self, username=None, password=None, **kwargs):
if self.sepchar in username:
auth_username, act_as_username = username.split(self.sepchar)
else:
auth_username = act_as_username = username
auth_user = super(ActAsModelBackend, self).authenticate(
username=auth_username, password=password, **kwargs)
if not auth_user:
return auth_user
if auth_username != act_as_username:
UserModel = get_user_model()
try:
user = UserModel._default_manager.get_by_natural_key(
act_as_username)
except UserModel.DoesNotExist:
user = None
if not self.can_act_as(auth_user=auth_user, user=user):
return None
else:
user = auth_user
return user
def can_act_as(self, auth_user, user):
return False
class OnlySuperuserCanActAsModelBackend(ActAsModelBackend):
def can_act_as(self, auth_user, user):
return auth_user.is_superuser and not user.is_superuser
|
Python
| 0.999427
|
@@ -936,16 +936,116 @@
= '/'%0A%0A
+ @classmethod%0A def is_act_as_username(cls, username):%0A return cls.sepchar in username%0A%0A
def
@@ -1120,35 +1120,43 @@
if self.
-sepchar in
+is_act_as_username(
username
:%0A
@@ -1139,32 +1139,33 @@
sername(username
+)
:%0A au
|
1e222b72e632e5649d26dc71ab44ef31af7459fe
|
Fix rendering of groups in sidebar that didn't get all the template context passed into it.
|
django_backend/group.py
|
django_backend/group.py
|
from django.forms.forms import pretty_name
from django.template import Context
from django.template.loader import render_to_string
from .compat import context_flatten
class Group(list):
"""
A simplistic representation of backends that are related and should be
displayed as one "group" in the backend (e.g. as one box in the sidebar).
"""
template_name = 'django_backend/_group.html'
def __init__(self, id, name=None, position=0, template_name=None):
self.id = id
if name is None:
name = pretty_name(id)
self.template_name = template_name or self.template_name
self.name = name
self.position = position
super(Group, self).__init__()
@property
def backends(self):
return list(self)
def get_context_data(self, context, **kwargs):
data = {
'group': self,
}
data.update(kwargs)
return data
def get_template_name(self):
return self.template_name
def render(self, context):
context_data = {}
if isinstance(context, Context):
context_data.update(context_flatten(context))
context_data.update(self.get_context_data(context))
return render_to_string(
self.get_template_name(),
self.get_context_data(context))
|
Python
| 0
|
@@ -1179,24 +1179,19 @@
ext_data
-.update(
+ =
self.get
@@ -1203,33 +1203,48 @@
ext_data(context
-)
+, **context_data
)%0A return
@@ -1261,29 +1261,16 @@
_string(
-%0A
self.get
@@ -1290,30 +1290,9 @@
e(),
-%0A self.get_
+
cont
@@ -1299,19 +1299,10 @@
ext_data
-(context)
)%0A
|
bda88dfe6e0a2f16f0c3be74a42cf8783aae1d9e
|
Fix to support django v1.7
|
django_enum_js/views.py
|
django_enum_js/views.py
|
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.safestring import mark_safe
from django_enum_js import enum_wrapper
def enums_js(request):
enums = enum_wrapper.get_json_formatted_enums()
return render_to_response('django_enum_js/enums_js.tpl', { 'enums': mark_safe(enums), }, context_instance=RequestContext(request), mimetype='application/javascript')
|
Python
| 0.000001
|
@@ -386,12 +386,16 @@
t),
-mime
+content_
type
|
6a9ed8867ccaab1284ae999d752de92174de399e
|
fix error message rendering failing due to message being a dict
|
djcelery_email/tasks.py
|
djcelery_email/tasks.py
|
from django.conf import settings
from django.core.mail import get_connection, EmailMessage
from celery.task import task
CONFIG = getattr(settings, 'CELERY_EMAIL_TASK_CONFIG', {})
BACKEND = getattr(settings, 'CELERY_EMAIL_BACKEND',
'django.core.mail.backends.smtp.EmailBackend')
TASK_CONFIG = {
'name': 'djcelery_email_send',
'ignore_result': True,
}
TASK_CONFIG.update(CONFIG)
def from_dict(messagedict):
return EmailMessage(**messagedict)
@task(**TASK_CONFIG)
def send_email(message, **kwargs):
logger = send_email.get_logger()
conn = get_connection(backend=BACKEND,
**kwargs.pop('_backend_init_kwargs', {}))
try:
result = conn.send_messages([from_dict(message)])
logger.debug("Successfully sent email message to %r.", message['to'])
return result
except Exception as e:
# catching all exceptions b/c it could be any number of things
# depending on the backend
logger.warning("Failed to send email message to %r, retrying.",
message.to)
send_email.retry(exc=e)
# backwards compat
SendEmailTask = send_email
|
Python
| 0.000001
|
@@ -1080,11 +1080,14 @@
sage
-.to
+%5B'to'%5D
)%0A
|
81ff4ede4ea6397e6d54020c56cdf8dddcda1485
|
add dg sub-package to sfepy/discrete/setup.py
|
sfepy/discrete/setup.py
|
sfepy/discrete/setup.py
|
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
import os.path as op
auto_name = op.split(op.dirname(__file__))[-1]
config = Configuration(auto_name, parent_package, top_path)
subdirs = [
'common',
'fem',
'iga',
'structural',
]
for subdir in subdirs:
config.add_subpackage(subdir)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
|
Python
| 0
|
@@ -277,16 +277,30 @@
ommon',%0A
+ 'dg',%0A
|
152c77db3e06201d28300ea0c6112c3bb93d150f
|
Change f-string to .format()
|
desktop/core/src/desktop/lib/botserver/views.py
|
desktop/core/src/desktop/lib/botserver/views.py
|
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
from pprint import pprint
from desktop import conf
from django.shortcuts import render
from django.http import HttpResponse
from desktop.lib.django_util import login_notrequired, JsonResponse
from desktop.lib.exceptions_renderable import PopupException
from django.utils.translation import ugettext as _
from django.views.decorators.csrf import csrf_exempt
from django.conf import settings
from slack_sdk import WebClient
LOG = logging.getLogger(__name__)
SLACK_VERIFICATION_TOKEN = conf.SLACK.SLACK_VERIFICATION_TOKEN.get()
SLACK_BOT_USER_TOKEN = conf.SLACK.SLACK_BOT_USER_TOKEN.get()
slack_client = WebClient(token=SLACK_BOT_USER_TOKEN)
appname = "hue_bot"
@login_notrequired
@csrf_exempt
def slack_events(request):
try:
slack_message = json.loads(request.body)
if slack_message['token'] != SLACK_VERIFICATION_TOKEN:
return HttpResponse(status=403)
# challenge verification
if slack_message['type'] == 'url_verification':
response_dict = {"challenge": slack_message['challenge']}
return JsonResponse(response_dict, status=200)
if 'event' in slack_message:
event_message = slack_message['event']
parse_events(event_message)
except Exception as ex:
raise PopupException(_("Response content is not valid JSON"), detail=ex)
return HttpResponse(status=200)
def parse_events(event_message):
user_id = event_message.get('user')
text = event_message.get('text')
channel = event_message.get('channel')
BOT_ID = get_bot_id(appname)
# ignore bot's own message
if BOT_ID == user_id:
return HttpResponse(status=200)
if 'hello hue' in text.lower():
response = say_hi_user(channel, user_id)
if response['ok']:
return HttpResponse(status=200)
else:
raise PopupException(response["error"])
def say_hi_user(channel, user_id):
"""Bot sends Hi<username> message in a specific channel"""
bot_message = f'Hi <@{user_id}> :wave:'
response = slack_client.api_call(api_method='chat.postMessage', json={'channel': channel, 'text': bot_message})
return response
def get_bot_id(botusername):
"""Takes in bot username, Returns the bot id"""
response = slack_client.api_call('users.list')
users = response['members']
for user in users:
if botusername in user.get('name', '') and not user.get('deleted'):
return user.get('id')
|
Python
| 0.005025
|
@@ -2745,23 +2745,15 @@
e =
-f
'Hi %3C@%7B
-user_id
%7D%3E :
@@ -2758,16 +2758,32 @@
:wave:'
+.format(user_id)
%0A respo
|
a57273553db9dc5528acfcaf89f7baf637552d72
|
Fix some mock patch paths
|
ditto/twitter/tests/test_management_commands.py
|
ditto/twitter/tests/test_management_commands.py
|
# coding: utf-8
from mock import patch
from django.core.management import call_command
from django.core.management.base import CommandError
from django.test import TestCase
from django.utils.six import StringIO
from .. import factories
class FetchTwitterTweetsArgs(TestCase):
def test_fail_with_no_args(self):
"Fails when no arguments are provided"
with self.assertRaises(CommandError):
call_command('fetch_twitter_tweets')
def test_fail_with_account_only(self):
"Fails when only an account is provided"
with self.assertRaises(CommandError):
call_command('fetch_twitter_tweets', account='terry')
@patch('ditto.twitter.management.commands.fetch_twitter_tweets.RecentTweetsFetcher')
def test_with_recent(self, fetch_class):
"Calls the correct method when fetching recent tweets"
call_command('fetch_twitter_tweets', '--recent', stdout=StringIO())
fetch_class.assert_called_once_with(screen_name=None)
@patch('ditto.twitter.management.commands.fetch_twitter_tweets.RecentTweetsFetcher')
def test_with_recent_and_account(self, fetch_class):
"Calls the correct method when fetching one account's recent tweets"
call_command('fetch_twitter_tweets', '--recent', account='barbara',
stdout=StringIO())
fetch_class.assert_called_once_with(screen_name='barbara')
@patch('ditto.twitter.management.commands.fetch_twitter_tweets.FavoriteTweetsFetcher')
def test_with_favorites(self, fetch_class):
"Calls the correct method when fetching favorite tweets"
call_command('fetch_twitter_tweets', '--favorites', stdout=StringIO())
fetch_class.assert_called_once_with(screen_name=None)
@patch('ditto.twitter.management.commands.fetch_twitter_tweets.FavoriteTweetsFetcher')
def test_with_favorites_and_account(self, fetch_class):
"Calls the correct method when fetching one account's favorite tweets"
call_command('fetch_twitter_tweets', '--favorites',
account='barbara', stdout=StringIO())
fetch_class.assert_called_once_with(screen_name='barbara')
class FetchTwitterTweetsOutput(TestCase):
@patch('ditto.twitter.fetch.RecentTweetsFetcher.fetch')
def test_success_output(self, fetch_method):
"Responds correctly when recent tweets were successfully fetched"
# What the mocked method will return:
fetch_method.side_effect = [
[{'account': 'philgyford', 'success': True, 'fetched': 23}]
]
out = StringIO()
call_command('fetch_twitter_tweets', '--recent', stdout=out)
self.assertIn('philgyford: Fetched 23 tweets', out.getvalue())
@patch('ditto.twitter.fetch.RecentTweetsFetcher.fetch')
def test_error_output(self, fetch_method):
"Responds correctly when there was an error fetching recent tweets"
# What the mocked method will return:
fetch_method.side_effect = [
[{'account': 'philgyford', 'success': False,
'message': 'It broke'}]
]
out = StringIO()
out_err = StringIO()
call_command('fetch_twitter_tweets', '--recent', stdout=out,
stderr=out_err)
self.assertIn('philgyford: Failed to fetch tweets: It broke',
out_err.getvalue())
#class UpdateAccounts(TestCase):
#@patch('ditto.twitter.fetch.UpdateAccounts.update_all')
#def test_calls_correct_method(self, update_method):
#call_command('update_twitter_accounts', stdout=StringIO())
#update_method.assert_called_once_with()
#@patch('ditto.twitter.fetch.UpdateAccounts.update_all')
#def test_success_output(self, update_method):
#"Responds correctly when accounts were successfully updated"
## What the mocked method will return:
#update_method.side_effect = [
#[{'account': 'philgyford', 'success': True}]
#]
#out = StringIO()
#call_command('update_twitter_accounts', stdout=out)
#self.assertIn('Updated Account "philgyford"', out.getvalue())
#@patch('ditto.twitter.fetch.UpdateAccounts.update_all')
#def test_error_output(self, update_method):
#"Responds correctly when accounts were successfully updated"
## What the mocked method will return:
#update_method.side_effect = [
#[{'account': 'ID: 7', 'success': False, 'message': 'It broke'}]
#]
#out = StringIO()
#out_err = StringIO()
#call_command('update_twitter_accounts', stdout=out, stderr=out_err)
#self.assertIn('Failed to update Account "ID: 7": It broke',
#out_err.getvalue())
|
Python
| 0.000001
|
@@ -2282,37 +2282,72 @@
('ditto.twitter.
-fetch
+management.commands.fetch_twitter_tweets
.RecentTweetsFet
@@ -2839,21 +2839,56 @@
twitter.
-fetch
+management.commands.fetch_twitter_tweets
.RecentT
|
ddb560c398b0a9ba85faec6ba1e15f98bb94f1ad
|
update comments
|
pyWipeRemix.py
|
pyWipeRemix.py
|
#!/usr/bin/env python
#!/usr/bin/env python3
from __future__ import print_function
from builtins import input
""" Python 2.7 & 3.4 disk wiping utility for use on Linux operating systems. RUN AS ROOT. """
import sys # For interpreter variables & associated functions
import os # For operating system dependent functions
import re # For regular expression parsing
""" Define functions """
def osCheck():
""" Check if OS is 'Linux' """
if not sys.platform.startswith('linux'):
print('This program was designed for Linux. Exiting.')
sys.exit()
def printHeader():
""" Header for attached device(s) / partition(s) """
print(22 * "-", "DEVICES & PARTITIONS", 22 * "-")
def listDevices():
""" List mounted device(s) / partition(s) """
header = printHeader()
return os.system('lsblk /dev/sd* --nodeps --output NAME,MODEL,VENDOR,SIZE,TYPE,STATE') # lsblk -d -o NAME,MODEL,VENDOR...
def defineDevice():
""" Prompt user to define device or partition to wipe """
while True:
try:
device = str(input('Enter letter [and number] of device/partition to wipe,\ne.g. to wipe \'/dev/sdb\' enter \'b\': '))
if not re.match("^[a-z]$|^[a-z]\d$", device):
raise ValueError()
return device
except ValueError:
print('Sorry, that\'s not a valid device or partition. Try again.')
def appendDevice():
""" Append user-defined device/partition to /dev/sd """
letter = defineDevice()
return '/dev/sd' + letter
def numberOfWipes():
""" Prompt user for number of wipes to perform """
while True:
try:
wipes = int(input('How many times do you want to wipe the device or partition?: '))
if not wipes > 0:
raise ValueError()
return wipes
except ValueError:
print('Sorry, that\'s not a valid number. Try again.')
def warningMessage():
""" Warning! """
print('WARNING!!! WRITING CHANGES TO DISK WILL RESULT IN IRRECOVERABLE DATA LOSS.')
def confirmWipe():
""" Prompt user to confirm disk erasure """
warning = warningMessage()
while True:
try:
reply = str(input('Are you sure you want to proceed? (Yes/No): ')).lower().strip()
if reply == 'yes':
return True
elif reply == 'no':
print('Exiting pyWype.')
sys.exit()
except ValueError:
print('Sorry, that\'s not a valid entry. Try again: ')
def zerosToDevice():
""" Write zeros to device/partition """
append = appendDevice()
num = numberOfWipes()
confirm = confirmWipe()
passes = 1
for int in range(num):
print('Processing pass count {} of {} ... '.format(passes, num))
os.system(('dd if=/dev/zero |pv --progress --time --rate --bytes| dd of={} bs=4096'.format(append))) # pv -ptrb
passes += 1
def randomToDevice():
""" Write random zeros and ones to device/partition """
append = appendDevice()
num = numberOfWipes()
confirm = confirmWipe()
passes = 1
for int in range(num):
print('Processing pass count {} of {} ...'.format(passes, num))
os.system(('dd if=/dev/random |pv --progress --time --rate --bytes| dd of={} bs=4096'.format(append))) # pv -ptrb
passes += 1
def menu():
""" Menu prompt for use to select program option """
devices = listDevices()
while True:
print(30 * "-", "MENU", 30 * "-")
print('1. Overwrite device or partition with 0\'s \n(faster, less secure).')
print('2. Overwrite device or partition with random 0\'s & 1\'s \n(slower, more secure).')
print('3. Quit.')
choice = input('Select an option (1, 2 or 3): ')
if choice in ('1', '2', '3'):
return choice
def interactiveMode():
""" Display menu-driven options and return conversions. """
while True:
choice = menu()
if choice == '3':
sys.exit()
elif choice == '1':
zerosToDevice()
elif choice == '2':
randomToDevice()
def wipeDevice():
""" Program to Wipe drive """
osCheck()
interactiveMode()
if __name__ == '__main__':
print(28 * '-', " pyWype ", 28 * '-')
print('PYTHON DISK & PARTITION WIPING UTILITY FOR LINUX.\nTHIS WILL IRRECOVERABLY WIPE DATA FROM DRIVE.\nPROCEED WITH CAUTION.')
wipeDevice()
|
Python
| 0
|
@@ -4144,26 +4144,38 @@
nd r
-eturn convers
+un function based on select
ion
-s.
%22%22%22
|
54296c607b735ce06b3420efecb312f52876e012
|
Replace warning message with deprecation warning
|
django_react_templatetags/context_processors.py
|
django_react_templatetags/context_processors.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
def react_context_processor(request):
"""Expose a global list of react components to be processed"""
print("react_context_processor is no longer required.")
return {
'REACT_COMPONENTS': [],
}
|
Python
| 0.999999
|
@@ -40,16 +40,33 @@
-8 -*-%0A%0A
+import warnings%0A%0A
%0Adef rea
@@ -171,14 +171,31 @@
-print(
+warnings.warn(%0A
%22rea
@@ -238,16 +238,41 @@
quired.%22
+, DeprecationWarning%0A
)%0A%0A r
|
1a301f19a754e8bb3dfb1d7697193ccc90d82c33
|
Fix unit tests
|
django_tenants/tests/test_filesystem_storage.py
|
django_tenants/tests/test_filesystem_storage.py
|
import warnings
from django.db import connection
from django.core.files.base import ContentFile
from django.test import override_settings
from django_tenants import utils
from django_tenants.files.storage import TenantFileSystemStorage
from django_tenants.files.storages import TenantFileSystemStorage as OldTenantFileSystemStorage
from django_tenants.test.cases import TenantTestCase
class TenantFileSystemStorageTestCase(TenantTestCase):
def test_deprecated_module_raises_warning(self):
with warnings.catch_warnings(record=True) as warns:
deprecation_warning = "TenantFileSystemStorage has been moved from django_tenants.files.storages " \
"to django_tenants.files.storage."
OldTenantFileSystemStorage()
self.assertTrue(any(deprecation_warning in str(w.message) for w in warns))
@override_settings(MEDIA_ROOT="apps_dir/media",
MEDIA_URL="/media/")
def test_files_are_saved_under_subdirectories_per_tenant(self):
storage = TenantFileSystemStorage()
connection.set_schema_to_public()
tenant1 = utils.get_tenant_model()(schema_name='tenant1')
tenant1.save()
domain1 = utils.get_tenant_domain_model()(tenant=tenant1, domain='something.test.com')
domain1.save()
connection.set_schema_to_public()
tenant2 = utils.get_tenant_model()(schema_name='tenant2')
tenant2.save()
domain2 = utils.get_tenant_domain_model()(tenant=tenant2, domain='example.com')
domain2.save()
# this file should be saved on the public schema
public_file_name = storage.save('hello_world.txt', ContentFile('Hello World'))
public_os_path = storage.path(public_file_name)
public_url = storage.url(public_file_name)
# switch to tenant1
with utils.tenant_context(tenant1):
t1_file_name = storage.save('hello_from_1.txt', ContentFile('Hello T1'))
t1_os_path = storage.path(t1_file_name)
t1_url = storage.url(t1_file_name)
# switch to tenant2
with utils.tenant_context(tenant2):
t2_file_name = storage.save('hello_from_2.txt', ContentFile('Hello T2'))
t2_os_path = storage.path(t2_file_name)
t2_url = storage.url(t2_file_name)
# assert the paths are correct
self.assertTrue(public_os_path.endswith('apps_dir/media/public/%s' % public_file_name))
self.assertTrue(t1_os_path.endswith('apps_dir/media/tenant1/%s' % t1_file_name))
self.assertTrue(t2_os_path.endswith('apps_dir/media/tenant2/%s' % t2_file_name))
# assert urls are correct
self.assertEqual(public_url, '/media/public/%s' % public_file_name)
self.assertEqual(t1_url, '/media/tenant1/%s' % t1_file_name)
self.assertEqual(t2_url, '/media/tenant2/%s' % t2_file_name)
# assert contents are correct
with open(public_os_path, 'r') as f:
self.assertEqual(f.read(), 'Hello World')
with open(t1_os_path, 'r') as f:
self.assertEqual(f.read(), 'Hello T1')
with open(t2_os_path, 'r') as f:
self.assertEqual(f.read(), 'Hello T2')
|
Python
| 0.000005
|
@@ -351,17 +351,22 @@
nts.test
-.
+s.test
cases im
@@ -370,22 +370,20 @@
import
-Tenant
+Base
TestCase
@@ -384,17 +384,16 @@
stCase%0A%0A
-%0A
class Te
@@ -422,22 +422,20 @@
estCase(
-Tenant
+Base
TestCase
|
2b7e0d52a8a8764b66d8698800bf18e8adc9dae7
|
fix crash when running fix_loop_duplicates.py
|
dojo/management/commands/fix_loop_duplicates.py
|
dojo/management/commands/fix_loop_duplicates.py
|
from django.core.management.base import BaseCommand
from pytz import timezone
from dojo.utils import fix_loop_duplicates
locale = timezone(get_system_setting('time_zone'))
"""
Author: Marian Gawron
This script will identify loop dependencies in findings
"""
class Command(BaseCommand):
help = 'No input commands for fixing Loop findings.'
def handle(self, *args, **options):
fix_loop_duplicates()
|
Python
| 0.000003
|
@@ -49,34 +49,8 @@
and%0A
-from pytz import timezone%0A
from
@@ -93,60 +93,8 @@
es%0A%0A
-locale = timezone(get_system_setting('time_zone'))%0A%0A
%22%22%22%0A
|
e776ac5b08fa2a7ce299ec68697d330fb8a02fd5
|
upgrade __version__ in __init__.py to 1.4.0
|
django_nose/__init__.py
|
django_nose/__init__.py
|
VERSION = (1, 3, 0)
__version__ = '.'.join(map(str, VERSION))
from django_nose.runner import *
from django_nose.testcases import *
# Django < 1.2 compatibility.
run_tests = run_gis_tests = NoseTestSuiteRunner
|
Python
| 0.00002
|
@@ -11,9 +11,9 @@
(1,
-3
+4
, 0)
|
eec67d43d208b490c9d219b3c38e586597b1fa73
|
Refactor generate_module_objects
|
pytest_wish.py
|
pytest_wish.py
|
# -*- coding: utf-8 -*-
import importlib
import inspect
import re
import sys
import pytest
def pytest_addoption(parser):
group = parser.getgroup('wish')
group.addoption('--wish-modules', default=(), nargs='+',
help="Space separated list of module names.")
group.addoption('--wish-includes', nargs='+',
help="Space separated list of regexs matching full object names to include.")
group.addoption('--wish-excludes', default=(), nargs='+',
help="Space separated list of regexs matching full object names to exclude.")
group.addoption('--wish-fail', action='store_true', help="Show wish failures.")
def generate_module_objects(module):
for object_name, object_ in inspect.getmembers(module):
obj_module = inspect.getmodule(object_)
if obj_module is not module:
continue
yield object_name, object_
def valid_name(name, include_res, exclude_res):
include_name = any(include_re.match(name) for include_re in include_res)
exclude_name = any(exclude_re.match(name) for exclude_re in exclude_res)
return include_name and not exclude_name
def index_modules(modules, include_patterns, exclude_patterns=()):
include_res = [re.compile(pattern) for pattern in include_patterns]
exclude_res = [re.compile(pattern) for pattern in exclude_patterns]
object_index = {}
for module_name, module in modules.items():
for object_name, object_ in generate_module_objects(module):
full_object_name = '{}:{}'.format(module_name, object_name)
if valid_name(full_object_name, include_res, exclude_res):
object_index[full_object_name] = object_
return object_index
def pytest_generate_tests(metafunc):
if 'wish' not in metafunc.fixturenames:
return
wish_modules = metafunc.config.getoption('wish_modules')
for module_name in wish_modules:
importlib.import_module(module_name)
wish_includes = metafunc.config.getoption('wish_includes') or wish_modules
wish_excludes = metafunc.config.getoption('wish_excludes')
# NOTE: 'copy' is needed here because index_modules may unexpectedly trigger a module load
object_index = index_modules(sys.modules.copy(), wish_includes, wish_excludes)
object_items = sorted(object_index.items())
ids, params = list(zip(*object_items)) or [[], []]
metafunc.parametrize('wish', params, ids=ids, scope='module')
wish_fail = metafunc.config.getoption('wish_fail')
if not wish_fail:
metafunc.function = pytest.mark.xfail(metafunc.function)
|
Python
| 0.000002
|
@@ -784,20 +784,10 @@
-obj_module =
+if
ins
@@ -813,37 +813,11 @@
ct_)
-%0A if obj_module is not
+ is
mod
@@ -829,25 +829,8 @@
-
- continue%0A
|
ee38beca5c4f561b246af8e2affe9f8552b9eb50
|
Change the default DEBUG value to 'False' and update the default database name.
|
django_olcc/settings.py
|
django_olcc/settings.py
|
# Django settings for django_olcc project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'sqlite.db', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Los_Angeles'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'r9z6b+z9s=_poqa)b62a3jop0ovp#1qgd%xdk%1uz4x71g!m^6'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'django_olcc.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
'templates',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'olcc',
'gunicorn',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Import local settings
try:
from settings_local import *
except ImportError:
pass
|
Python
| 0
|
@@ -45,19 +45,20 @@
DEBUG =
-Tru
+Fals
e%0ATEMPLA
@@ -326,22 +326,20 @@
NAME': '
-sqlite
+olcc
.db',
|
93e12746d19161b30e2dade0d71f22242603b0bd
|
Address fix
|
python/test.py
|
python/test.py
|
import math
from roboclaw import Roboclaw
address = 0x80
rc = Roboclaw("/dev/roboclaw",115200)
rc.Open()
version = rc.ReadVersion(address)
if version[0]==False:
print "GETVERSION Failed"
else:
print repr(version[1])
rc.SetM1VelocityPID(rc_address,3000,300,0,708)
rc.SetM2VelocityPID(rc_address,3000,300,0,720)
rc.WriteNVM(address)
nvm=[0,0,0]
rc.ReadNVM(address)
print str(nvm)
clicks = 300
click_vel = 30
ACCELERATION = 10
rc.SpeedAccelDistanceM1M2(address=address,accel=ACCELERATION,speed1=click_vel,distance1=int(abs(clicks/2)),speed2=int(click_vel),distance2=int(abs(clicks/2)),buffer=1)
rc.SpeedAccelDistanceM1M2(address=address,accel=ACCELERATION,speed1=0,distance1=int(abs(clicks/2)),speed2=0,distance2=int(abs(clicks/2)),buffer=0)
buffers = (0,0,0)
while (buffers[1]!=0x80 and buffers[2]!=0x80):
buffers = rc.ReadBuffers(address);
print "Waiting"
print "Stopping"
rc.SpeedAccelDistanceM1M2(address=address,accel=ACCELERATION,speed1=0,distance1=0,speed2=0,distance2=0,buffer=1)
print "Stop done"
|
Python
| 0.000001
|
@@ -223,35 +223,32 @@
etM1VelocityPID(
-rc_
address,3000,300
@@ -279,11 +279,8 @@
PID(
-rc_
addr
|
b860606c2ce654044131228ddfb741c517ab282e
|
make QLibraryInfo.location works
|
qtpy/QtCore.py
|
qtpy/QtCore.py
|
#
# Copyright © 2014-2015 Colin Duquesnoy
# Copyright © 2009- The Spyder Development Team
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
"""
Provides QtCore classes and functions.
"""
from . import PYQT6, PYQT5, PYSIDE2, PYSIDE6, PythonQtError
if PYQT6:
from PyQt6 import QtCore
from PyQt6.QtCore import *
from PyQt6.QtCore import pyqtSignal as Signal
from PyQt6.QtCore import pyqtBoundSignal as SignalInstance
from PyQt6.QtCore import pyqtSlot as Slot
from PyQt6.QtCore import pyqtProperty as Property
from PyQt6.QtCore import QT_VERSION_STR as __version__
# For issue #153
from PyQt6.QtCore import QDateTime
QDateTime.toPython = QDateTime.toPyDateTime
# Map missing methods
QCoreApplication.exec_ = QCoreApplication.exec
QEventLoop.exec_ = QEventLoop.exec
QThread.exec_ = QThread.exec
# Those are imported from `import *`
del pyqtSignal, pyqtBoundSignal, pyqtSlot, pyqtProperty, QT_VERSION_STR
# Allow unscoped access for enums inside the QtCore module
from .enums_compat import promote_enums
promote_enums(QtCore)
del QtCore
elif PYQT5:
from PyQt5.QtCore import *
from PyQt5.QtCore import pyqtSignal as Signal
from PyQt5.QtCore import pyqtBoundSignal as SignalInstance
from PyQt5.QtCore import pyqtSlot as Slot
from PyQt5.QtCore import pyqtProperty as Property
from PyQt5.QtCore import QT_VERSION_STR as __version__
# For issue #153
from PyQt5.QtCore import QDateTime
QDateTime.toPython = QDateTime.toPyDateTime
# Those are imported from `import *`
del pyqtSignal, pyqtBoundSignal, pyqtSlot, pyqtProperty, QT_VERSION_STR
elif PYSIDE6:
from PySide6.QtCore import *
import PySide6.QtCore
__version__ = PySide6.QtCore.__version__
# obsolete in qt6
Qt.BackgroundColorRole = Qt.BackgroundRole
Qt.TextColorRole = Qt.ForegroundRole
Qt.MidButton = Qt.MiddleButton
# Map DeprecationWarning methods
QCoreApplication.exec_ = QCoreApplication.exec
QEventLoop.exec_ = QEventLoop.exec
QThread.exec_ = QThread.exec
QTextStreamManipulator.exec_ = QTextStreamManipulator.exec
elif PYSIDE2:
from PySide2.QtCore import *
try: # may be limited to PySide-5.11a1 only
from PySide2.QtGui import QStringListModel
except Exception:
pass
import PySide2.QtCore
__version__ = PySide2.QtCore.__version__
else:
raise PythonQtError('No Qt bindings could be found')
|
Python
| 0
|
@@ -873,16 +873,68 @@
ead.exec
+%0A %0A QLibraryInfo.location = QLibraryInfo.path
%0A%0A #
|
304bfbe4a77358c766f6fe190a77f863d6915f46
|
Use constants for the log options
|
rccn/config.py
|
rccn/config.py
|
import sys, os, logging, time, re, glob, importlib
import psycopg2
import psycopg2.extras
import sqlite3
import json
import riak
from riak.transports.pbc.transport import RiakPbcTransport
from logging import handlers as loghandlers
from decimal import Decimal
from datetime import date
from config_values import *
class PGEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, date):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
return json.JSONEncoder.default(self, obj)
# Loggers
smlog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/rccn.log', 'a', 104857600, 5)
formatter = logging.Formatter('%(asctime)s => %(name)-7s: %(levelname)-8s %(message)s')
smlog.setFormatter(formatter)
blog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/billing.log', 'a', 104857600, 5)
blog.setFormatter(formatter)
alog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/rapi.log', 'a', 104857600, 5)
alog.setFormatter(formatter)
slog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/subscription.log', 'a', 104857600, 5)
slog.setFormatter(formatter)
smslog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/sms.log', 'a', 104857600, 5)
smslog.setFormatter(formatter)
rlog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/reseller.log', 'a', 104857600, 5)
rlog.setFormatter(formatter)
roaminglog = loghandlers.RotatingFileHandler(rhizomatica_dir+'/rccn/log/roaming.log', 'a', 104857600, 5)
roaminglog.setFormatter(formatter)
logging.basicConfig()
# initialize logger RCCN
log = logging.getLogger('RCCN')
log.addHandler(smlog)
log.setLevel( logging.DEBUG)
# initialize logger BILLING
bill_log = logging.getLogger('RCCN_BILLING')
bill_log.addHandler(blog)
bill_log.setLevel(logging.DEBUG)
# initialize logger API
api_log = logging.getLogger('RCCN_API')
api_log.addHandler(alog)
api_log.setLevel(logging.DEBUG)
# initialize logger RSC
subscription_log = logging.getLogger('RCCN_RSC')
subscription_log.addHandler(slog)
subscription_log.setLevel(logging.DEBUG)
# initialize logger SMS
sms_log = logging.getLogger('RCCN_SMS')
sms_log.addHandler(smslog)
sms_log.setLevel(logging.DEBUG)
# initialize logger RESELLER
res_log = logging.getLogger('RCCN_RESELLER')
res_log.addHandler(rlog)
res_log.setLevel(logging.DEBUG)
# initialize logger ROAMING
roaming_log = logging.getLogger('RCCN_ROAMING')
roaming_log.addHandler(roaminglog)
roaming_log.setLevel(logging.DEBUG)
# Extensions
class ExtensionException(Exception):
pass
extensions_list = []
os.chdir(rhizomatica_dir+'/rccn/extensions/')
files = glob.glob(rhizomatica_dir+'/rccn/extensions/ext_*.py')
for f in files:
file_name = f.rpartition('.')[0]
ext_name = file_name.split('_')[1]
extensions_list.append(ext_name)
# initialize DB handler
db_conn = None
config = {}
try:
db_conn = psycopg2.connect(database=pgsql_db, user=pgsql_user, password=pgsql_pwd, host=pgsql_host)
cur = db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cur.execute('SELECT * from site')
site_conf = cur.fetchone()
config['site_name'] = site_conf['site_name']
config['internal_prefix'] = site_conf['postcode']+site_conf['pbxcode']
config['local_ip'] = site_conf['ip_address']
# load SMS shortcode into global config
cur.execute('SELECT smsc_shortcode,sms_sender_unauthorized,sms_destination_unauthorized FROM configuration')
smsc = cur.fetchone()
config['smsc'] = smsc[0]
config['sms_source_unauthorized'] = smsc[1]
config['sms_destination_unauthorized'] = smsc[2]
except psycopg2.DatabaseError as e:
log.error('Database connection error %s' % e)
# Connect to riak
#riak_client = riak.RiakClient(protocol='http', host='127.0.0.1', http_port=8098)
# use protocol buffers
riak_client = riak.RiakClient(pb_port=8087, protocol='pbc')
# load modules
from modules import subscriber
Subscriber = subscriber.Subscriber
SubscriberException = subscriber.SubscriberException
from modules import numbering
Numbering = numbering.Numbering
NumberingException = numbering.NumberingException
from modules import billing
Billing = billing.Billing
from modules import credit
Credit = credit.Credit
CreditException = credit.CreditException
from modules import configuration
Configuration = configuration.Configuration
ConfigurationException = configuration.ConfigurationException
from modules import statistics
CallsStatistics = statistics.CallsStatistics
CostsStatistics = statistics.CostsStatistics
StatisticException = statistics.StatisticException
from modules import sms
SMS = sms.SMS
SMSException = sms.SMSException
from modules import subscription
Subscription = subscription.Subscription
SubscriptionException = subscription.SubscriptionException
from modules import reseller
Reseller = reseller.Reseller
ResellerException = reseller.ResellerException
|
Python
| 0.000001
|
@@ -561,16 +561,64 @@
Loggers%0A
+mode = 'a'%0AmaxBytes = 104857600%0AbackupCount = 5%0A
smlog =
@@ -687,33 +687,43 @@
n.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Aformat
@@ -915,33 +915,43 @@
g.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Ablog.s
@@ -1051,33 +1051,43 @@
i.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Aalog.s
@@ -1195,33 +1195,43 @@
n.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Aslog.s
@@ -1332,33 +1332,43 @@
s.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Asmslog
@@ -1474,33 +1474,43 @@
r.log',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Arlog.s
@@ -1623,25 +1623,35 @@
g',
-'a', 104857600, 5
+mode, maxBytes, backupCount
)%0Aro
|
34b6d5c04e51e95874141d746dbfc6e16fcca967
|
Use capital letters in all view name words
|
reddit/urls.py
|
reddit/urls.py
|
"""django_reddit URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
import views
urlpatterns = [
url(r'^$', views.frontpage, name="Frontpage"),
url(r'^comments/(?P<thread_id>[0-9]+)$', views.comments, name="Thread"),
url(r'^login/$', views.user_login, name="Login"),
url(r'^logout/$', views.user_logout, name="Logout"),
url(r'^register/$', views.register, name="Register"),
url(r'^submit/$', views.submit, name="Submit"),
url(r'^user/(?P<username>\w+)$', views.user_profile, name="User Profile"),
url(r'^profile/edit/$', views.edit_profile, name="Edit profile"),
url(r'^post/comment/$', views.post_comment, name="Post Comment"),
url(r'^vote/$', views.vote, name="Vote on item"),
url(r'^populate/$', views.test_data, name="Create test data"),
]
|
Python
| 0.00001
|
@@ -1175,17 +1175,17 @@
e=%22Edit
-p
+P
rofile%22)
|
4394515cd5632a7f110993ff75033d407d10861d
|
Fix stray '.' in import statement.
|
doc/cdoc/numpyfilter.py
|
doc/cdoc/numpyfilter.py
|
#!/usr/bin/env python
"""
numpyfilter.py INPUTFILE
Interpret C comments as ReStructuredText, and replace them by the HTML output.
Also, add Doxygen /** and /**< syntax automatically where appropriate.
"""
from __future__ import division, absolute_import
import sys
import re
import os
import textwrap
import optparse
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle.
CACHE_FILE = 'build/rst-cache.pck'
def main():
p = optparse.OptionParser(usage=__doc__.strip())
options, args = p.parse_args()
if len(args) != 1:
p.error("no input file given")
comment_re = re.compile(r'(\n.*?)/\*(.*?)\*/', re.S)
cache = load_cache()
f = open(args[0], 'r')
try:
text = f.read()
text = comment_re.sub(lambda m: process_match(m, cache), text)
sys.stdout.write(text)
finally:
f.close()
save_cache(cache)
def filter_comment(text):
if text.startswith('NUMPY_API'):
text = text[9:].strip()
if text.startswith('UFUNC_API'):
text = text[9:].strip()
html = render_html(text)
return html
def process_match(m, cache=None):
pre, rawtext = m.groups()
preline = pre.split("\n")[-1]
if cache is not None and rawtext in cache:
text = cache[rawtext]
else:
text = re.compile(r'^\s*\*', re.M).sub('', rawtext)
text = textwrap.dedent(text)
text = filter_comment(text)
if cache is not None:
cache[rawtext] = text
if preline.strip():
return pre + "/**< " + text + " */"
else:
return pre + "/** " + text + " */"
def load_cache():
if os.path.exists(CACHE_FILE):
f = open(CACHE_FILE, 'rb')
try:
cache = pickle.load(f)
except:
cache = {}
finally:
f.close()
else:
cache = {}
return cache
def save_cache(cache):
f = open(CACHE_FILE + '.new', 'wb')
try:
pickle.dump(cache, f)
finally:
f.close()
os.rename(CACHE_FILE + '.new', CACHE_FILE)
def render_html(text):
import docutils.parsers.rst
import docutils.writers.html4css1
import docutils.core
docutils.parsers.rst.roles.DEFAULT_INTERPRETED_ROLE = 'title-reference'
writer = docutils.writers.html4css1.Writer()
parts = docutils.core.publish_parts(
text,
writer=writer,
settings_overrides = dict(halt_level=5,
traceback=True,
default_reference_context='title-reference',
stylesheet_path='',
# security settings:
raw_enabled=0,
file_insertion_enabled=0,
_disable_config=1,
)
)
return parts['html_body'].encode('utf-8')
if __name__ == "__main__": main()
|
Python
| 0
|
@@ -395,17 +395,16 @@
s pickle
-.
%0A%0ACACHE_
@@ -1094,20 +1094,16 @@
ml(text)
-
%0A ret
|
21102267231fbdc171b670d62f5ee8baf7d4d4c4
|
Add multiplication operation
|
calc.py
|
calc.py
|
import sys
def add_all(nums):
return sum(nums)
if __name__ == '__main__':
command = sys.argv[1]
nums = map(float, sys.argv[2:])
if command == 'add':
print(add_all(nums))
|
Python
| 0.999777
|
@@ -46,16 +46,85 @@
(nums)%0A%0A
+def multiply_all(nums):%0A return reduce(lambda a, b: a * b, nums)%0A%0A
if __nam
@@ -257,8 +257,74 @@
l(nums))
+%0A elif command == 'multiply':%0A print(multiply_all(nums))
|
f6fdbdc1176cffa0a145170cab583387f26f8649
|
Add module docstring
|
calc.py
|
calc.py
|
import sys
if __name__ == '__main__':
print(sum(map(int, sys.argv[1:])))
|
Python
| 0.000001
|
@@ -1,8 +1,52 @@
+%22%22%22calc.py: A simple python calculator.%22%22%22%0A%0A
import s
|
5223518b1bacdbeaa5f2ac11fede45b30ec15b0e
|
clean up strips
|
cite.py
|
cite.py
|
#!/usr/bin/python3
import sys
from bs4 import BeautifulSoup
import re
from tld import get_tld
import datetime
def soup2dict(soup, dictionary):
"""
Extract info from BeautifulSoup soup into a dictionary. Return a modified
dictionary.
"""
meta = soup.find_all("meta")
for tag in meta:
if tag.get("property") == "og:title":
dictionary["title"] = tag.get("content").strip()
elif tag.get("name") == "title":
dictionary["title"] = tag.get("content").strip()
elif tag.get("name") == "author":
dictionary["author"] = tag.get("content").strip()
elif tag.get("name") == "dat":
dictionary["date"] = tag.get("content").strip()
elif tag.get("name") == "cre":
dictionary["publisher"] = tag.get("content").strip()
elif tag.get("property") == "article:modified_time":
dictionary["date"] = tag.get("content").strip()
elif tag.get("property") == "article:published_time":
dictionary["date"] = tag.get("content").strip()
if "title" not in dictionary and soup.title is not None:
dictionary["title"] = soup.title.string.strip()
#print(soup.find_all("span", class_="author")[0].contents)
#print(soup.find_all("span", class_="date")[0].contents)
s = soup.get_text()
m = re.search(r'By (\w* \w*)', s)
if "author" not in dictionary and m is not None:
dictionary["author"] = m.group(1)
m = re.search(r'((January|February|March|May|June|July|August|September|October|November|December) \d+, \d+|\d+ (January|February|March|May|June|July|August|September|October|November|December) \d+)', s)
if "date" not in dictionary and m is not None:
dictionary["date"] = m.group(0)
if "author" not in dictionary:
author_candidates = []
author_candidates.extend(soup.find_all("div", class_="author"))
author_candidates.extend(soup.find_all("span", class_="author"))
if author_candidates:
dictionary["author"] = author_candidates[0].get_text()
def get_author(dictionary):
return dictionary.get("author")
def get_date(dictionary):
return dictionary.get("date")
def get_title(dictionary):
return dictionary.get("title")
publisher_map = {
"huffingtonpost.com": "The Huffington Post",
"lesswrong.com": "LessWrong",
"nytimes.com": "The New York Times",
"huffingtonpost.ca": "Huffington Post Canada",
"washingtonpost.com": "The Washington Post",
"indiatimes.com": "The Times of India",
"bostonglobe.com": "The Boston Globe",
"mirror.co.uk": "Mirror",
"telegraph.co.uk": "The Telegraph",
"bloomberg.com": "Businessweek",
"ft.com": "Financial Times",
"economist.com": "The Economist",
"arstechnica.com": "Ars Technica",
"wsj.com": "The Wall Street Journal",
"theguardian.com": "The Guardian",
"independent.co.uk": "The Independent",
"theregister.co.uk": "The Register",
}
def get_publisher(dictionary, url):
if get_tld(url) in publisher_map:
return publisher_map[get_tld(url)]
else:
return dictionary.get("publisher")
def get_cite_web(dictionary, url=""):
result = "<ref>{{cite web "
result += "|url=" + url + " "
author = get_author(dictionary)
date = get_date(dictionary)
title = get_title(dictionary)
publisher = get_publisher(dictionary, url)
if author:
result += "|author=" + author + " "
if date:
result += "|date=" + date + " "
if title:
result += "|title=" + title + " "
if publisher:
result += "|publisher=" + publisher + " "
result += "|accessdate=" + datetime.date.today().strftime("%B %-d, %Y")
result = result.strip()
result += "}}</ref>"
return result
if __name__ == "__main__":
soup = BeautifulSoup(sys.stdin, "html.parser")
d = dict()
soup2dict(soup, d)
print(get_cite_web(d, sys.argv[1]))
|
Python
| 0.000001
|
@@ -397,32 +397,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -491,32 +491,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -587,32 +587,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -678,32 +678,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -774,32 +774,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -887,32 +887,24 @@
t(%22content%22)
-.strip()
%0A eli
@@ -1005,24 +1005,16 @@
ontent%22)
-.strip()
%0A if
@@ -1114,24 +1114,16 @@
e.string
-.strip()
%0A #pr
@@ -2069,16 +2069,24 @@
author%22)
+.strip()
%0A%0Adef ge
@@ -2138,16 +2138,24 @@
(%22date%22)
+.strip()
%0A%0Adef ge
@@ -2209,16 +2209,24 @@
%22title%22)
+.strip()
%0A%0Apublis
@@ -3175,16 +3175,24 @@
lisher%22)
+.strip()
%0A%0Adef ge
|
72e9bb3ea002f9a86835ef26667de4f0147ba0b3
|
Update at 2017-07-18 23-51-46
|
data.py
|
data.py
|
import json
import random
from pathlib import Path
import numpy as np
from tqdm import tqdm
from utils import *
DATASET = Path('~/dataset/').expanduser()
DIRS = [x for x in DATASET.iterdir() if x.is_dir()]
TRAIN_DIRS = DIRS[:-1]
VAL_DIRS = DIRS[-1:]
IMAGE_TRAIN = Path('npy/image_train')
IMAGE_VAL = Path('npy/image_val')
WINDOW_TRAIN = Path('npy/window_train')
WINDOW_VAL = Path('npy/window_val')
N_IMAGE_TRAIN = 25000
N_IMAGE_VAL = 5000
N_WINDOW_TRAIN = 25000
N_WINDOW_VAL = 2000
TIMESTEPS = 30
def gen_image_npy(video_dirs, target_dir, n_samples):
x_all = []
y_all = []
for video_dir in video_dirs:
imgs = sorted((video_dir / 'frames/').iterdir())
label = json.load((video_dir / 'label.json').open())['label']
x_all.extend(imgs)
y_all.extend(label)
x_use, y_use = sample(x_all, y_all, k=n_samples)
parts = split(x_use, y_use, k=1000)
for idx, x_part, y_part in tqdm(parts):
n = len(x_part)
xs = np.zeros((n, 224, 224, 3), dtype=np.float32)
ys = np.zeros((n, 1), dtype=np.uint8)
for i in range(n):
xs[i] = read_img(x_part[i])
ys[i] = y_part[i]
np.save(str(target_dir / 'x_{:05d}.npy'.format(idx)), xs)
np.save(str(target_dir / 'y_{:05d}.npy'.format(idx)), ys)
del xs, ys
def gen_window_npy(video_dirs, target_dir, n_samples, timesteps):
x_all = []
y_all = []
for video_dir in video_dirs:
n_frames = len(list((video_dir / 'frames/').iterdir()))
labels = read_json(video_dir / 'label.json')['label']
windows = [(video_dir, i, i + timesteps)
for i in range(n_frames - timesteps + 1)]
x_all.extend(windows)
y_all.extend([labels[e - 1] for (_, s, e) in windows])
x_use, y_use = sample(x_all, y_all, k=n_samples)
parts = split(x_use, y_use, k=200)
for idx, x_part, y_part in tqdm(parts):
n = len(x_part)
xs = np.zeros((n, timesteps, 224, 224, 3), dtype=np.float32)
ys = np.zeros((n, 1), dtype=np.uint8)
for i in range(n):
(video_dir, s, e) = x_part[i]
for f in range(s, e):
path = video_dir / 'frames' / '{:08d}.jpg'.format(f)
xs[i][f - s] = read_img(path)
ys[i] = y_part[i]
np.save(str(target_dir / 'x_{:05d}.npy'.format(idx)), xs)
np.save(str(target_dir / 'y_{:05d}.npy'.format(idx)), ys)
del xs, ys
def image_generator(npy_dir, batch_size):
x_paths = sorted(npy_dir.glob('x_*.npy'))
y_paths = sorted(npy_dir.glob('y_*.npy'))
idx = 0
x_batch = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
y_batch = np.zeros((batch_size, 1), dtype=np.uint8)
while True:
for x_path, y_path in zip(x_paths, y_paths):
x_part = np.load(x_path)
y_part = np.load(y_path)
for x, y in zip(x_part, y_part):
x_batch[idx] = x
y_batch[idx] = y
if idx + 1 == batch_size:
yield x_batch, y_batch
idx = (idx + 1) % batch_size
del x_part, y_part
def window_generator(npy_dir, batch_size):
x_paths = sorted(npy_dir.glob('x_*.npy'))
y_paths = sorted(npy_dir.glob('y_*.npy'))
idx = 0
x_batch = np.zeros((batch_size, TIMESTEPS, 224, 224, 3), dtype=np.float32)
y_batch = np.zeros((batch_size, 1), dtype=np.uint8)
while True:
for x_path, y_path in zip(x_paths, y_paths):
x_part = np.load(x_path)
y_part = np.load(y_path)
for x, y in zip(x_part, y_part):
x_batch[idx] = x
y_batch[idx] = y
if idx + 1 == batch_size:
yield x_batch, y_batch
idx = (idx + 1) % batch_size
del x_part, y_part
image_train_gen = image_generator(IMAGE_TRAIN, 40)
image_val_gen = image_generator(IMAGE_VAL, 40)
window_train_gen = window_generator(WINDOW_TRAIN, 30)
window_val_gen = window_generator(WINDOW_VAL, 30)
if __name__ == '__main__':
for folder in [IMAGE_TRAIN, IMAGE_VAL, WINDOW_TRAIN, WINDOW_VAL]:
folder.mkdir(parents=True, exist_ok=True)
gen_image_npy(TRAIN_DIRS, IMAGE_TRAIN, N_IMAGE_TRAIN)
gen_image_npy(VAL_DIRS, IMAGE_VAL, N_IMAGE_VAL)
gen_window_npy(TRAIN_DIRS, WINDOW_TRAIN, N_WINDOW_TRAIN, TIMESTEPS)
gen_window_npy(VAL_DIRS, WINDOW_VAL, N_WINDOW_VAL, TIMESTEPS)
|
Python
| 0
|
@@ -43,16 +43,42 @@
ort Path
+%0Afrom pprint import pprint
%0A%0Aimport
@@ -4217,16 +4217,116 @@
=True)%0A%0A
+ print('Train data:')%0A pprint(TRAIN_DIRS)%0A print('Validation data:')%0A pprint(VAL_DIRS)%0A%0A
gen_
|
5fd51adbbc136adc28725688c7bf1ecf56e978c1
|
Develop (#105)
|
auth/auth_backend.py
|
auth/auth_backend.py
|
"""
auth_backend.py
Peter Zujko (@zujko)
Defines Django authentication backend for shibboleth.
04/05/17
"""
from django.contrib.auth.models import User
class Attributes():
EDU_AFFILIATION = 'urn:oid:1.3.6.1.4.1.4447.1.41'
FIRST_NAME = 'urn:oid:2.5.4.42'
LAST_NAME = 'urn:oid:2.5.4.4'
USERNAME = 'urn:oid:0.9.2342.19200300.100.1.1'
class SAMLSPBackend(object):
def authenticate(self, request, saml_authentication=None):
if not saml_authentication:
return None
if saml_authentication.is_authenticated():
attributes = saml_authentication.get_attributes()
username = attributes[Attributes.USERNAME][0]
first_name = attributes[Attributes.FIRST_NAME][0]
last_name = attributes[Attributes.LAST_NAME][0]
affiliation = attributes.get(Attributes.EDU_AFFILIATION, ['-1'])
try:
# Grab attributes from shib and auth user
user = User.objects.get(username=username)
except User.DoesNotExist:
# If user does not exist in DB, Create a user object and save to DB
user = User(username=username, email=username + "@rit.edu")
user.set_unusable_password()
user.first_name = first_name
user.last_name = last_name
user.save()
# Set user profile attributes
user.profile.full_name = "{} {}".format(first_name, last_name)
user.profile.display_name = "{}{}".format(
first_name[0], last_name[0])
# Set user Affiliation
user.profile.has_access = 1
if 'Employee' in affiliation or 'Alumni' in affiliation or '-1' in affiliation:
user.profile.has_access = 0
user.profile.save()
return user
return None
def get_user(self, user_id):
try:
return User.objects.get(pk=user_id)
except User.DoesNotExist:
return None
|
Python
| 0
|
@@ -1645,17 +1645,17 @@
ccess =
-1
+0
%0A
@@ -1667,66 +1667,15 @@
if '
-Employee' in affiliation or 'Alumni' in affiliation or '-1
+Student
' in
@@ -1730,17 +1730,17 @@
ccess =
-0
+1
%0A%0A
|
b8f63a7517d6c6189bda0d213ae797c8905868b4
|
add visualization method to see tissue outline in DSA
|
histomicstk/saliency/tests/tissue_detection_test.py
|
histomicstk/saliency/tests/tissue_detection_test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 18 00:06:28 2019.
@author: mtageld
"""
import unittest
import os
import tempfile
import shutil
from imageio import imread, imwrite
import girder_client
import numpy as np
# from matplotlib import pylab as plt
# from matplotlib.colors import ListedColormap
from histomicstk.saliency.tissue_detection import (
get_slide_thumbnail, get_tissue_mask,
get_tissue_boundary_annotation_documents)
# %%===========================================================================
# Constants & prep work
APIURL = 'http://candygram.neurology.emory.edu:8080/api/v1/'
SAMPLE_SLIDE_ID = "5d586d76bd4404c6b1f286ae"
gc = girder_client.GirderClient(apiUrl=APIURL)
# gc.authenticate(interactive=True)
gc.authenticate(apiKey='kri19nTIGOkWH01TbzRqfohaaDWb6kPecRqGmemb')
savepath = tempfile.mkdtemp()
# %%===========================================================================
# Tests
class TissueDetectionTest(unittest.TestCase):
"""Test methods for getting ROI mask from annotations."""
def test_get_tissue_mask(self):
"""Test get_tissue_mask()."""
thumbnail_rgb = get_slide_thumbnail(gc, SAMPLE_SLIDE_ID)
labeled, mask = get_tissue_mask(
thumbnail_rgb, deconvolve_first=True,
n_thresholding_steps=2, sigma=0., min_size=30)
# # visualize result
# vals = np.random.rand(256,3)
# vals[0, ...] = [0.9, 0.9, 0.9]
# cMap = ListedColormap(1 - vals)
#
# f, ax = plt.subplots(1, 3, figsize=(20, 20))
# ax[0].imshow(thumbnail_rgb)
# ax[1].imshow(labeled, cmap=cMap)
# ax[2].imshow(mask, cmap=cMap)
# plt.show()
self.assertTupleEqual(labeled.shape, (156, 256))
self.assertEqual(len(np.unique(labeled)), 14)
# save for use in the next test
imwrite(os.path.join(
savepath, 'tissue_binmask.png'), np.uint8(0 + (labeled > 0)))
def test_get_tissue_boundary_annotation_documents(self):
"""Test get_tissue_boundary_annotation_documents()."""
labeled = imread(os.path.join(savepath, 'tissue_binmask.png'))
annotation_docs = get_tissue_boundary_annotation_documents(
gc, slide_id=SAMPLE_SLIDE_ID, labeled=labeled)
self.assertTrue('elements' in annotation_docs[0].keys())
self.assertEqual(len(annotation_docs[0]['elements']), 9)
# deleting existing annotations in target slide (if any)
existing_annotations = gc.get('/annotation/item/' + SAMPLE_SLIDE_ID)
for ann in existing_annotations:
gc.delete('/annotation/%s' % ann['_id'])
# post annotations to slide
for doc in annotation_docs:
_ = gc.post("/annotation?itemId=" + SAMPLE_SLIDE_ID, json=doc)
# cleanup
shutil.rmtree(savepath)
def suite():
"""Run chained unit tests in desired order.
See: https://stackoverflow.com/questions/5387299/...
... python-unittest-testcase-execution-order
"""
suite = unittest.TestSuite()
suite.addTest(TissueDetectionTest('test_get_tissue_mask'))
suite.addTest(
TissueDetectionTest('test_get_tissue_boundary_annotation_documents'))
return suite
# %%===========================================================================
if __name__ == '__main__':
runner = unittest.TextTestRunner(failfast=True)
runner.run(suite())
|
Python
| 0
|
@@ -946,16 +946,17 @@
Tests%0A%0A
+%0A
class Ti
@@ -1324,17 +1324,17 @@
eps=
-2
+1
, sigma=
0.,
@@ -1329,18 +1329,19 @@
, sigma=
-0.
+1.5
, min_si
@@ -1815,17 +1815,17 @@
led)), 1
-4
+0
)%0A%0A
|
95ead630018870f293613febc599a50e8c69c792
|
Change in field length
|
hs_core/migrations/0030_resourcefile_file_folder.py
|
hs_core/migrations/0030_resourcefile_file_folder.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hs_core', '0029_auto_20161123_1858'),
]
operations = [
migrations.AddField(
model_name='resourcefile',
name='file_folder',
field=models.CharField(max_length=255, null=True),
),
]
|
Python
| 0.000001
|
@@ -386,11 +386,12 @@
gth=
-255
+4096
, nu
|
1f74e958193b5bf94dc7d7129e08834265d9f82b
|
Remove unused --check
|
pdftables/scripts/render.py
|
pdftables/scripts/render.py
|
#!/usr/bin/env python
"""pdftables-render: obtain pdftables debugging information from pdfs
Usage:
pdftables-render [options] <pdfpath>...
pdftables-render (-h | --help)
pdftables-render --version
pdftables-render --check <pdfpath>
Example page number lists:
<pdfpath> may contain a [:page-number-list].
pdftables-render my.pdf:1
pdftables-render my.pdf:2,5-10,15-
Options:
-h --help Show this screen.
--version Show version.
-D --debug Additional debug information
-O --output-dir=<path> Path to write debug data to
-a --ascii Show ascii table
-p --pprint pprint.pprint() the table
-i --interactive jump into an interactive debugger (ipython)
"""
# Use $ pip install --user --editable pdftables
# to install this util in your path.
import sys
import os
import pdftables
from os.path import basename
from pprint import pprint
from docopt import docopt
from pdftables.pdf_document import PDFDocument
from pdftables.diagnostics import render_page, make_annotations
from pdftables.display import to_string
from pdftables.pdftables import page_to_tables
def main(args=None):
if args is not None:
argv = args
else:
argv = sys.argv[1:]
arguments = docopt(
__doc__,
argv=argv,
version='pdftables-render experimental')
if arguments["--debug"]:
print(arguments)
if arguments["--check"]:
return check(arguments["<pdfpath>"][0])
for pdf_filename in arguments["<pdfpath>"]:
render_pdf(arguments, pdf_filename)
def ensure_dirs():
try:
os.mkdir('png')
except OSError:
pass
try:
os.mkdir('svg')
except OSError:
pass
def parse_page_ranges(range_string, npages):
ranges = range_string.split(',')
result = []
def string_to_pagenumber(s):
if s == "":
return npages
return int(x)
for r in ranges:
if '-' not in r:
result.append(int(r))
else:
# Convert 1-based indices to 0-based and make integer.
points = [string_to_pagenumber(x) for x in r.split('-')]
if len(points) == 2:
start, end = points
else:
raise RuntimeError(
"Malformed range string: {0}"
.format(range_string))
# Plus one because it's (start, end) inclusive
result.extend(xrange(start, end + 1))
# Convert from one based to zero based indices
return [x - 1 for x in result]
def render_pdf(arguments, pdf_filename):
ensure_dirs()
page_range_string = ''
page_set = []
if ':' in pdf_filename:
pdf_filename, page_range_string = pdf_filename.split(':')
doc = PDFDocument.from_path(pdf_filename)
if page_range_string:
page_set = parse_page_ranges(page_range_string, len(doc))
for page_number, page in enumerate(doc.get_pages()):
if page_set and page_number not in page_set:
# Page ranges have been specified by user, and this page not in
continue
svg_file = 'svg/{0}_{1:02d}.svg'.format(
basename(pdf_filename), page_number)
png_file = 'png/{0}_{1:02d}.png'.format(
basename(pdf_filename), page_number)
table_container = page_to_tables(page)
annotations = make_annotations(table_container)
render_page(
pdf_filename, page_number, annotations, svg_file, png_file)
print "Rendered", svg_file, png_file
if arguments["--interactive"]:
from ipdb import set_trace
set_trace()
for table in table_container:
if arguments["--ascii"]:
print to_string(table.data)
if arguments["--pprint"]:
pprint(table.data)
def check(path):
fileobj = open(path, 'rb')
doc = PDFDocument.from_fileobj(fileobj)
tables = page_to_tables(doc.get_page(0))
print tables
|
Python
| 0
|
@@ -207,47 +207,8 @@
sion
-%0A pdftables-render --check %3Cpdfpath%3E
%0A%0AEx
@@ -1451,86 +1451,8 @@
s)%0A%0A
- if arguments%5B%22--check%22%5D:%0A return check(arguments%5B%22%3Cpdfpath%3E%22%5D%5B0%5D)%0A%0A
@@ -3834,158 +3834,4 @@
)%0A%0A%0A
-def check(path):%0A fileobj = open(path, 'rb')%0A doc = PDFDocument.from_fileobj(fileobj)%0A tables = page_to_tables(doc.get_page(0))%0A print tables%0A
|
8e845851f5630cf56562e0b8f9b1241f643052a3
|
Remove resolved TODO (to self)
|
pebble/PblProjectCreator.py
|
pebble/PblProjectCreator.py
|
import os
import string
import uuid
from PblCommand import PblCommand
class PblProjectCreator(PblCommand):
name = 'new-project'
help = 'Create a new Pebble project'
def configure_subparser(self, parser):
parser.add_argument("name", help = "Name of the project you want to create")
parser.add_argument("--javascript", action="store_true", help = "Generate javascript related files")
def run(self, args):
print "Creating new project {}".format(args.name)
# User can give a path to a new project dir
project_path = args.name
project_name = os.path.split(project_path)[1]
project_root = os.path.join(os.getcwd(), project_path)
project_src = os.path.join(project_root, "src")
# Create directories
os.makedirs(project_root)
os.makedirs(os.path.join(project_root, "resources"))
os.makedirs(project_src)
# Create main .c file
with open(os.path.join(project_src, "%s.c" % (project_name)), "w") as f:
f.write(FILE_DUMMY_MAIN)
# Add wscript file
with open(os.path.join(project_root, "wscript"), "w") as f:
f.write(FILE_WSCRIPT)
# Add appinfo.json file
appinfo_dummy = DICT_DUMMY_APPINFO.copy()
appinfo_dummy['uuid'] = str(uuid.uuid4())
appinfo_dummy['project_name'] = project_name
with open(os.path.join(project_root, "appinfo.json"), "w") as f:
f.write(FILE_DUMMY_APPINFO.substitute(**appinfo_dummy))
# Add .gitignore file
with open(os.path.join(project_root, ".gitignore"), "w") as f:
f.write(FILE_GITIGNORE)
if args.javascript:
project_js_src = os.path.join(project_src, "js")
os.makedirs(project_js_src)
with open(os.path.join(project_js_src, "pebble-js-app.js"), "w") as f:
f.write(FILE_DUMMY_JAVASCRIPT_SRC)
FILE_GITIGNORE = """
# Ignore build generated files
build
"""
FILE_WSCRIPT = """
#
# This file is the default set of rules to compile a Pebble project.
#
# Feel free to customize this to your needs.
#
top = '.'
out = 'build'
def options(ctx):
ctx.load('pebble_sdk')
def configure(ctx):
ctx.load('pebble_sdk')
def build(ctx):
ctx.load('pebble_sdk')
ctx.pbl_program(source=ctx.path.ant_glob('src/**/*.c'),
target='pebble-app.elf')
ctx.pbl_bundle(elf='pebble-app.elf',
js=ctx.path.ant_glob('src/js/**/*.js'))
"""
FILE_DUMMY_MAIN = """#include <pebble.h>
static Window *window;
static TextLayer *text_layer;
static void select_click_handler(ClickRecognizerRef recognizer, void *context) {
text_layer_set_text(text_layer, "Select");
}
static void up_click_handler(ClickRecognizerRef recognizer, void *context) {
text_layer_set_text(text_layer, "Up");
}
static void down_click_handler(ClickRecognizerRef recognizer, void *context) {
text_layer_set_text(text_layer, "Down");
}
static void click_config_provider(void *context) {
window_single_click_subscribe(BUTTON_ID_SELECT, select_click_handler);
window_single_click_subscribe(BUTTON_ID_UP, up_click_handler);
window_single_click_subscribe(BUTTON_ID_DOWN, down_click_handler);
}
static void window_load(Window *window) {
Layer *window_layer = window_get_root_layer(window);
GRect bounds = layer_get_bounds(window_layer);
text_layer = text_layer_create((GRect) { .origin = { 0, 72 }, .size = { bounds.size.w, 20 } });
text_layer_set_text(text_layer, "Press a button");
text_layer_set_text_alignment(text_layer, GTextAlignmentCenter);
layer_add_child(window_layer, text_layer_get_layer(text_layer));
}
static void window_unload(Window *window) {
text_layer_destroy(text_layer);
}
static void init(void) {
window = window_create();
window_set_click_config_provider(window, click_config_provider);
window_set_window_handlers(window, (WindowHandlers) {
.load = window_load,
.unload = window_unload,
});
const bool animated = true;
window_stack_push(window, animated);
}
static void deinit(void) {
window_destroy(window);
}
int main(void) {
init();
APP_LOG(APP_LOG_LEVEL_DEBUG, "Done initializing, pushed window: %p", window);
app_event_loop();
deinit();
}
"""
# @ROBERT TODO: how I make subst here?
DICT_DUMMY_APPINFO = {
'company_name': 'MakeAwesomeHappen',
'version_code': 1,
'version_label': '1.0.0',
'is_watchface': 'false',
'app_keys': """{
"dummy": 0
}""",
'resources_media': '[]'
}
FILE_DUMMY_APPINFO = string.Template("""{
"uuid": "${uuid}",
"shortName": "${project_name}",
"longName": "${project_name}",
"companyName": "${company_name}",
"versionCode": ${version_code},
"versionLabel": "${version_label}",
"watchapp": {
"watchface": ${is_watchface}
},
"appKeys": ${app_keys},
"resources": {
"media": ${resources_media}
}
}
""")
FILE_DUMMY_JAVASCRIPT_SRC = """\
Pebble.addEventListener("ready",
function(e) {
console.log("Hello world!", "Sent from your javascript application.")
}
};
"""
class InvalidProjectException(Exception):
pass
class OutdatedProjectException(Exception):
pass
def check_project_directory():
"""Check to see if the current directly matches what is created by PblProjectCreator.run.
Raises an InvalidProjectException or an OutdatedProjectException if everything isn't quite right.
"""
if not os.path.isdir('src') or not os.path.exists('wscript'):
raise InvalidProjectException
if os.path.islink('pebble_app.ld') or os.path.exists('resources/src/resource_map.json'):
raise OutdatedProjectException
def requires_project_dir(func):
def wrapper(self, args):
check_project_directory()
return func(self, args)
return wrapper
|
Python
| 0
|
@@ -4261,47 +4261,8 @@
%22%22%0A%0A
-# @ROBERT TODO: how I make subst here?%0A
DICT
|
57c2fbd6843d7a86d02983378926696cfd519f34
|
remove unnecessary log, and make a save-log more informative
|
jsk_apc2016_common/node_scripts/collect_sib_data.py
|
jsk_apc2016_common/node_scripts/collect_sib_data.py
|
#!/usr/bin/env python
from jsk_apc2016_common.msg import BinInfoArray, SegmentationInBinSync
import rospy
from cv_bridge import CvBridge
import cv2
import numpy as np
import pickle
from time import gmtime, strftime
from sensor_msgs.msg import Image
import os
import threading
class CollectSIBData(object):
"""
"""
def __init__(self):
self.mask_img = None
self.dist_img = None
self.depth_img = None
self.bin_info_dict = {}
self.lock = threading.Lock()
self.bridge = CvBridge()
self.debug_color_pub = rospy.Publisher(
'~debug_color', Image, queue_size=1)
self.debug_depth_pub = rospy.Publisher(
'~debug_depth', Image, queue_size=1)
self.bin_info_sub = rospy.Subscriber(
'~input/bin_info_array', BinInfoArray, self.topic_cb)
self.sync_sub = rospy.Subscriber(
'~input', SegmentationInBinSync, self.save_data_callback)
self.depth_sub = rospy.Subscriber(
'~input/depth', Image, self.depth_cb)
def topic_cb(self, bin_info_arr_msg):
rospy.loginfo('get bin_info')
json_path = rospy.get_param('/visualize_on_rviz/json')
self.layout_name = json_path.split('/')[-1][:-5]
self.bin_info_dict = self.bin_info_array_to_dict(bin_info_arr_msg)
def bin_info_array_to_dict(self, bin_info_array):
bin_info_dict = {}
for bin_ in bin_info_array.array:
bin_info_dict[bin_.name] = bin_
return bin_info_dict
def depth_cb(self, depth_msg):
self.lock.acquire()
self.depth_msg = depth_msg
rospy.loginfo('get depth')
self.depth_img = self.bridge.imgmsg_to_cv2(
depth_msg, "passthrough")
self.lock.release()
def save_data_callback(self, sync_msg):
if self.bin_info_dict == {}:
rospy.loginfo('bin_info_dict is not stored yet')
return
rospy.loginfo(rospy.get_param('~ready_to_save'))
if rospy.get_param('~ready_to_save') is True:
self.lock.acquire()
dist_msg = sync_msg.dist_msg
height_msg = sync_msg.height_msg
color_msg = sync_msg.color_msg
mask_msg = sync_msg.mask_msg
self.mask_img = self.bridge.imgmsg_to_cv2(
mask_msg, "passthrough").astype(np.bool)
self.dist_img = self.bridge.imgmsg_to_cv2(
dist_msg, "passthrough")
self.height_img = self.bridge.imgmsg_to_cv2(
height_msg, "passthrough")
self.color_img = self.bridge.imgmsg_to_cv2(color_msg, "bgr8")
# self.color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2HSV)
self.target_bin_name = rospy.get_param('~target_bin_name')
self.target_object =\
self.bin_info_dict[self.target_bin_name].target
self.target_bin_info = self.bin_info_dict[self.target_bin_name]
# debug message
self.debug_color_pub.publish(color_msg)
self.debug_depth_pub.publish(self.depth_msg)
rospy.loginfo(self.target_object)
self.save_data()
self.lock.release()
rospy.set_param('~ready_to_save', False)
def save_data(self):
"""Save data
1. Color: 3 channel uint8, BGR
2. Mask Image: bool->uint8, 0 or 255
3. Depth: uint8 (mm)
4. Dist2Shelf: uint8 (mm)
5. Height3D_image: uint8 (mm)
"""
# path of files to save
dir_path = rospy.get_param('~save_dir')
print dir_path
if not os.path.exists(dir_path):
os.makedirs(dir_path)
time = strftime('%Y%m%d%H%M%S', gmtime())
save_path = (dir_path + '/' + self.layout_name + '_' + time + '_bin_' +
self.target_bin_name)
# save data images
mask_img = self.mask_img.astype(np.uint8) * 255
cv2.imwrite(save_path + '.jpg', self.color_img)
cv2.imwrite(save_path + '.pbm', mask_img)
# save data pkl
data = {}
data['target_object'] = self.target_object
data['objects'] = self.target_bin_info.objects
data['dist2shelf_image'] = self.dist_img.astype(np.float16)
data['height3D_image'] = self.height_img.astype(np.float16)
data['depth_image'] = self.depth_img.astype(np.float16)
with open(save_path + '.pkl', 'wb') as f:
pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
# log mesage
rospy.loginfo('saved to {}'.format(save_path))
if __name__ == '__main__':
rospy.init_node('save_data')
# wait until gui button is pressed
seg = CollectSIBData()
rospy.spin()
|
Python
| 0
|
@@ -1096,46 +1096,8 @@
g):%0A
- rospy.loginfo('get bin_info')%0A
@@ -1155,16 +1155,16 @@
/json')%0A
+
@@ -1587,43 +1587,8 @@
msg%0A
- rospy.loginfo('get depth')%0A
@@ -1868,65 +1868,8 @@
rn%0A%0A
- rospy.loginfo(rospy.get_param('~ready_to_save'))%0A
@@ -2967,54 +2967,8 @@
msg)
-%0A rospy.loginfo(self.target_object)
%0A%0A
@@ -4373,32 +4373,83 @@
aved
- to %7B%7D'.format(save_path
+: %7B%7D%5Cn target_object: %7B%7D'.format(%0A save_path, self.target_object
))%0A%0A
|
0fc84d4cefde2446f3fbb2ab77d48c0f557d2496
|
Complete pyinstaller hooks.
|
kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
|
kivy/tools/packaging/pyinstaller_hooks/hook-kivy.py
|
'''
Kivy hook for PyInstaller
=========================
Kivy load itself in a complete dynamic way. PyImported don't see most of the
import cause of the Factory and Core.
In addition, the data and missing module are not copied automatically.
With this hook, everything needed for running kivy is correctly copied.
Check kivy documentation about how to use theses hook for packaging application.
'''
import kivy
from kivy.factory import Factory
def get_modules():
return [x.get('module', None) for x in Factory.classes.values()]
datas = [
(kivy.kivy_data_dir, 'kivy_install'),
(kivy.kivy_modules_dir, 'kivy_install'),
(kivy.kivy_exts_dir, 'kivy_install'),
]
# extensions
_kivy_modules = [
# pygame
'pygame.event',
'pygame.video',
'pygame.image',
'pygame.display',
'pygame',
# external modules
'kivy.cache',
'kivy.atlas',
'kivy.network',
'kivy.network.urlrequest',
'kivy.lib.osc',
'kivy.lib.osc.OSC',
'kivy.lib.osc.oscAPI',
'kivy.lib.mtdev',
'kivy.factory_registers',
'kivy.input.recorder',
'kivy.input.providers',
'kivy.input.providers.tuio',
'kivy.input.providers.mouse',
'kivy.input.providers.wm_common',
'kivy.input.providers.wm_touch',
'kivy.input.providers.wm_pen',
'kivy.input.providers.hidinput',
'kivy.input.providers.linuxwacom',
'kivy.input.providers.mactouch',
'kivy.input.providers.mouse',
'kivy.input.providers.mtdev',
# compiled modules
'kivy.event',
'kivy.graphics.buffer',
'kivy.graphics.c_opengl_debug',
'kivy.graphics.compiler',
'kivy.graphics.context_instructions',
'kivy.graphics.fbo',
'kivy.graphics.instructions',
'kivy.graphics.opengl',
'kivy.graphics.opengl_utils',
'kivy.graphics.shader',
'kivy.graphics.stenctil_instructions',
'kivy.graphics.texture',
'kivy.graphics.transformation',
'kivy.graphics.vbo',
'kivy.graphics.vertex',
'kivy.graphics.vertex_instructions',
'kivy.properties',
# core
'kivy.core.image.img_pygame',
'kivy.core.image.img_pil',
'kivy.core.audio.audio_gstplayer',
'kivy.core.audio.audio_pygame',
'kivy.core.camera.camera_avfoundation',
'kivy.core.camera.camera_pygst',
'kivy.core.camera.camera_opencv',
'kivy.core.camera.camera_videocapture',
'kivy.core.video.video_gstplayer',
'kivy.core.video.video_pyglet',
'kivy.core.video.video_pygst',
'kivy.core.text.text_pygame',
'kivy.core.text.markup',
'kivy.core.clipboard.clipboard_pygame',
'kivy.core.clipboard.clipboard_dummy',
'kivy.core.window.window_pygame',
]
hiddenimports = _kivy_modules + get_modules()
hiddenimports = list(set(hiddenimports))
|
Python
| 0
|
@@ -2045,24 +2045,29 @@
ore.
-image.img_pygame
+audio.audio_gstplayer
',%0A
@@ -2084,21 +2084,25 @@
ore.
-image.img_pil
+audio.audio_pygst
',%0A
@@ -2123,33 +2123,27 @@
audio.audio_
-gstplayer
+sdl
',%0A 'kivy
@@ -2351,65 +2351,253 @@
ore.
-video.video_gstplayer',%0A 'kivy.core.video.video
+clipboard.clipboard_android',%0A 'kivy.core.clipboard.clipboard_pygame',%0A 'kivy.core.clipboard.clipboard_dummy',%0A 'kivy.core.image.img_imageio',%0A 'kivy.core.image.img_tex',%0A 'kivy.core.image.img_dds',%0A 'kivy.core.image.img
_pyg
-let
+ame
',%0A
@@ -2614,24 +2614,139 @@
ore.
-video.video_pygs
+image.img_pil',%0A 'kivy.core.image.img_gif',%0A 'kivy.core.spelling.spelling_enchant',%0A 'kivy.core.spelling.spelling_osxappki
t',%0A
@@ -2803,14 +2803,19 @@
ext.
-markup
+text_sdlttf
',%0A
@@ -2832,34 +2832,95 @@
ore.
-clipboard.clipboard
+text.text_pil',%0A 'kivy.core.video.video_gstplayer',%0A 'kivy.core.video.video
_pyg
-ame
+st
',%0A
@@ -2937,71 +2937,243 @@
ore.
-clipboard.clipboard_dummy',%0A 'kivy.core.window.window_pygame
+video.video_ffmpeg',%0A 'kivy.core.video.video_pyglet',%0A 'kivy.core.video.video_null',%0A 'kivy.core.window.window_egl_rpi',%0A 'kivy.core.window.window_pygame',%0A 'kivy.core.window.window_sdl',%0A 'kivy.core.window.window_x11
',%0A%5D
|
9b9d6db9d99bec69e61070a743d0b2194c35e375
|
Mark as dead
|
module/plugins/hoster/FreevideoCz.py
|
module/plugins/hoster/FreevideoCz.py
|
# -*- coding: utf-8 -*-
"""
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, see <http://www.gnu.org/licenses/>.
"""
import re
from module.plugins.Hoster import Hoster
from module.network.RequestFactory import getURL
def getInfo(urls):
result = []
for url in urls:
html = getURL(url)
if re.search(FreevideoCz.OFFLINE_PATTERN, html):
# File offline
result.append((url, 0, 1, url))
else:
result.append((url, 0, 2, url))
yield result
class FreevideoCz(Hoster):
__name__ = "FreevideoCz"
__type__ = "hoster"
__pattern__ = r'http://(?:www\.)?freevideo.cz/vase-videa/(.*)\.html'
__version__ = "0.2"
__description__ = """Freevideo.cz hoster plugin"""
__author_name__ = "zoidberg"
__author_mail__ = "zoidberg@mujmail.cz"
OFFLINE_PATTERN = r'<h2 class="red-corner-full">Str.nka nebyla nalezena</h2>'
LINK_PATTERN = r'clip: {\s*url: "([^"]+)"'
def setup(self):
self.multiDL = self.resumeDownload = True
def process(self, pyfile):
self.html = self.load(pyfile.url, decode=True)
if re.search(self.OFFLINE_PATTERN, self.html):
self.offline()
found = re.search(self.LINK_PATTERN, self.html)
if found is None:
self.fail("Parse error (URL)")
download_url = found.group(1)
pyfile.name = re.match(self.__pattern__, pyfile.url).group(1) + ".mp4"
self.download(download_url)
|
Python
| 0.00004
|
@@ -22,1113 +22,166 @@
*-%0A%0A
-%22%22%22%0A This program is free software; you can redistribute it and/or modify%0A it under the terms of the GNU General Public License as published by%0A the Free Software Foundation; either version 3 of the License,%0A or (at your option) any later version.%0A%0A This program is distributed in the hope that it will be useful,%0A but WITHOUT ANY WARRANTY; without even the implied warranty of%0A MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.%0A See the GNU General Public License for more details.%0A%0A You should have received a copy of the GNU General Public License%0A along with this program; if not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%22%22%22%0A%0Aimport re%0Afrom module.plugins.Hoster import Hoster%0Afrom module.network.RequestFactory import getURL%0A%0A%0Adef getInfo(urls):%0A result = %5B%5D%0A%0A for url in urls:%0A%0A html = getURL(url)%0A if re.search(FreevideoCz.OFFLINE_PATTERN, html):%0A # File offline%0A result.append((url, 0, 1, url))%0A else:%0A result.append((url, 0, 2, url))%0A yield result%0A%0A%0Aclass FreevideoCz(Hoster):%0A __name__ = %22FreevideoCz
+from module.plugins.internal.DeadHoster import DeadHoster, create_getInfo%0A%0A%0Aclass FreevideoCz(DeadHoster):%0A __name__ = %22FreevideoCz%22%0A __version__ = %220.3
%22%0A
@@ -202,16 +202,17 @@
hoster%22%0A
+%0A
__pa
@@ -249,16 +249,17 @@
reevideo
+%5C
.cz/vase
@@ -269,43 +269,12 @@
dea/
-(.*)%5C.html'%0A __version__ = %220.2%22
+.+'%0A
%0A
@@ -407,659 +407,42 @@
z%22%0A%0A
- OFFLINE_PATTERN = r'%3Ch2 class=%22red-corner-full%22%3EStr.nka nebyla nalezena%3C/h2%3E'%0A%0A LINK_PATTERN = r'clip: %7B%5Cs*url: %22(%5B%5E%22%5D+)%22'%0A%0A%0A def setup(self):%0A self.multiDL = self.resumeDownload = True%0A%0A def process(self, pyfile):%0A%0A self.html = self.load(pyfile.url, decode=True)%0A%0A if re.search(self.OFFLINE_PATTERN, self.html):%0A self.offline()%0A%0A found = re.search(self.LINK_PATTERN, self.html)%0A if found is None:%0A self.fail(%22Parse error (URL)%22)%0A download_url = found.group(1)%0A%0A pyfile.name = re.match(self.__pattern__, pyfile.url).group(1) + %22.mp4%22%0A%0A self.download(download_url)%0A
+%0AgetInfo = create_getInfo(FreevideoCz)
|
1fed9f26010f24af14abff9444862ed0861adb63
|
Add simplification between parsing and execution
|
thinglang/runner.py
|
thinglang/runner.py
|
from thinglang.execution.execution import ExecutionEngine
from thinglang.lexer.lexer import lexer
from thinglang.parser.parser import parse
def run(source):
if not source:
raise ValueError('Got empty source')
source = source.strip().replace(' ' * 4, '\t')
lexical_groups = list(lexer(source))
root_node = parse(lexical_groups)
with ExecutionEngine(root_node) as engine:
engine.execute()
return engine.results()
|
Python
| 0.000066
|
@@ -133,16 +133,65 @@
t parse%0A
+from thinglang.parser.simplifier import simplify%0A
%0A%0Adef ru
@@ -250,24 +250,30 @@
or('
-Got empty source
+Source cannot be empty
')%0A%0A
@@ -369,24 +369,19 @@
e))%0A
-root_nod
+tre
e = pars
@@ -397,16 +397,47 @@
_groups)
+%0A root_node = simplify(tree)
%0A%0A wi
|
7167b03d3f2b4100b4d7f229e78e74b98f104def
|
version update
|
tibanna/_version.py
|
tibanna/_version.py
|
"""Version information."""
# The following line *must* be the last in the module, exactly as formatted:
__version__ = "1.2.0"
|
Python
| 0.000001
|
@@ -121,7 +121,7 @@
1.2.
-0
+1
%22%0A
|
e57e003b85f0a88ac6e3c19d5765144f95ac9959
|
Increase version to 0.3.2rc
|
tmserver/version.py
|
tmserver/version.py
|
# TmServer - TissueMAPS server application.
# Copyright (C) 2016 Markus D. Herrmann, University of Zurich and Robin Hafen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
__version__ = '0.3.1'
|
Python
| 0.000002
|
@@ -792,12 +792,14 @@
= '0.3.
-1
+2rc
'%0A%0A
|
c09bfe422d6dd705e5e38402dd8754f461fabe59
|
Support filtering by list of jobNo
|
tools/accounting.py
|
tools/accounting.py
|
#!/usr/bin/env python3
"""
collection: gisds.accountinglogs
"""
#--- standard library imports
#
from argparse import ArgumentParser
from datetime import datetime
import os
from pprint import PrettyPrinter
import sys
from time import gmtime, strftime
#--- project specific imports
#
# add lib dir for this pipeline installation to PYTHONPATH
LIB_PATH = os.path.abspath(
os.path.join(os.path.dirname(os.path.realpath(__file__)), "..", "lib"))
if LIB_PATH not in sys.path:
sys.path.insert(0, LIB_PATH)
from mongodb import mongodb_conn
__author__ = "LIEW Jun Xian"
__email__ = "liewjx@gis.a-star.edu.sg"
__copyright__ = "2016 Genome Institute of Singapore"
__license__ = "The MIT License (MIT)"
def main():
"""
Main function
"""
instance = ArgumentParser(description=__doc__)
instance.add_argument("-j", "--jobNo", help="filter records by jobNo of jobs")
instance.add_argument("-o", "--owner", help="filter records by owner of jobs")
args = instance.parse_args()
selection = {}
if args.jobNo:
selection["jobs.jobNo"] = args.jobNo
if args.owner:
selection["jobs.owner"] = args.owner
projection = {}
projection["jobs"] = 1
# print("SELECTION:\t" + str(selection))
# print("PROJECTION:\t" + str(projection))
for document in mongodb_conn(False).gisds.accountinglogs.find(selection, projection):
for job in document["jobs"]:
if job["jobNo"] == args.jobNo:
job["ruWallClock"] = strftime("%Hh%Mm%Ss", gmtime(job["ruWallClock"]))
job["submissionTime"] = str(datetime.fromtimestamp(
job["submissionTime"]).isoformat()).replace(":", "-")
PrettyPrinter(indent=2).pprint(job)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -843,88 +843,16 @@
o%22,
-help=%22filter records by jobNo of jobs%22)%0A instance.add_argument(%22-o%22, %22--owner
+nargs=%22*
%22, h
@@ -874,21 +874,21 @@
ords by
-owner
+jobNo
of jobs
@@ -928,27 +928,8 @@
()%0A%0A
- selection = %7B%7D%0A
@@ -955,64 +955,92 @@
-selection%5B%22jobs.jobNo%22%5D = args.
+projection = %7B%7D%0A projection%5B%22jobs%22%5D = 1%0A for
jobNo
-%0A
i
-f
+n
args.
-owner:%0A
+jobNo:%0A
@@ -1056,86 +1056,67 @@
tion
-%5B%22jobs.owner%22%5D = args.owner%0A%0A projection = %7B%7D%0A projection%5B%22jobs%22%5D = 1%0A%0A#
+ = %7B%7D%0A selection%5B%22jobs.jobNo%22%5D = jobNo%0A#
@@ -1155,16 +1155,24 @@
tion))%0A#
+
prin
@@ -1208,17 +1208,24 @@
ction))%0A
-%0A
+
for
@@ -1306,16 +1306,24 @@
ction):%0A
+
@@ -1363,16 +1363,24 @@
+
+
if job%5B%22
@@ -1390,28 +1390,31 @@
No%22%5D ==
-args.
jobNo:%0A
+
@@ -1500,32 +1500,40 @@
+
+
job%5B%22submissionT
@@ -1568,16 +1568,24 @@
estamp(%0A
+
@@ -1650,16 +1650,24 @@
%22, %22-%22)%0A
+
|
35830058d50e2e5cb8a54cbf166c28b5720b0b81
|
Add tests for empty edges and duplicate edges
|
networkx/testing/tests/test_utils.py
|
networkx/testing/tests/test_utils.py
|
from nose.tools import *
import networkx as nx
from networkx.testing import *
# thanks to numpy for this GenericTest class (numpy/testing/test_utils.py)
class _GenericTest(object):
def _test_equal(self, a, b):
self._assert_func(a, b)
def _test_not_equal(self, a, b):
try:
self._assert_func(a, b)
passed = True
except AssertionError:
pass
else:
raise AssertionError("a and b are found equal but are not")
class TestNodesEqual(_GenericTest):
def setUp(self):
self._assert_func = assert_nodes_equal
def test_nodes_equal(self):
a = [1,2,5,4]
b = [4,5,1,2]
self._test_equal(a,b)
def test_nodes_not_equal(self):
a = [1,2,5,4]
b = [4,5,1,3]
self._test_not_equal(a,b)
def test_nodes_with_data_equal(self):
G = nx.Graph()
G.add_nodes_from([1,2,3],color='red')
H = nx.Graph()
H.add_nodes_from([1,2,3],color='red')
self._test_equal(G.nodes(data=True), H.nodes(data=True))
def test_edges_with_data_not_equal(self):
G = nx.Graph()
G.add_nodes_from([1,2,3],color='red')
H = nx.Graph()
H.add_nodes_from([1,2,3],color='blue')
self._test_not_equal(G.nodes(data=True), H.nodes(data=True))
class TestEdgesEqual(_GenericTest):
def setUp(self):
self._assert_func = assert_edges_equal
def test_edges_equal(self):
a = [(1,2),(5,4)]
b = [(4,5),(1,2)]
self._test_equal(a,b)
def test_edges_not_equal(self):
a = [(1,2),(5,4)]
b = [(4,5),(1,3)]
self._test_not_equal(a,b)
def test_edges_with_data_equal(self):
G = nx.MultiGraph()
G.add_path([0,1,2],weight=1)
H = nx.MultiGraph()
H.add_path([0,1,2],weight=1)
self._test_equal(G.edges(data=True, keys=True),
H.edges(data=True, keys=True))
def test_edges_with_data_not_equal(self):
G = nx.MultiGraph()
G.add_path([0,1,2],weight=1)
H = nx.MultiGraph()
H.add_path([0,1,2],weight=2)
self._test_not_equal(G.edges(data=True, keys=True),
H.edges(data=True, keys=True))
class TestGraphsEqual(_GenericTest):
def setUp(self):
self._assert_func = assert_graphs_equal
def test_graphs_equal(self):
G = nx.path_graph(4)
H = nx.Graph()
H.add_path(range(4))
H.name='path_graph(4)'
self._test_equal(G,H)
def test_digraphs_equal(self):
G = nx.path_graph(4, create_using=nx.DiGraph())
H = nx.DiGraph()
H.add_path(range(4))
H.name='path_graph(4)'
self._test_equal(G,H)
def test_multigraphs_equal(self):
G = nx.path_graph(4, create_using=nx.MultiGraph())
H = nx.MultiGraph()
H.add_path(range(4))
H.name='path_graph(4)'
self._test_equal(G,H)
def test_multigraphs_equal(self):
G = nx.path_graph(4, create_using=nx.MultiDiGraph())
H = nx.MultiDiGraph()
H.add_path(range(4))
H.name='path_graph(4)'
self._test_equal(G,H)
def test_graphs_not_equal(self):
G = nx.path_graph(4)
H = nx.Graph()
H.add_cycle(range(4))
self._test_not_equal(G,H)
def test_graphs_not_equal2(self):
G = nx.path_graph(4)
H = nx.Graph()
H.add_path(range(3))
H.name='path_graph(4)'
self._test_not_equal(G,H)
def test_graphs_not_equal3(self):
G = nx.path_graph(4)
H = nx.Graph()
H.add_path(range(4))
H.name='path_graph(foo)'
self._test_not_equal(G,H)
|
Python
| 0
|
@@ -2243,16 +2243,345 @@
True))%0A%0A
+ def test_no_edges(self):%0A G = nx.MultiGraph()%0A H = nx.MultiGraph()%0A self._test_equal(G.edges(data=True, keys=True),%0A H.edges(data=True, keys=True))%0A%0A%0A def test_duplicate_edges(self):%0A a = %5B(1,2),(5,4),(1,2)%5D%0A b = %5B(4,5),(1,2)%5D%0A self._test_not_equal(a,b)%0A%0A%0A
class Te
|
35268b43a315b2d44d9912159f189e2b80c610cf
|
Remove leftover imports (NC-1469)
|
nodeconductor/cost_tracking/admin.py
|
nodeconductor/cost_tracking/admin.py
|
from django.conf.urls import patterns, url
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.db import transaction
from django.shortcuts import redirect
from django.utils.translation import ungettext
from nodeconductor.core.admin import DynamicModelAdmin
from nodeconductor.cost_tracking import models, CostTrackingRegister
from nodeconductor.structure import SupportedServices
from nodeconductor.structure import models as structure_models, admin as structure_admin
def _get_content_type_queryset(models_list):
""" Get list of services content types """
content_type_ids = {c.id for c in ContentType.objects.get_for_models(*models_list).values()}
return ContentType.objects.filter(id__in=content_type_ids)
class PriceListItemAdmin(admin.ModelAdmin):
list_display = ('uuid', 'default_price_list_item', 'service', 'units', 'value')
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "content_type":
kwargs["queryset"] = _get_content_type_queryset(structure_models.Service.get_all_models())
return super(PriceListItemAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
class ResourceTypeFilter(SimpleListFilter):
title = 'resource_type'
parameter_name = 'resource_type'
def lookups(self, request, model_admin):
return [(k, k) for k in SupportedServices.get_resource_models().keys()]
def queryset(self, request, queryset):
if self.value():
model = SupportedServices.get_resource_models().get(self.value(), None)
if model:
return queryset.filter(resource_content_type=ContentType.objects.get_for_model(model))
return queryset
class DefaultPriceListItemAdmin(DynamicModelAdmin,
structure_admin.ChangeReadonlyMixin,
admin.ModelAdmin):
list_display = ('full_name', 'item_type', 'key', 'value', 'monthly_rate', 'resource_type')
list_filter = ('item_type', ResourceTypeFilter)
fields = ('name', ('value', 'monthly_rate'), 'resource_content_type', ('item_type', 'key'))
readonly_fields = ('monthly_rate',)
change_readonly_fields = ('resource_content_type', 'item_type', 'key')
change_list_template = 'admin/core/change_list.html'
def full_name(self, obj):
return obj.name or obj.units or obj.uuid
def formfield_for_foreignkey(self, db_field, request, **kwargs):
if db_field.name == "resource_content_type":
kwargs["queryset"] = _get_content_type_queryset(structure_models.ResourceMixin.get_all_models())
return super(DefaultPriceListItemAdmin, self).formfield_for_foreignkey(db_field, request, **kwargs)
def get_extra_actions(self):
return (
super(DefaultPriceListItemAdmin, self).get_extra_actions() +
[self.init_from_registered_applications]
)
def init_from_registered_applications(self, request):
created_items = []
for backend in CostTrackingRegister.get_registered_backends():
try:
default_items = backend.get_default_price_list_items()
except NotImplementedError:
continue
with transaction.atomic():
for default_item in default_items:
item, created = models.DefaultPriceListItem.objects.update_or_create(
resource_content_type=default_item.resource_content_type,
item_type=default_item.item_type,
key=default_item.key,
defaults={
'name': '{}: {}'.format(default_item.item_type, default_item.key),
'metadata': default_item.metadata,
'units': default_item.units
}
)
if created:
item.value = default_item.value
item.save()
created_items.append(item)
if created_items:
message = ungettext(
'Price item was created: {}'.format(created_items[0].name),
'Price items were created: {}'.format(', '.join(item.name for item in created_items)),
len(created_items)
)
self.message_user(request, message)
else:
self.message_user(request, "Price items for all registered applications have been updated")
return redirect(reverse('admin:cost_tracking_defaultpricelistitem_changelist'))
init_from_registered_applications.name = 'Init from registered applications'
class PriceEstimateAdmin(admin.ModelAdmin):
fields = ('content_type', 'object_id', 'total',
('month', 'year'), ('is_manually_input', 'is_visible'))
list_display = ('content_type', 'object_id', 'total', 'month', 'year')
list_filter = ('is_manually_input', 'is_visible')
search_fields = ('month', 'year', 'object_id', 'total')
admin.site.register(models.PriceListItem, PriceListItemAdmin)
admin.site.register(models.DefaultPriceListItem, DefaultPriceListItemAdmin)
admin.site.register(models.PriceEstimate, PriceEstimateAdmin)
|
Python
| 0
|
@@ -1,47 +1,4 @@
-from django.conf.urls import patterns, url%0A
from
|
5ac6c93073c98ea17a0786e6e1a1de3837e460d9
|
Handle RSS feeds for blogs that don't have dates
|
observatory/dashboard/models/Blog.py
|
observatory/dashboard/models/Blog.py
|
# Copyright (c) 2010, Nate Stedman <natesm@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from dashboard.util import find_author
from django.db import models
from lib import feedparser, dateutil
from EventSet import EventSet
# a blog for a project
class Blog(EventSet):
class Meta:
app_label = 'dashboard'
# link to the blog, if it isn't hosted on dashboard
url = models.URLField("Blog Web Address", max_length = 64)
rss = models.URLField("Blog Feed", max_length = 64)
# fetches the posts from the rss feed
def fetch(self):
import BlogPost
# don't fetch internally hosted blogs
if not self.from_feed: return
events = []
# parse and iterate the feed
entries = feedparser.parse(self.rss).entries
for post in entries:
# time manipation is fun
date = dateutil.parser.parse(post.date)
try:
date = (date - date.utcoffset()).replace(tzinfo=None)
except:
pass
# don't re-add old posts
if self.most_recent_date >= date:
continue
try:
content = post.content[0].value
except:
content = post.description
try:
author_name = post.author_detail["name"]
except:
author_name = None
events.append(self.add_event(BlogPost.BlogPost,
title = post.title,
summary = post.description,
from_feed = True,
author_name = author_name,
date = date,
extra_args = {
"external_link": post.link,
"content": content,
"blog_id": self.id
}
))
# find the new most recent date
dates = [event.date for event in events if event is not None]
dates.append(self.most_recent_date)
self.most_recent_date = max(dates)
self.save()
|
Python
| 0
|
@@ -768,16 +768,32 @@
TWARE.%0A%0A
+import datetime%0A
from das
@@ -1485,33 +1485,15 @@
-# time manipation is fun%0A
+try:%0A
@@ -1537,60 +1537,8 @@
ate)
-%0A try:%0A date = (date - date.utcoffset())
.rep
@@ -1581,12 +1581,41 @@
-pass
+date = datetime.datetime.utcnow()
%0A
|
ff30fbd3adef0de27c7b3f690fff1c47c6d42b6a
|
set tree self.depth to minDepth
|
DecisionTree.py
|
DecisionTree.py
|
class Node:
def __init__(self, data=None):
self.data = data
self.left = None
self.right = None
class Tree: # passing (object ) into class is no longer needed in python3
def __init__(self, dataset, minDepth, depth = 3):
self.root = None
self.left = None
self.right = None
self.data = dataset
self.depth = depth
def add_head(self, data):
if self.root == None:
self.root = data #unsafe lol
def add_left_child(self, node, data):
"""possible implementation?"""
if node.left == None: node.left = Node(data)
def add_right_child(self, data):
"""possible implementation?"""
if node.right == None: node.right = Node(data)
def get_data(self, node):
return node.data
def gen_tree(self, depth=self.depth):
""" builds full tree recursively """
if depth == 1:
return Node()
self.root = Node()
self.root.left = gen_tree(depth-1)
self.root.right = gen_tree(depth-1)
return self.root
#gets a random split point for the dataset
def getRandomSplit(dataset):
split = np.transpose(np.matrix(np.zeros(2)))
coordN = np.round(np.random.rand())
coordM = np.floor(dataset.size.m * np.random.rand())
split[coordN, 0] = dataset[coordN + 1, coordM]
return split
#get the best split point for dataset
def getBestGiniSplit(dataset, labelsCount):
return 0
#calculates gini value of a given dataset
def calcGini(histogram, labelsCount):
gini = 0
for i in range(0, histogram.size.m)
gini += histogram[0, i] * histogram[0, i]
gini = 1 - gini
return gini
#returns a 1 * labelCount matrix of histogram data
def getHistogram(dataset, labelsCount):
histogram = np.matrix(np.zeros(labelsCount))
for i in range(dataset.size.m)
j = dataset[0, i]
histogram[0, j] += 1
return histogram
if __name__ == '__main__':
# toy tree demo
root = Tree()
|
Python
| 0
|
@@ -133,70 +133,8 @@
ree:
- # passing (object ) into class is no longer needed in python3
%0A%0A
@@ -175,19 +175,8 @@
epth
-, depth = 3
):%0A
@@ -299,17 +299,20 @@
depth =
-d
+minD
epth%0A%0A
|
3976ad2e9d1ded6d36bced785b88ea186af5b01f
|
add urlencode
|
ItasaFlexGet.py
|
ItasaFlexGet.py
|
import urllib, urllib2, cookielib,urlparse
import os
from contextlib import closing
from flexget.plugin import register_plugin
from BeautifulSoup import BeautifulSoup
BASE_PATH = 'http://www.italiansubs.net/index.php'
class Itasa(object):
"""
rss: http://www.italiansubs.net/index.php?option=com_rsssub... #myitasa or itasa subtitle feed
accept_all: yes #accept all from myitasa
itasa:
username: itasaUsername
password: itasaPassword
path: ~/subtitle/download/folder # absolute or starting from $HOME
"""
def validator(self):
'''validator'''
from flexget import validator
d = validator.factory('dict')
d.accept('text',key='username')
d.accept('text',key='password')
d.accept('text',key='path')
return d
def on_process_start(self, feed):
'''Itasa login, storing cookie'''
self.config = feed.config['itasa']
cj = cookielib.CookieJar()
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
login_data = urllib.urlencode({'username' : self.config['username']
, 'passwd' : self.config['password']
, 'Submit' :'Login'
, 'silent' : True
, 'option' : 'com_user'
, 'task' : 'login'
, 'remember':'yes'})
self.opener.open(BASE_PATH, login_data)
def on_feed_download(self,feed):
'''download zip file'''
for entry in feed.entries:
if entry.get('urls'):
urls = entry.get('urls')
else:
urls = [entry['url']]
for url in urls:
with closing(self.opener.open(url)) as page:
z = self._zip(page)
filename = z.headers.dict['content-disposition'].split('=')[1]
filename = os.path.join(self.config['path'],filename)
filename = os.path.expanduser(filename)
with open(filename,'wb') as f:
f.write(z.read())
def _zip(self,page):
'''extract zip subtitle link from page, open download zip link'''
content = page.read()
start = content.index('<center><a href="')
end = content.index('" rel',start)
url = content[start+17:end]
return self.opener.open(url)
def _post_comment(self,page):
soup = BeautifulSoup(page.read())
form = soup.find(id='jc_commentForm')
arg2_dict = []
for input in form.findAll('input'):
if input.name == 'jc_comment':
arg2_dict.append([input.name,'grazie!!'])
else:
arg2_dict.append([input.name,input.value])
data = { arg2: str(arg2_dict)
, func : "jcxAddComment"
, task : "azrul_ajax"
, no_html: 1
, option : jomcomment}
self.opener.open(page.geturl(),data)
register_plugin(Itasa, 'itasa')
|
Python
| 0.000042
|
@@ -3107,21 +3107,45 @@
eturl(),
+urllib.parse.urlencode(
data)
+)
%0A%0Aregist
|
6ea2a6cf6af7dfdb7767c7961a3fd192d4739f2f
|
Add rudimentary encryption
|
seedbox/models.py
|
seedbox/models.py
|
import os
import pickle
import click
import paramiko
class SeedBox():
"""Simple interface to view recently available files."""
def __init__(self):
self.home_dir = os.path.expanduser('~')
self.config_file = os.path.join(self.home_dir, '.sbconfig')
if not self._has_creds():
yes = click.confirm("You do not have information credentials stored. Would you like to enter them now?")
if yes:
self._add_auth_credentials()
else:
exit()
with open(self.config_file, 'rb') as f:
self.creds = pickle.load(f)
self.hostname = self.creds['hostname']
self.port = int(self.creds['port'])
self.username = self.creds['username']
self.password = self.creds['password']
if not all([self.hostname, self.port, self.username, self.password]):
yes = click.confirm("You do not have information credentials stored. Would you like to enter them now?")
if yes:
self._add_auth_credentials()
else:
exit()
self.conn = None
self.transport = None
def _has_creds(self):
home_dir = os.path.expanduser('~')
if not os.path.exists(os.path.join(home_dir, '.sbconfig')):
return False
return True
def _add_auth_credentials(self):
_creds = {}
_creds['hostname'] = click.prompt('Enter the hostname')
_creds['port'] = click.prompt('Enter the port', type=int)
_creds['username'] = click.prompt('Enter the username')
_creds['password'] = click.prompt('Enter the password (input is hidden)', hide_input=True)
with open(self.config_file, 'wb') as f:
pickle.dump(_creds, f, pickle.HIGHEST_PROTOCOL)
return
def __enter__(self):
return self._login()
def __exit__(self, *args):
self._logout()
def _login(self):
self.transport = paramiko.Transport((self.hostname, self.port))
self.transport.connect(username=self.username, password=self.password)
self.conn = paramiko.SFTPClient.from_transport(self.transport)
return self.conn
def _logout(self):
self.conn.close()
self.transport.close()
|
Python
| 0.999993
|
@@ -1,12 +1,23 @@
+import bz2%0A
import os%0Aim
@@ -598,21 +598,17 @@
-self.
+_
creds =
@@ -638,45 +638,116 @@
-self.hostname = self.creds%5B'hostname'
+_creds = bz2.decompress(_creds)%0A _creds = _creds.split(';;')%0A self.hostname = _creds%5B0
%5D%0A
@@ -776,25 +776,16 @@
int(
-self.
+_
creds%5B
-'port'
+1
%5D)%0A
@@ -815,29 +815,16 @@
e =
-self.
+_
creds%5B
-'username'
+2
%5D%0A
@@ -853,29 +853,16 @@
d =
-self.
+_
creds%5B
-'password'
+3
%5D%0A%0A
@@ -1451,44 +1451,16 @@
-_creds = %7B%7D%0A _creds%5B'
hostname
'%5D =
@@ -1451,26 +1451,24 @@
hostname
-'%5D
= click.pro
@@ -1505,22 +1505,12 @@
-_creds%5B'
port
-'%5D
= c
@@ -1561,24 +1561,16 @@
-_creds%5B'
username
'%5D =
@@ -1565,18 +1565,16 @@
username
-'%5D
= click
@@ -1615,24 +1615,16 @@
-_creds%5B'
password
'%5D =
@@ -1623,10 +1623,8 @@
word
-'%5D
= c
@@ -1691,16 +1691,93 @@
ut=True)
+%0A _creds = %22%7B%7D;;%7B%7D;;%7B%7D;;%7B%7D%22.format(hostname, port, username, password)
%0A%0A
@@ -1842,22 +1842,36 @@
le.dump(
+bz2.compress(
_creds
+)
, f, pic
@@ -1913,118 +1913,8 @@
rn%0A%0A
- def __enter__(self):%0A return self._login()%0A%0A def __exit__(self, *args):%0A self._logout()%0A%0A
@@ -2251,16 +2251,237 @@
ansport.close()%0A
+%0A def __enter__(self):%0A return self._login()%0A%0A def __exit__(self, *args):%0A self._logout()%0A%0A def __enter__(self):%0A return self._login()%0A%0A def __exit__(self, *args):%0A self._logout()%0A%0A
|
2fd677035118b80e4dfb04e380b526b3264492eb
|
Remove debugger (oops)
|
holmes/material.py
|
holmes/material.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from uuid import uuid4
from functools import partial
from collections import defaultdict
from holmes.cli import BaseCLI
from holmes.models.domain import Domain
from holmes.models.page import Page
from holmes.models.violation import Violation
from holmes.utils import get_domain_from_url
def configure_materials(girl, db, config):
import ipdb; ipdb.set_trace() # BREAKPOINT
girl.add_material(
'domains_details',
partial(Domain.get_domains_details, db),
config.MATERIALS_EXPIRATION_IN_SECONDS['domains_details'],
config.MATERIALS_GRACE_PERIOD_IN_SECONDS['domains_details']
)
girl.add_material(
'next_jobs_count',
partial(Page.get_next_jobs_count, db, config),
config.MATERIALS_EXPIRATION_IN_SECONDS['next_jobs_count'],
config.MATERIALS_GRACE_PERIOD_IN_SECONDS['next_jobs_count']
)
girl.add_material(
'violation_count_by_category_for_domains',
partial(Violation.get_group_by_category_id_for_all_domains, db),
config.MATERIALS_EXPIRATION_IN_SECONDS['violation_count_by_category_for_domains'],
config.MATERIALS_GRACE_PERIOD_IN_SECONDS['violation_count_by_category_for_domains']
)
girl.add_material(
'blacklist_domain_count',
partial(MaterialConveyor.get_blacklist_domain_count, db),
config.MATERIALS_EXPIRATION_IN_SECONDS['blacklist_domain_count'],
config.MATERIALS_GRACE_PERIOD_IN_SECONDS['blacklist_domain_count']
)
class MaterialConveyor(object):
@classmethod
def get_blacklist_domain_count(cls, db):
ungrouped = defaultdict(int)
for urls, count in Violation.get_group_by_value_for_key(db, 'blacklist.domains'):
for url in urls:
domain, null = get_domain_from_url(url)
ungrouped[domain] += count
blacklist = sorted(ungrouped.items(), key=lambda xz: -xz[1])
return [dict(zip(('domain', 'count'), x)) for x in blacklist]
class MaterialWorker(BaseCLI):
def initialize(self):
self.uuid = uuid4().hex
self.error_handlers = [handler(self.config) for handler in self.load_error_handlers()]
self.connect_sqlalchemy()
self.connect_to_redis()
self.configure_material_girl()
def do_work(self):
self.info('Running material girl...')
self.girl.run()
def main():
worker = MaterialWorker(sys.argv[1:])
worker.run()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -388,56 +388,8 @@
g):%0A
- import ipdb; ipdb.set_trace() # BREAKPOINT%0A
|
01b12e4a9994dbbb70fd2b8faea6aa7b9e2c1900
|
fix path in make.py
|
DesktopEditor/xmlsec/src/wasm/module/make.py
|
DesktopEditor/xmlsec/src/wasm/module/make.py
|
#!/usr/bin/env python
import sys
sys.path.append('../../build_tools/scripts')
import base
import os
import codecs
def run_as_bash(file, commands):
if base.is_file(file):
base.delete_file(file)
file_bash = codecs.open(file, "w", "utf-8")
file_bash.write("#!/bin/bash\n")
file_bash.write("\n".join(commands))
file_bash.close()
base.cmd("chmod", ["+x", file])
base.cmd(file)
return
if ("windows" == base.host_platform()):
print("Windows system not supported. Please use Linux or MacOS")
exit(0)
base.configure_common_apps()
base_dir = os.getcwd()
# fetch emsdk
if not base.is_dir("emsdk"):
base.print_info("Fetching emsdk...")
base.cmd("git", ["clone", "https://github.com/emscripten-core/emsdk.git"])
os.chdir(base_dir + "/emsdk")
base.cmd("./emsdk", ["install", "latest"])
base.cmd("./emsdk", ["activate", "latest"])
os.chdir(base_dir)
# fetch freetype
if not base.is_dir("openssl"):
base.print_info("Fetching openssl...")
base.cmd("git", ["clone", "--depth=1", "--branch", "OpenSSL_1_1_1f", "https://github.com/openssl/openssl.git"])
# compile openssl
if not base.is_file(base_dir + "/openssl/libcrypto.a"):
base.print_info("Compile openssl...")
os.chdir(base_dir + "/openssl")
#run_as_bash("./compile_openssl.sh", ["./config no-shared no-asm no-ssl2 no-ssl3", "source ./../emsdk/emsdk_env.sh", "export CC=emcc", "export CXX=emcc", "make"])
run_as_bash("./compile_openssl.sh", ["source ./../emsdk/emsdk_env.sh", "emconfigure ./config no-shared no-asm no-threads", "sed -i 's|^CROSS_COMPILE.*$|CROSS_COMPILE=|g' Makefile", "emmake make build_generated libssl.a libcrypto.a"])
os.chdir(base_dir)
# compile wasm module with bindings
compiler_flags = ["-o openssl.js",
"-O3",
"-fno-exceptions",
"-fno-rtti",
"-s WASM=1",
"-s ALLOW_MEMORY_GROWTH=1",
"-s FILESYSTEM=0",
"-s ENVIRONMENT='web'"]
exported_functions = ["_malloc",
"_free",
"_ASC_Generate_Param",
"_ASC_GetHash"]
sources = ["./openssl/libcrypto.a", "./openssl.c"]
compiler_flags.append("-Iopenssl/include")
# arguments
arguments = ""
for item in compiler_flags:
arguments += (item + " ")
arguments += "-s EXPORTED_FUNCTIONS=\"["
for item in exported_functions:
arguments += ("'" + item + "',")
arguments = arguments[:-1]
arguments += "]\" "
for item in sources:
arguments += (item + " ")
run_as_bash("./compile_module.sh", ["source ./emsdk/emsdk_env.sh", "emcc " + arguments])
# finalize
base.replaceInFile("./openssl.js", "__ATPOSTRUN__=[];", "__ATPOSTRUN__=[function(){window.AscCrypto.onLoadModule();}];")
base.replaceInFile("./openssl.js", "function getBinaryPromise(){", "function getBinaryPromise2(){")
openssl_js_content = base.readFile("./openssl.js")
engine_base_js_content = base.readFile("./engine.js")
engine_js_content = engine_base_js_content.replace("//module", openssl_js_content)
if not base.is_dir("./deploy"):
base.create_dir("./deploy")
# remove previous version
if base.is_file("./deploy/openssl.js"):
base.delete_file("./deploy/openssl.js")
if base.is_file("./deploy/openssl.wasm"):
base.delete_file("./deploy/openssl.wasm")
# write new version
base.writeFile("./deploy/openssl.js", engine_js_content)
base.copy_file("./openssl.wasm", "./deploy/openssl.wasm")
|
Python
| 0.000005
|
@@ -50,16 +50,28 @@
('../../
+../../../../
build_to
|
2fb944f5f069be47ed4560c813daf1fa6e99da81
|
fix some bugs and limit sites for now
|
htdocs/cscap/dl.py
|
htdocs/cscap/dl.py
|
#!/usr/bin/python
"""
"""
import sys
import psycopg2
import psycopg2.extras
import ConfigParser
import cgi
config = ConfigParser.ConfigParser()
config.read('/mesonet/www/apps/iemwebsite/scripts/cscap/mytokens.cfg')
def clean( val ):
''' Clean the value we get '''
if val is None:
return val
if val.strip().lower() == 'did not collect':
return 'DNC'
if val.strip().lower() == 'n/a':
return 'NA'
return val
def check_auth(form):
''' Make sure request is authorized '''
if form.getfirst('hash') != config.get('appauth', 'sharedkey'):
sys.exit()
def get_nitratedata():
''' Fetch some nitrate data, for now '''
pgconn = psycopg2.connect(database='sustainablecorn', host='iemdb',
user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
res = "uniqueid,plotid,depth,soil15,soil16,soil23\n"
cursor.execute("""SELECT site, plotid, depth, varname, year, value
from soil_nitrate_data WHERE value is not null""")
data = {}
for row in cursor:
key = "%s|%s|%s|%s" % (row['site'], row['plotid'], row['year'],
row['depth'])
if not data.has_key(key):
data[key] = {}
data[key][row['varname']] = clean(row["value"])
for key in data.keys():
tokens = key.split("|")
res += "%s,%s,%s,%s,%s,%s,%s\n" % (tokens[0], tokens[1],
tokens[2], tokens[3],
data[key].get('SOIL15', ''), data[key].get('SOIL16', ''),
data[key].get('SOIL23', ''))
return res
def get_agdata():
"""
Go to Google and demand my data back!
"""
pgconn = psycopg2.connect(database='sustainablecorn', host='iemdb',
user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
cursor.execute("""SELECT site, a.plotid, year, varname, value,
p.rep, p.tillage, p.rotation
from agronomic_data a JOIN plotids p on (p.uniqueid = a.site and
p.plotid = a.plotid) where
varname in ('AGR1', 'AGR2', 'AGR7', 'AGR15', 'AGR16', 'AGR17', 'AGR19')""")
data = {}
for row in cursor:
key = "%s|%s|%s|%s|%s|%s" % (row['site'], row['plotid'], row['year'],
row['rep'], row['tillage'], row['rotation'])
if not data.has_key(key):
data[key] = {}
data[key][row['varname']] = clean(row["value"])
res = ("uniqueid,plotid,year,rep,tillage,rotation,agr1,agr2,agr7,"
+"agr15,agr16,agr17,agr19\n")
for key in data.keys():
tokens = key.split("|")
res += "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n" % (tokens[0], tokens[1],
tokens[2], tokens[3], tokens[4], tokens[5],
data[key].get('AGR1', ''), data[key].get('AGR2', ''),
data[key].get('AGR7', ''), data[key].get('AGR15', ''),
data[key].get('AGR16', ''), data[key].get('AGR16', ''),
data[key].get('AGR17', ''), data[key].get('AGR18', ''))
return res
if __name__ == '__main__':
''' See how we are called '''
form = cgi.FieldStorage()
sys.stdout.write("Content-type: text/plain\n\n")
check_auth(form)
report = form.getfirst('report', 'ag1')
if report == 'ag1':
sys.stdout.write( get_agdata() )
else:
sys.stdout.write( get_nitratedata() )
|
Python
| 0
|
@@ -887,16 +887,21 @@
,plotid,
+year,
depth,so
@@ -922,16 +922,16 @@
il23%5Cn%22%0A
-
curs
@@ -1043,16 +1043,218 @@
not null
+%0A and value ~* '%5B0-9%5C.%5D' and value != '.' and value !~* '%3C'%0A and site in ('MASON', 'KELLOGG', 'GILMORE', 'ISUAG', 'WOOSTER.COV',%0A 'SEPAC', 'BRADFORD.A', 'BRADFORD.B1', 'BRADFORD.B2', 'FREEMAN')
%22%22%22)%0A
@@ -2362,16 +2362,219 @@
'AGR19')
+%0A and value ~* '%5B0-9%5C.%5D' and value != '.' and value !~* '%3C'%0A and site in ('MASON', 'KELLOGG', 'GILMORE', 'ISUAG', 'WOOSTER.COV',%0A 'SEPAC', 'BRADFORD.A', 'BRADFORD.B1', 'BRADFORD.B2', 'FREEMAN')
%22%22%22)%0A
@@ -3097,35 +3097,32 @@
s,%25s,%25s,%25s,%25s,%25s
-,%25s
%5Cn%22 %25 (tokens%5B0%5D
@@ -3395,25 +3395,25 @@
y%5D.get('AGR1
-6
+7
', ''),%0A
@@ -3447,37 +3447,9 @@
AGR1
-7', ''), data%5Bkey%5D.get('AGR18
+9
', '
|
b1d83fc13ec2d71e78fea406f76c48d5cc528f46
|
Fix imports
|
spacy/ml/models/parser.py
|
spacy/ml/models/parser.py
|
from typing import Optional
from thinc.api import Model, chain, list2array, Linear, zero_init, use_ops
from ...util import registry
from .._precomputable_affine import PrecomputableAffine
from ..tb_framework import TransitionModel
@registry.architectures.register("spacy.TransitionBasedParser.v1")
def build_tb_parser_model(
tok2vec: Model[List[Doc], List[Floats2d]],
nr_feature_tokens: int,
hidden_width: int,
maxout_pieces: int,
use_upper: bool = True,
nO: Optional[int] = None,
) -> Model:
"""
Build a transition-based parser model. Can apply to NER or dependency-parsing.
Transition-based parsing is an approach to structured prediction where the
task of predicting the structure is mapped to a series of state transitions.
You might find this tutorial helpful as background:
https://explosion.ai/blog/parsing-english-in-python
The neural network state prediction model consists of either two or three
subnetworks:
* tok2vec: Map each token into a vector representations. This subnetwork
is run once for each batch.
* lower: Construct a feature-specific vector for each (token, feature) pair.
This is also run once for each batch. Constructing the state
representation is then simply a matter of summing the component features
and applying the non-linearity.
* upper (optional): A feed-forward network that predicts scores from the
state representation. If not present, the output from the lower model is
ued as action scores directly.
tok2vec (Model[List[Doc], List[Floats2d]]):
Subnetwork to map tokens into vector representations.
nr_feature_tokens (int): The number of tokens in the context to use to
construct the state vector. Valid choices are 1, 2, 3, 6, 8 and 13. The
2, 8 and 13 feature sets are designed for the parser, while the 3 and 6
feature sets are designed for the NER. The recommended feature sets are
3 for NER, and 8 for the dependency parser.
TODO: This feature should be split into two, state_type: ["deps", "ner"]
and extra_state_features: [True, False]. This would map into:
(deps, False): 8
(deps, True): 13
(ner, False): 3
(ner, True): 6
hidden_width (int): The width of the hidden layer.
maxout_pieces (int): How many pieces to use in the state prediction layer.
Recommended values are 1, 2 or 3. If 1, the maxout non-linearity
is replaced with a ReLu non-linearity if use_upper=True, and no
non-linearity if use_upper=False.
use_upper (bool): Whether to use an additional hidden layer after the state
vector in order to predict the action scores. It is recommended to set
this to False for large pretrained models such as transformers, and False
for smaller networks. The upper layer is computed on CPU, which becomes
a bottleneck on larger GPU-based models, where it's also less necessary.
nO (int or None): The number of actions the model will predict between.
Usually inferred from data at the beginning of training, or loaded from
disk.
"""
t2v_width = tok2vec.get_dim("nO") if tok2vec.has_dim("nO") else None
tok2vec = chain(tok2vec, list2array(), Linear(hidden_width, t2v_width),)
tok2vec.set_dim("nO", hidden_width)
lower = PrecomputableAffine(
nO=hidden_width if use_upper else nO,
nF=nr_feature_tokens,
nI=tok2vec.get_dim("nO"),
nP=maxout_pieces,
)
if use_upper:
with use_ops("numpy"):
# Initialize weights at zero, as it's a classification layer.
upper = Linear(nO=nO, init_W=zero_init)
else:
upper = None
return TransitionModel(tok2vec, lower, upper)
|
Python
| 0.000002
|
@@ -20,16 +20,22 @@
Optional
+, List
%0Afrom th
@@ -101,16 +101,49 @@
use_ops
+%0Afrom thinc.types import Floats2d
%0A%0Afrom .
|
a5bef7ac44a688b9d4493c28210a1a3fbcb64ffe
|
Fix channel comparison with # prefix
|
slackclient/_channel.py
|
slackclient/_channel.py
|
class Channel(object):
def __init__(self, server, name, channel_id, members=None):
self.server = server
self.name = name
self.id = channel_id
self.members = [] if members is None else members
def __eq__(self, compare_str):
if self.name == compare_str or self.name == "#" + compare_str or self.id == compare_str:
return True
else:
return False
def __str__(self):
data = ""
for key in list(self.__dict__.keys()):
data += "{0} : {1}\n".format(key, str(self.__dict__[key])[:40])
return data
def __repr__(self):
return self.__str__()
def send_message(self, message):
message_json = {"type": "message", "channel": self.id, "text": message}
self.server.send_to_websocket(message_json)
|
Python
| 0
|
@@ -291,24 +291,30 @@
mpare_str or
+ %22#%22 +
self.name =
@@ -314,22 +314,16 @@
.name ==
- %22#%22 +
compare
|
a2a652620fa4d7504baa42f08fc80bd2a7db1341
|
Make frozendict peristently-hasheable
|
edgedb/lang/common/datastructures/immutables.py
|
edgedb/lang/common/datastructures/immutables.py
|
##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import abc
import collections
class ImmutableMeta(type):
def __new__(mcls, name, bases, dct):
if '_shadowed_methods_' in dct:
shadowed = dct['_shadowed_methods_']
del dct['_shadowed_methods_']
for method in shadowed:
def meth(self, *args, _allow_mutation_=False, **kwargs):
if not _allow_mutation_:
raise TypeError('%r is immutable' % self.__class__.__name__)
return super()[method](*args, **kwargs)
meth.__name__ = method
dct[method] = meth
return super().__new__(mcls, name, bases, dct)
class frozendict(dict, metaclass=ImmutableMeta):
"""Immutable dict (like ``frozenset`` for ``set``.)"""
_shadowed_methods_ = ('__setitem__', '__delitem__', 'update', 'clear',
'pop', 'popitem', 'setdefault')
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, super().__repr__())
__eq__ = dict.__eq__
def __hash__(self):
return hash(frozenset(self.items()))
|
Python
| 0.000004
|
@@ -21,16 +21,22 @@
008-2010
+, 2014
Sprymix
@@ -129,16 +129,83 @@
ctions%0A%0A
+from metamagic.utils.algos.persistent_hash import persistent_hash%0A%0A
%0Aclass I
@@ -1345,8 +1345,96 @@
ems()))%0A
+%0A def persistent_hash(self):%0A return persistent_hash(frozenset(self.items()))%0A
|
4cda993213fce2b4567ba31f2dc6a116445ce664
|
rollback on dummy database now has no effect (previously raised an error). This means that custom 500 error pages (and e-mailed exceptions) now work even if a database has not been configured. Fixes #4429.
|
django/db/backends/dummy/base.py
|
django/db/backends/dummy/base.py
|
"""
Dummy database backend for Django.
Django uses this if the DATABASE_ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
ImproperlyConfigured.
"""
from django.core.exceptions import ImproperlyConfigured
def complain(*args, **kwargs):
raise ImproperlyConfigured, "You haven't set the DATABASE_ENGINE setting yet."
class DatabaseError(Exception):
pass
class IntegrityError(DatabaseError):
pass
class DatabaseWrapper:
cursor = complain
_commit = complain
_rollback = complain
def __init__(self, **kwargs):
pass
def close(self):
pass # close()
supports_constraints = False
quote_name = complain
dictfetchone = complain
dictfetchmany = complain
dictfetchall = complain
get_last_insert_id = complain
get_date_extract_sql = complain
get_date_trunc_sql = complain
get_limit_offset_sql = complain
get_random_function_sql = complain
get_deferrable_sql = complain
get_fulltext_search_sql = complain
get_drop_foreignkey_sql = complain
get_sql_flush = complain
get_sql_sequence_reset = complain
OPERATOR_MAPPING = {}
|
Python
| 0.000004
|
@@ -377,16 +377,55 @@
yet.%22%0A%0A
+def ignore(*args, **kwargs):%0A pass%0A%0A
class Da
@@ -589,24 +589,22 @@
lback =
-complain
+ignore
%0A%0A de
|
1ec9d3b5d7a2fdfd6e7d0e763c95e1a3117cd96d
|
Update middleware to be django1.10-compatible
|
django_user_agents/middleware.py
|
django_user_agents/middleware.py
|
from django.utils.functional import SimpleLazyObject
from .utils import get_user_agent
class UserAgentMiddleware(object):
# A middleware that adds a "user_agent" object to request
def process_request(self, request):
request.user_agent = SimpleLazyObject(lambda: get_user_agent(request))
|
Python
| 0.000603
|
@@ -45,16 +45,69 @@
zyObject
+%0Afrom django.utils.deprecation import MiddlewareMixin
%0A%0Afrom .
@@ -162,22 +162,31 @@
dleware(
-object
+MiddlewareMixin
):%0A #
|
27ebfe1f8b5b258dfb9db6fe8148bf22fe3a9c91
|
fix start parameter
|
djangosolr/documents/queryset.py
|
djangosolr/documents/queryset.py
|
from djangosolr.documents.query import Query
from djangosolr import solr
class QuerySet(object):
def __init__(self, model, query=None):
self._model = model
self._query = query or Query()
self._responses = []
self._responses_more = True
self._result_cache = None
self._iter = None
def _get_responses(self):
for response in self._responses:
yield response
rows = 10 if self._query._rows is None else self._query._rows
start = len(self._responses) * rows
while self._responses_more:
query = self._query.clone()
query.set_limits(start, start + rows)
response = solr.select(query.get_query_string(self._model._meta))
start += rows
self._responses.append(response)
self._responses_more = len(response['response']['docs']) == rows
yield response
def _get_response(self):
return self._get_responses().next()
response = property(_get_response)
def _clone(self):
return QuerySet(self._model, self._query.clone())
def __len__(self):
if self._result_cache is None:
if self._iter:
self._result_cache = list(self._iter)
else:
self._result_cache = list(self.iterator())
elif self._iter:
self._result_cache.extend(self._iter)
return len(self._result_cache)
def __iter__(self):
if self._result_cache is None:
self._iter = self.iterator()
self._result_cache = []
if self._iter:
return self._result_iter()
return iter(self._result_cache)
def _result_iter(self):
pos = 0
while 1:
upper = len(self._result_cache)
while pos < upper:
yield self._result_cache[pos]
pos = pos + 1
if not self._iter:
raise StopIteration
if len(self._result_cache) <= pos:
self._fill_cache()
def _fill_cache(self, num=None):
if self._iter:
try:
for _ in range(num or 20):
self._result_cache.append(self._iter.next())
except StopIteration:
self._iter = None
def iterator(self):
for response in self._get_responses():
for doc in response['response']['docs']:
yield self._model.create(doc)
def __nonzero__(self):
if self._result_cache is not None:
return bool(self._result_cache)
try:
iter(self).next()
except StopIteration:
return False
return True
def __getitem__(self, k):
if not isinstance(k, (slice, int, long)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0))
or (isinstance(k, slice) and (k.start is None or k.start >= 0)
and (k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
if self._iter is not None:
if isinstance(k, slice):
if k.stop is not None:
bound = int(k.stop)
else:
bound = None
else:
bound = k + 1
if len(self._result_cache) < bound:
self._fill_cache(bound - len(self._result_cache))
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs._query.set_limits(start, stop)
return k.step and list(qs)[::k.step] or qs
try:
qs = self._clone()
qs._query.set_limits(k, k + 1)
return list(qs)[0]
except:
raise IndexError(0)
def sort(self, *fields):
clone = self._clone()
clone._query.sort(*fields)
return clone
def fl(self, *fields):
clone = self._clone()
clone._query.fl(*fields)
return clone
def count(self):
return self.response['response']['numFound']
def raw(self, **kwargs):
clone = self._clone()
clone._query.raw(**kwargs)
return clone
def q(self, *qs, **filters):
clone = self._clone()
clone._query.q(*qs, **filters)
return clone
def fq(self, *qs, **filters):
clone = self._clone()
clone._query.fq(*qs, **filters)
return clone
|
Python
| 0.000002
|
@@ -547,24 +547,78 @@
nses) * rows
+ if self._query._start is None else self._query._start
%0A whi
|
9fe4b5fee790b7e21eb5810176a2cfa49abde7b2
|
Create author obj only for active users
|
doc/deployer/create_auth_objs.py
|
doc/deployer/create_auth_objs.py
|
from gnowsys_ndf.ndf.models import *
from django.contrib.auth.models import User
all_users = User.objects.all()
auth_gst = node_collection.one({'_type': u'GSystemType', 'name': u'Author'})
new_auth_instances = 0
for each_user in all_users:
auth = node_collection.one({'_type': u"Author", 'created_by': int(each_user.id)})
# This will create user document in Author collection to behave user as a group.
if auth is None:
print "\n Creating new Author obj for ",each_user.username
auth = node_collection.collection.Author()
auth.name = unicode(each_user.username)
auth.email = unicode(each_user.email)
auth.password = u""
auth.member_of.append(auth_gst._id)
auth.group_type = u"PUBLIC"
auth.edit_policy = u"NON_EDITABLE"
auth.subscription_policy = u"OPEN"
auth.created_by = each_user.id
auth.modified_by = each_user.id
auth.contributors.append(each_user.id)
auth.group_admin.append(each_user.id)
auth.preferred_languages = {'primary': ('en', 'English')}
auth.agency_type = "Student"
auth_id = ObjectId()
auth._id = auth_id
auth.save(groupid=auth._id)
home_group_obj = node_collection.one({'_type': u"Group", 'name': unicode("home")})
if each_user.id not in home_group_obj.author_set:
node_collection.collection.update({'_id': home_group_obj._id}, {'$push': {'author_set': each_user.id }}, upsert=False, multi=False)
home_group_obj.reload()
desk_group_obj = node_collection.one({'_type': u"Group", 'name': unicode("desk")})
if desk_group_obj and each_user.id not in desk_group_obj.author_set:
node_collection.collection.update({'_id': desk_group_obj._id}, {'$push': {'author_set': each_user.id }}, upsert=False, multi=False)
desk_group_obj.reload()
new_auth_instances = new_auth_instances + 1
print "\n Total Author objects created: ", new_auth_instances
|
Python
| 0
|
@@ -414,16 +414,40 @@
is None
+ and each_user.is_active
:%0A%09%09prin
|
69f28c471935d5e8136a4b32f51310f1f46046f0
|
set lower for envvar keys
|
docku/build/__init__.py
|
docku/build/__init__.py
|
import json
import os
class BuildConfig(dict):
def __init__(self, path):
cc = {}
with open(path) as fh:
cc = json.load(fh)
super().__init__(cc)
self.populate_envvars()
def populate_envvars(self):
keys = ['BINTRAY_TOKEN', 'BINTRAY_USER', 'BINTRAY_REPO']
for key in keys:
value = os.getenv(key)
if value:
self[key] = value
|
Python
| 0.000002
|
@@ -414,16 +414,24 @@
self%5Bkey
+.lower()
%5D = valu
|
b289569a228ff574f2c469d0d2a7fbb019c19c9e
|
Update version
|
snipsskills/__init__.py
|
snipsskills/__init__.py
|
# -*-: coding utf-8 -*-
""" snipsskills module """
__version__ = '0.1.4.934'
|
Python
| 0
|
@@ -67,11 +67,11 @@
0.1.4.93
-4
+5
'%0A
|
5ad52608a470796ac09d3d675c54aee234a9d8ef
|
修改python 空格和tab混用的问题;
|
bsp/stm32/stm32f767-fire-challenger/rtconfig.py
|
bsp/stm32/stm32f767-fire-challenger/rtconfig.py
|
import os
# toolchains options
ARCH='arm'
CPU='cortex-m7'
CROSS_TOOL='gcc'
# bsp lib config
BSP_LIBRARY_TYPE = None
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
# cross_tool provides the cross compiler
# EXEC_PATH is the compiler execute path, for example, CodeSourcery, Keil MDK, IAR
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'C:\Users\XXYYZZ'
elif CROSS_TOOL == 'keil':
PLATFORM = 'armcc'
EXEC_PATH = r'C:/Keil_v5'
elif CROSS_TOOL == 'iar':
PLATFORM = 'iar'
EXEC_PATH = r'C:/Program Files (x86)/IAR Systems/Embedded Workbench 8.0'
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'debug'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'arm-none-eabi-'
CC = PREFIX + 'gcc'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
CXX = PREFIX + 'g++'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcpu=cortex-m7 -mthumb -mfpu=fpv5-d16 -mfloat-abi=hard -ffunction-sections -fdata-sections'
CFLAGS = DEVICE + ' -Dgcc'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp -Wa,-mimplicit-it=thumb '
LFLAGS = DEVICE + ' -Wl,--gc-sections,-Map=rt-thread.map,-cref,-u,Reset_Handler -T board/linker_scripts/link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -gdwarf-2 -g'
AFLAGS += ' -gdwarf-2'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
elif PLATFORM == 'armcc':
# toolchains
CC = 'armcc'
CXX = 'armcc'
AS = 'armasm'
AR = 'armar'
LINK = 'armlink'
TARGET_EXT = 'axf'
DEVICE = ' --cpu Cortex-M7.fp.sp'
CFLAGS = '-c ' + DEVICE + ' --apcs=interwork --c99'
AFLAGS = DEVICE + ' --apcs=interwork '
LFLAGS = DEVICE + ' --scatter "board\linker_scripts\link.sct" --info sizes --info totals --info unused --info veneers --list rt-thread.map --strict'
CFLAGS += ' -I' + EXEC_PATH + '/ARM/ARMCC/include'
LFLAGS += ' --libpath=' + EXEC_PATH + '/ARM/ARMCC/lib'
CFLAGS += ' -D__MICROLIB '
AFLAGS += ' --pd "__MICROLIB SETA 1" '
LFLAGS += ' --library_type=microlib '
EXEC_PATH += '/ARM/ARMCC/bin/'
if BUILD == 'debug':
CFLAGS += ' -g -O0'
AFLAGS += ' -g'
else:
CFLAGS += ' -O2'
CXXFLAGS = CFLAGS
CFLAGS += ' -std=c99'
POST_ACTION = 'fromelf --bin $TARGET --output rtthread.bin \nfromelf -z $TARGET'
elif PLATFORM == 'iar':
# toolchains
CC = 'iccarm'
CXX = 'iccarm'
AS = 'iasmarm'
AR = 'iarchive'
LINK = 'ilinkarm'
TARGET_EXT = 'out'
DEVICE = '-Dewarm'
CFLAGS = DEVICE
CFLAGS += ' --diag_suppress Pa050'
CFLAGS += ' --no_cse'
CFLAGS += ' --no_unroll'
CFLAGS += ' --no_inline'
CFLAGS += ' --no_code_motion'
CFLAGS += ' --no_tbaa'
CFLAGS += ' --no_clustering'
CFLAGS += ' --no_scheduling'
CFLAGS += ' --endian=little'
CFLAGS += ' --cpu=Cortex-M7'
CFLAGS += ' -e'
CFLAGS += ' --fpu=VFPv5_sp'
CFLAGS += ' --dlib_config "' + EXEC_PATH + '/arm/INC/c/DLib_Config_Normal.h"'
CFLAGS += ' --silent'
AFLAGS = DEVICE
AFLAGS += ' -s+'
AFLAGS += ' -w+'
AFLAGS += ' -r'
AFLAGS += ' --cpu Cortex-M7'
AFLAGS += ' --fpu VFPv5_sp'
AFLAGS += ' -S'
if BUILD == 'debug':
CFLAGS += ' --debug'
CFLAGS += ' -On'
else:
CFLAGS += ' -Oh'
LFLAGS = ' --config "board/linker_scripts/link.icf"'
LFLAGS += ' --entry __iar_program_start'
CXXFLAGS = CFLAGS
EXEC_PATH = EXEC_PATH + '/arm/bin/'
POST_ACTION = 'ielftool --bin $TARGET rtthread.bin'
def dist_handle(BSP_ROOT, dist_dir):
import sys
cwd_path = os.getcwd()
sys.path.append(os.path.join(os.path.dirname(BSP_ROOT), 'tools'))
from sdk_dist import dist_do_building
dist_do_building(BSP_ROOT, dist_dir)
|
Python
| 0
|
@@ -552,17 +552,20 @@
'iar':%0A
-%09
+
PLATFORM
@@ -574,17 +574,20 @@
= 'iar'%0A
-%09
+
EXEC_PAT
@@ -2511,17 +2511,16 @@
= CFLAGS
-
%0A CFL
@@ -3733,20 +3733,16 @@
CFLAGS%0A
-
%0A EXE
|
6092e01013d233d77d9d1350d7e3923a5e68f85c
|
Fix to get shape.
|
chainer/functions/pooling/average_pooling_nd.py
|
chainer/functions/pooling/average_pooling_nd.py
|
import numpy
import functools
import operator
import six
from chainer import cuda
from chainer.functions.pooling import average_pooling_nd_kernel
from chainer.functions.pooling import pooling_nd
from chainer import utils
from chainer.utils import conv_nd
if cuda.cudnn_enabled:
cudnn = cuda.cudnn
libcudnn = cudnn.cudnn
class AveragePoolingND(pooling_nd._PoolingND):
"""Average pooling over a set of N-dimensional planes."""
# TODO(takagi) Support cover_all mode.
def __init__(self, ndim, ksize, stride=None, pad=0, use_cudnn=True):
utils.experimental('chainer.functions.pooling.AveragePoolingND')
super(AveragePoolingND, self).__init__(
ndim, ksize, stride=stride, pad=pad, cover_all=False,
use_cudnn=use_cudnn)
def forward_cpu(self, x):
col = conv_nd.im2col_nd_cpu(
x[0], self.ksize, self.stride, self.pad, cover_all=False)
# mean along (_, _, k_1, k_2, ..., k_N, _, ..., _)
y_axis = tuple(six.moves.range(2, 2 + len(self.ksize)))
y = col.mean(axis=y_axis)
return y,
def forward_gpu(self, x):
if (cuda.cudnn_enabled and self.use_cudnn and self.ndim > 1 and
pooling_nd._check_cudnn_acceptable_type(x[0].dtype)):
return super(AveragePoolingND, self).forward_gpu(x)
n, c = x[0].shape[:2]
dims = x[0].shape[2:]
ys = tuple(conv_nd.get_conv_outsize(d, k, s, p, cover_all=False)
for (d, k, s, p) in six.moves.zip(
dims, self.ksize, self.stride, self.pad))
# (n, c, y_1, y_2, ..., y_N)
y_shape = (n, c) + ys
y = cuda.cupy.empty(y_shape, dtype=x[0].dtype)
coeff = 1. / functools.reduce(operator.mul, self.ksize)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelForward.generate(
self.ndim)
cuda.elementwise(in_params, out_params, operation, name)(
x[0].reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad + (coeff, y)))
return y,
def backward_cpu(self, x, gy):
dims = x[0].shape[2:]
outs = gy[0].shape[2:]
colon = slice(None, None, None)
gy_index = (colon, colon) + (None,) * len(dims)
gcol_reps = (1, 1) + self.ksize + (1,) * len(outs)
gcol = numpy.tile(gy[0][gy_index], gcol_reps)
gx = conv_nd.col2im_nd_cpu(gcol, self.stride, self.pad, dims)
gx /= functools.reduce(operator.mul, self.ksize)
return gx,
def backward_gpu(self, x, gy):
if (cuda.cudnn_enabled and self.use_cudnn and self.ndim > 1 and
pooling_nd._check_cudnn_acceptable_type(x[0].dtype)):
return super(AveragePoolingND, self).backward_gpu(x, gy)
n, c = x[0].shape[:2]
dims = x[0].shape[2:]
ys = gy[0].shape[2:]
gx = cuda.cupy.empty_like(x[0])
coeff = 1. / functools.reduce(operator.mul, self.ksize)
in_params, out_params, operation, name = \
average_pooling_nd_kernel.AveragePoolingNDKernelBackward.generate(
self.ndim)
cuda.elementwise(in_params, out_params, operation, name)(
gy[0].reduced_view(),
*(dims + ys + self.ksize + self.stride + self.pad + (coeff, gx)))
return gx,
def create_pool_desc(self):
return cudnn.create_pooling_descriptor(
self.ksize, self.stride, self.pad,
libcudnn.CUDNN_POOLING_AVERAGE_COUNT_INCLUDE_PADDING)
def average_pooling_nd(x, ksize, stride=None, pad=0, use_cudnn=True):
"""N-dimensionally spatial average pooling function.
This function provides a N-dimensionally generalized version of
:func:`~functions.average_pooling_2d`. This acts similarly to
:class:`~functions.ConvolutionND`, but it computes the average of input
spatial patch for each channel without any parameter instead of computing
the inner products.
Args:
x(~chainer.Variable): Input variable.
ksize (int or tuple of ints): Size of pooling window. ``ksize=k`` and
``ksize=(k, k, ..., k)`` are equivalent.
stride (int or tuple of ints or None): Stride of pooling applications.
``stride=s`` and ``stride=(s,s, ..., s)`` are equivalent. If
``None`` is specified, then it uses same stride as the pooling
window size.
pad (int or tuple of ints): Spatial padding width for the input array.
``pad=p`` and ``pad=(p, p, ..., p)`` are equivalent.
use_cudnn (bool): If ``True`` and cuDNN is enabled, then this function
uses cuDNN as the core implementation. cuDNN supports more than
one-dimensional pooling.
Returns:
~chainer.Variable: Output variable.
.. note::
This function currently does not support ``cover_all`` mode as
:func:`max_pooling_nd`. Average pooling runs in non-cover-all mode.
"""
ndim = len(x.data.shape[2:])
return AveragePoolingND(ndim, ksize, stride, pad, use_cudnn)(x)
|
Python
| 0
|
@@ -5026,13 +5026,8 @@
n(x.
-data.
shap
|
5e7dbbcc47ec12e07e4acd5c41cd31d88dd0ca21
|
Exclude the archs that are not supported in the webapp too
|
autocloud/web/app.py
|
autocloud/web/app.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import flask
import flask.ext.restless
from flask import request, url_for, render_template
from sqlalchemy import desc
from werkzeug.exceptions import abort
import autocloud
from autocloud.models import init_model
from autocloud.models import JobDetails, ComposeJobDetails, ComposeDetails
from autocloud.web.pagination import RangeBasedPagination
from autocloud.web.utils import get_object_or_404
app = flask.Flask(__name__)
session = init_model()
class JobDetailsPagination(RangeBasedPagination):
def get_page_link(self, page_key, limit):
get_params = dict(request.args)
get_params.update({
'from': page_key, 'limit': limit})
return url_for('job_details', **dict(
[(key, value) for key, value in get_params.items()])
)
def order_queryset(self):
if self.direction == 'next':
self.queryset = self.queryset.order_by(desc(
ComposeJobDetails.id))
else:
self.queryset = self.queryset.order_by(ComposeJobDetails.id)
def filter_queryset(self):
if self.page_key is None:
return
from_jobdetails = session.query(ComposeJobDetails).get(self.page_key)
if from_jobdetails:
if self.direction == 'prev':
self.queryset = self.queryset.filter(
ComposeJobDetails.id > from_jobdetails.id)
else:
self.queryset = self.queryset.filter(
ComposeJobDetails.id < from_jobdetails.id)
class ComposeDetailsPagination(RangeBasedPagination):
def get_page_link(self, page_key, limit):
get_params = dict(request.args)
get_params.update({
'from': page_key, 'limit': limit})
return url_for('compose_details', **dict(
[(key, value) for key, value in get_params.items()])
)
def order_queryset(self):
if self.direction == 'next':
self.queryset = self.queryset.order_by(desc(
ComposeDetails.id))
else:
self.queryset = self.queryset.order_by(ComposeDetails.id)
def filter_queryset(self):
if self.page_key is None:
return
from_jobdetails = session.query(ComposeDetails).get(self.page_key)
if from_jobdetails:
if self.direction == 'prev':
self.queryset = self.queryset.filter(
ComposeDetails.id > from_jobdetails.id)
else:
self.queryset = self.queryset.filter(
ComposeDetails.id < from_jobdetails.id)
@app.route('/')
def index():
return flask.render_template('index.html', navbar_fixed=True)
@app.route('/compose/')
@app.route('/compose')
def compose_details():
queryset = session.query(ComposeDetails)
limit = int(request.args.get('limit', 5))
compose_details, prev_link, next_link = ComposeDetailsPagination(
queryset, request.args.get('from'), limit, request.path,
request.referrer, dict(request.args)).paginate()
compose_ids = [item.compose_id for item in compose_details]
compose_locations = dict(session.query(
ComposeDetails.compose_id,
ComposeDetails.location).filter(
ComposeDetails.compose_id.in_(compose_ids)).all())
return flask.render_template(
'compose_details.html', compose_details=compose_details,
prev_link=prev_link, next_link=next_link,
compose_locations=compose_locations,
navbar_fixed=True
)
@app.route('/jobs/')
@app.route('/jobs')
@app.route('/jobs/<compose_pk>/')
@app.route('/jobs/<compose_pk>')
def job_details(compose_pk=None):
queryset = session.query(ComposeJobDetails)
if compose_pk is not None:
compose_obj = session.query(ComposeDetails).get(compose_pk)
if compose_obj is None:
abort(404)
compose_id = compose_obj.compose_id
queryset = queryset.filter_by(compose_id=compose_id)
# Apply filters
filters = ('family', 'arch', 'status', 'image_type')
selected_filters = {}
for filter in filters:
if request.args.get(filter):
queryset = queryset.filter(
getattr(ComposeJobDetails, filter) == request.args[filter])
selected_filters[filter] = request.args[filter]
limit = int(request.args.get('limit', 50))
job_details, prev_link, next_link = JobDetailsPagination(
queryset, request.args.get('from'), limit,
request.path,
request.referrer, dict(request.args)).paginate()
filter_fields = (
{'label': 'Family', 'name': 'family',
'options': ComposeJobDetails.IMAGE_FAMILY_TYPES},
{'label': 'Architecture', 'name': 'arch',
'options': ComposeJobDetails.ARCH_TYPES},
{'label': 'Type', 'name': 'image_type',
'options': [(value[0], value[0])
for value in session.query(
ComposeJobDetails.image_type).distinct()]},
{'label': 'Status', 'name': 'status',
'options': ComposeJobDetails.STATUS_TYPES}
)
compose_ids = [item.compose_id for item in job_details]
compose_locations = dict(session.query(
ComposeDetails.compose_id,
ComposeDetails.location).filter(
ComposeDetails.compose_id.in_(compose_ids)).all())
return flask.render_template(
'job_details.html', job_details=job_details, prev_link=prev_link,
next_link=next_link, filter_fields=filter_fields,
selected_filters=selected_filters, compose_locations=compose_locations,
navbar_fixed=True
)
@app.route('/jobs/<jobid>/output')
def job_output(jobid):
job_detail = get_object_or_404(session,
ComposeJobDetails,
ComposeJobDetails.id == jobid)
_id = session.query(ComposeDetails.id).filter_by(
compose_id=job_detail.compose_id).all()[0][0]
compose_locations = dict(session.query(
ComposeDetails.compose_id,
ComposeDetails.location).filter(
ComposeDetails.compose_id.in_(job_detail.compose_id)).all())
job_output_lines = []
if job_detail.output:
job_output_lines = job_detail.output.split('\n')
return flask.render_template(
'job_output.html', job_detail=job_detail,
compose_locations=compose_locations, _id=_id,
job_output_lines=job_output_lines, navbar_fixed=False)
# Custom Error pages
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
# API stuff
apimanager = flask.ext.restless.APIManager(app, session=session)
apimanager.create_api(JobDetails, methods=['GET'])
if __name__ == '__main__':
app.run(host=autocloud.HOST, port=autocloud.PORT, debug=autocloud.DEBUG)
|
Python
| 0
|
@@ -3757,24 +3757,97 @@
eJobDetails)
+%0A supported_archs = %5Barch for arch, _ in ComposeJobDetails.ARCH_TYPES%5D
%0A%0A if com
@@ -4096,16 +4096,85 @@
pose_id)
+.filter(%0A ComposeJobDetails.arch.in_(supported_archs))
%0A%0A #
|
45e515efbe7242f3f8871242cd1ddb7ceb29ae32
|
fix file perms
|
src/main/python/netkraken/__init__.py
|
src/main/python/netkraken/__init__.py
|
from datetime import datetime, timedelta
import os
settings = {
"stagedir": "/tmp/netconns/__stage__",
"finaldir": "/tmp/netconns/final"}
formats = {
"day": "%Y-%m-%d",
"hour": "%Y-%m-%dT%H",
"minute": "%Y-%m-%dT%H:%M"}
thresholds = {
"day": timedelta(days=14),
"hour": timedelta(hours=4*24),
"minute": timedelta(minutes=60)}
def get_current_datetime():
return datetime.now()
def get_current_timestrings():
now = get_current_datetime()
tmp = {}
for key, format in formats.items():
tmp[key] = now.strftime(format)
return tmp
def get_stage_filename(timestamp):
return os.path.join(settings["stagedir"], timestamp)
def get_final_filename(filename_or_timestamp):
if os.sep in filename_or_timestamp:
return filename_or_timestamp.replace(settings["stagedir"], settings["finaldir"])
return os.path.join(settings["finaldir"], filename_or_timestamp)
def get_current_stage_filename():
return get_stage_filename(get_current_timestrings()["minute"])
def get_timestamp(filename_or_timestamp):
if os.sep in filename_or_timestamp:
timestamp_string = os.path.basename(filename_or_timestamp)
else:
timestamp_string = filename_or_timestamp
for level, format in formats.items():
try:
datetime.strptime(timestamp_string, format)
return (level, timestamp_string)
except:
pass
raise Exception("cannot determine timestamp of %s" % timestamp_string)
def get_higher_timestamp(filename_or_timestamp):
if os.sep in filename_or_timestamp:
timestamp_string = os.path.basename(filename_or_timestamp)
else:
timestamp_string = filename_or_timestamp
last_level = None
for level in sorted(formats): # here: order is day -> hour -> minute, perfect :o)
format = formats[level]
try:
timestamp = datetime.strptime(timestamp_string, format)
return (last_level, timestamp.strftime(formats[last_level]))
except:
last_level = level
return (None, None)
# raise Exception("cannot determine higher timestamp of %s" % timestamp_string)
|
Python
| 0.000001
| |
bc7c3322e027578f79119e6836111244ba1445cc
|
revert out
|
autonetkit/config.py
|
autonetkit/config.py
|
import pkg_resources
import ConfigParser
from configobj import ConfigObj, flatten_errors
import os
import validate
validator = validate.Validator()
import os.path
#TODO: check this works on Windows
ank_user_dir = os.path.join(os.environ['HOME'], ".autonetkit")
def load_config():
settings = ConfigParser.RawConfigParser()
spec_file = pkg_resources.resource_filename(__name__,"/config/configspec.cfg")
settings = ConfigObj(configspec=spec_file, encoding='UTF8')
# User's ANK settings
user_config_file = os.path.join(ank_user_dir, "autonetkit.cfg")
settings.merge(ConfigObj(user_config_file))
# ANK settings in current directory
settings.merge(ConfigObj("autonetkit.cfg"))
# ANK settings specified by environment variable
try:
ankcfg = os.environ['AUTONETKIT_CFG']
settings.merge(ConfigObj(ankcfg))
except KeyError:
pass
results = settings.validate(validator)
if results != True:
for (section_list, key, _) in flatten_errors(settings, results):
if key is not None:
print "Error loading configuration file:"
print 'Invalid key "%s" in section "%s"' % (key, ', '.join(section_list))
raise SystemExit
else:
# ignore missing sections - use defaults
#print 'The following section was missing:%s ' % ', '.join(section_list)
pass
return settings
#NOTE: this only gets loaded once package-wide if imported as import autonetkit.config
settings = load_config()
|
Python
| 0.000001
|
@@ -163,41 +163,56 @@
th%0A#
-TODO: check this works on Windows
+ from http://stackoverflow.com/questions/4028904
%0Aank
@@ -243,23 +243,28 @@
(os.
-environ%5B'HOME'%5D
+path.expanduser(%22~%22)
, %22
|
d6a229deb0db1b8ef050e2271b25da73d1117cc8
|
add element properties
|
library/pyjamas/ui/__init__.py
|
library/pyjamas/ui/__init__.py
|
# Copyright 2006 James Tauber and contributors
# Copyright 2009 Luke Kenneth Casson Leighton
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class HasHorizontalAlignment:
ALIGN_LEFT = "left"
ALIGN_CENTER = "center"
ALIGN_RIGHT = "right"
class HasVerticalAlignment:
ALIGN_TOP = "top"
ALIGN_MIDDLE = "middle"
ALIGN_BOTTOM = "bottom"
class HasAlignment:
ALIGN_BOTTOM = "bottom"
ALIGN_MIDDLE = "middle"
ALIGN_TOP = "top"
ALIGN_CENTER = "center"
ALIGN_LEFT = "left"
ALIGN_RIGHT = "right"
PROP_NAME = 0
PROP_DESC = 1
PROP_FNAM = 2
PROP_TYPE = 3
def get_list_columns(props, cols):
res = []
for p in props:
r = ()
for idx in cols:
r.append(p[idx])
res.append(r)
return res
def get_prop_widget_function_names(props):
return get_list_columns(props, (PROP_FNAM,))
class Applier(object):
_props = []
def __init__(self, **kwargs):
""" use this to apply properties as a dictionary, e.g.
x = klass(..., StyleName='class-name')
will do:
x = klass(...)
x.setStyleName('class-name')
and:
x = klass(..., Size=("100%", "20px"), Visible=False)
will do:
x = klass(...)
x.setSize("100%", "20px")
x.setVisible(False)
"""
self.applyValues(**kwargs)
def applyValues(self, **kwargs):
if not kwargs:
return
k = kwargs.keys()
l = len(k)
i = -1
while i < l-1:
i += 1
prop = k[i]
fn = getattr(self, "set%s" % prop, None)
if not fn:
return
args = kwargs[prop]
if isinstance(args, tuple):
fn(*args)
else:
fn(args)
def retrieveValues(self, *args):
""" use this function to obtain a dictionary of properties, as
stored in getXXX functions.
"""
res = {}
for prop in args:
fn = getattr(self, "get%s" % prop, None)
if not fn:
continue
res[prop] = fn()
return res
def _getProps(self):
return self.props
def setDefaults(self, defaults):
divs = self.retrieveValues(wnames)
for p in get_prop_widget_function_names(self._getProps()):
defaults[p[PROP_NAME]] = divs[p[PROP_FNAM]]
def updateInstance(self, app_context):
args = {}
for p in self._getProps():
val = app_context.getAppProperty(p[0])
convert_to_type = p[PROP_TYPE]
if convert_to_type:
val = convert_to_type(val) if val else None
args[p[PROP_FNAM]] = val
self.applyValues(args)
|
Python
| 0.000001
|
@@ -1083,16 +1083,97 @@
PE = 3%0A%0A
+ELPROP_NAME = 0%0AELPROP_DESC = 1%0AELPROP_FNAM = 2%0AELPROP_TYPE = 3%0AELPROP_DFLT = 4%0A%0A
def get_
@@ -1484,16 +1484,37 @@
ops = %5B%5D
+%0A _elem_props = %5B%5D
%0A%0A de
@@ -2869,16 +2869,17 @@
rn self.
+_
props%0A%0A
@@ -3436,9 +3436,413 @@
args) %0A%0A
+ def setElementProperties(self, context, elemProps):%0A args = %7B%7D%0A for p in self._getElementProps():%0A if not elemProps.has_key(p):%0A continue%0A convert_to_type = p%5BELPROP_TYPE%5D%0A if convert_to_type:%0A val = convert_to_type(val) if val else None%0A args%5Bp%5BELPROP_FNAM%5D%5D = (context, val,)%0A%0A self.applyValues(args) %0A
%0A
|
cba6caf0eed1efce421926abaa14742893dd1bbd
|
remove trailing empty line (PEP8 conformance)
|
debsources/tests/test_filetype.py
|
debsources/tests/test_filetype.py
|
# Copyright (C) 2013-2015 The Debsources developers <info@sources.debian.net>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=AUTHORS;hb=HEAD
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://anonscm.debian.org/gitweb/?p=qa/debsources.git;a=blob;f=COPYING;hb=HEAD
from __future__ import absolute_import
import unittest
from nose.tools import istest
from nose.plugins.attrib import attr
from debsources.filetype import get_filetype, get_highlightjs_language
from debsources.filetype import HTML, PHP, PYTHON, RUBY, XML, MAKEFILE
@attr('filetype')
class FiletypeTests(unittest.TestCase):
""" Unit tests for debsources.filetype """
@istest
def pythonShebang(self):
self.assertEqual(get_filetype('foo', '#!/usr/bin/python'), PYTHON)
@istest
def envPythonShebang(self):
self.assertEqual(get_filetype('foo', '#!/usr/bin/env python'), PYTHON)
@istest
def envRubyShebang(self):
self.assertEqual(get_filetype('foo', '#!/usr/bin/env ruby'), RUBY)
@istest
def testUnknownShebang(self):
self.assertIsNone(get_filetype('foo', '#!/usr/bin/foobar'))
@istest
def pythonExtension(self):
self.assertEqual(get_filetype('foo.py', 'foobar'), PYTHON)
@istest
def rubyExtension(self):
self.assertEqual(get_filetype('foo.rb', 'foobar'), RUBY)
@istest
def unknownExtension(self):
self.assertIsNone(get_filetype('foo.bar', 'foobar'))
@istest
def htmlTag(self):
self.assertEqual(get_filetype('foo', '<html><head>'), HTML)
@istest
def xmlTag(self):
self.assertEqual(get_filetype('foo', '<?xml>'), XML)
@istest
def phpTag(self):
self.assertEqual(get_filetype('foo', "<?php echo('hello') ?>"), PHP)
@istest
def hilightjsLanguageDjango(self):
self.assertEqual(get_highlightjs_language("foo.html", "foobar", None),
"django")
@istest
def hilightjsLanguagePerl(self):
self.assertEqual(get_highlightjs_language("foo",
"#!/bin/perl\n",
None),
"perl")
@istest
def makefileFilename(self):
self.assertEqual(get_filetype('Makefile', 'foobar'), MAKEFILE)
@istest
def makefileFilenameLowerCase(self):
self.assertEqual(get_filetype('makefile', 'foobar'), MAKEFILE)
@istest
def assertAutomakeNotMakefile(self):
self.assertNotEqual(get_filetype('Makefile.am', 'foobar'), MAKEFILE)
@istest
def makefileShebang(self):
self.assertEqual(get_filetype('foo', '#!/usr/bin/make -f'), MAKEFILE)
@istest
def hilightjsLanguageMakefile(self):
self.assertEqual(get_highlightjs_language("Makefile", "foobar", None),
"makefile")
@istest
def hilightjsLanguageMakeShebang(self):
self.assertEqual(get_highlightjs_language("foo",
"#!/usr/bin/make -f",
None),
"makefile")
|
Python
| 0
|
@@ -3542,29 +3542,28 @@
%22makefile%22)%0A
-%0A
|
080b967c0854d416532449dba96bbbd8f0318d8a
|
remove time_per_record since it does not make real sense
|
pikos/benchmark/monitors.py
|
pikos/benchmark/monitors.py
|
# -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# Package: Pikos toolkit
# File: benchmark/monitors.py
# License: LICENSE.TXT
#
# Copyright (c) 2012, Enthought, Inc.
# All rights reserved.
#------------------------------------------------------------------------------
""" Estimate the overhead cost of using a monitor.
The benchmark runs the pystones benchmark under each monitor and calculates
the overhead.
"""
from test import pystone
from pikos.benchmark.record_counter import RecordCounter
def pymonitors():
""" Pure python monitors """
from pikos.monitors.api import (
FunctionMonitor, LineMonitor,
FunctionMemoryMonitor, LineMemoryMonitor)
return {
'FunctionMonitor': lambda recorder, record_type: FunctionMonitor(
recorder, None if record_type is None else tuple),
'LineMonitor': LineMonitor,
'FunctionMemoryMonitor': FunctionMemoryMonitor,
'LineMemoryMonitor': LineMemoryMonitor}
def cymonitors():
""" Cython monitors """
from pikos.cymonitors.api import FunctionMonitor
from pikos.cymonitors.api import FunctionMemoryMonitor
return {
'CFunctionMonitor': FunctionMonitor,
'CFunctionMemoryMonitor': FunctionMemoryMonitor}
def run(monitors, loops, record_type=None):
""" Time the monitors overhead using pystones.
Parameter
---------
monitors : list
The list of monitors to time.
loops : int
The number of loops to run pystones.
record_type : object
The type of record to use.
"""
header = (
"Overhead time | Relative overhead | "
"{:^10} | Per record | {:^{length}}".format(
'Records', 'Name',
length=max(len(key) for key in monitors) - 4))
line = ('{time:>13} | {relative:>17} | {records:>10} '
'| {time_per_record:.6e} | {name}')
print header
print len(header) * '-'
expected_time, _ = pystone.pystones(loops)
for name, monitor in monitors.iteritems():
recorder = RecordCounter()
with monitor(recorder=recorder, record_type=record_type):
time, _ = pystone.pystones(loops)
time_per_record = (time - expected_time) / recorder.records
print line.format(
name=name,
time='{:2.2f}'.format(time - expected_time),
relative='{:.2%}'.format((time - expected_time) / expected_time),
time_per_record=time_per_record,
records='{:10d}'.format(recorder.records))
def main(monitors, loops=1000):
print 'With default record types'
run(monitors, loops)
print
print 'Using tuples as records'
run(monitors, loops, record_type=tuple)
if __name__ == '__main__':
monitors = pymonitors()
monitors.update(cymonitors())
main(monitors)
|
Python
| 0.000181
|
@@ -776,116 +776,23 @@
r':
-lambda recorder, record_type: FunctionMonitor(%0A recorder, None if record_type is None else tuple)
+FunctionMonitor
,%0A
@@ -1603,23 +1603,8 @@
0%7D %7C
- Per record %7C
%7B:%5E
@@ -1772,47 +1772,8 @@
%3E10%7D
- '%0A '%7C %7Btime_per_record:.6e%7D
%7C %7B
@@ -2070,76 +2070,8 @@
ps)%0A
- time_per_record = (time - expected_time) / recorder.records%0A
@@ -2165,32 +2165,32 @@
expected_time),%0A
+
rela
@@ -2255,53 +2255,8 @@
e),%0A
- time_per_record=time_per_record,%0A
|
50f06cbc588cfc099105b6a0b62abff027f3ad21
|
Comment out potentially conflicting packages in results
|
piptools/scripts/compile.py
|
piptools/scripts/compile.py
|
# coding: utf-8
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import pip
# Make sure we're using a reasonably modern version of pip # isort:skip
if not tuple(int(digit) for digit in pip.__version__.split('.')[:2]) >= (6, 1):
print('pip-compile requires at least version 6.1 of pip ({} found), '
'perhaps run `pip install --upgrade pip`?'.format(pip.__version__))
sys.exit(4)
from itertools import chain, cycle
from os.path import basename
import click
from pip.req import parse_requirements
from ..exceptions import PipToolsError
from ..logging import log
from ..repositories import PyPIRepository
from ..resolver import Resolver
from ..utils import format_requirement
DEFAULT_REQUIREMENTS_FILE = 'requirements.in'
@click.command()
@click.option('-v', '--verbose', is_flag=True, help="Show more output")
@click.option('--dry-run', is_flag=True, help="Only show what would happen, don't change anything")
@click.option('-p', '--pre', is_flag=True, help="Allow resolving to prereleases (default is not)")
@click.option('-r', '--rebuild', is_flag=True, help="Clear any caches upfront, rebuild from scratch")
@click.option('-f', '--find-links', multiple=True, help="Look for archives in this directory or on this HTML page")
@click.option('-i', '--index-url', help="Change index URL (defaults to PyPI)")
@click.option('--extra-index-url', multiple=True, help="Add additional index URL to search")
@click.option('--no-header', is_flag=True, help="Disable the header comment in autogenerated file")
@click.option('-a', '--annotate', is_flag=True,
help="Annotate results, indicating where dependencies come from")
@click.argument('src_file', required=False, type=click.Path(exists=True), default=DEFAULT_REQUIREMENTS_FILE)
def cli(verbose, dry_run, pre, rebuild, find_links, index_url,
extra_index_url, no_header, annotate, src_file):
"""Compiles requirements.txt from requirements.in specs."""
log.verbose = verbose
if not src_file:
log.warning('No input files to process.')
sys.exit(2)
###
# Setup
###
repository = PyPIRepository()
# Configure the finder
if index_url:
repository.finder.index_urls = [index_url]
repository.finder.index_urls.extend(extra_index_url)
repository.finder.find_links.extend(find_links)
log.debug('Using indexes:')
for index_url in repository.finder.index_urls:
log.debug(' {}'.format(index_url))
if repository.finder.find_links:
log.debug('')
log.debug('Configuration:')
for find_link in repository.finder.find_links:
log.debug(' -f {}'.format(find_link))
###
# Parsing/collecting initial requirements
###
constraints = []
for line in parse_requirements(src_file, finder=repository.finder, session=repository.session):
constraints.append(line)
try:
resolver = Resolver(constraints, repository, prereleases=pre, clear_caches=rebuild)
results = resolver.resolve()
except PipToolsError as e:
log.error(str(e))
sys.exit(2)
# Format the results for outputting
log.debug('')
formatted_results = sorted(results, key=lambda r: (not r.editable, str(r.req).lower()))
# In verbose mode, we format the results in blue
blue = 'blue' if verbose else None
if not no_header:
log.info('#', fg=blue)
log.info('# This file is autogenerated by pip-compile', fg=blue)
log.info('# Make changes in {}, then run this to update:'.format(basename(src_file)), fg=blue)
log.info('#', fg=blue)
log.info('# pip-compile {}'.format(basename(src_file)), fg=blue)
log.info('#', fg=blue)
for directive, index_url in zip(chain(['--index-url'], cycle(['--extra-index-url'])),
repository.finder.index_urls):
if index_url == repository.DEFAULT_INDEX_URL:
continue
log.info('{} {}'.format(directive, repository.finder.index_urls[0]), fg=blue)
if annotate:
# Compute reverse dependency annotations statically, from the
# dependency cache that the resolver has populated by now.
#
# TODO (1a): reverse deps for any editable package are lost
# what SHOULD happen is that they are cached in memory, just
# not persisted to disk!
#
# TODO (1b): perhaps it's easiest if the dependency cache has an API
# that could take InstallRequirements directly, like:
#
# cache.set(ireq, ...)
#
# then, when ireq is editable, it would store in
#
# editables[egg_name][link_without_fragment] = deps
# editables['pip-tools']['git+...ols.git@future'] = {'click>=3.0', 'six'}
#
# otherwise:
#
# self[as_name_version_tuple(ireq)] = {'click>=3.0', 'six'}
#
# TODO (2): consider dropping annotations for top-level packages, e.g.
# when both django and django-debug-toolbar are top-level
# requirements, django gets this stupid annotation:
#
# django-debug-toolbar==1.3.0
# django==1.8 # via django-debug-toolbar
#
rev_deps = resolver.reverse_dependencies(results)
for result in formatted_results:
annotation = None
if annotate:
annotation = ', '.join(sorted(rev_deps.get(result.name, [])))
if annotation:
annotation = 'via ' + annotation
log.info(format_requirement(result, annotation=annotation), fg=blue)
if dry_run:
log.warning('Dry-run, so nothing updated.')
|
Python
| 0
|
@@ -5476,16 +5476,76 @@
results)
+%0A%0A EXCLUDE_PACKAGES = %7B'setuptools', 'distribute', 'pip'%7D
%0A for
@@ -5780,24 +5780,22 @@
l
-og.info(
+ine =
format_r
@@ -5835,16 +5835,123 @@
otation)
+%0A%0A if result.name in EXCLUDE_PACKAGES:%0A line = '# %7B%7D'.format(line)%0A%0A log.info(line
, fg=blu
|
9b52967bd0b4fcf411ce4303170fa77d1d417669
|
fix repr exception when using raven
|
dpark/task.py
|
dpark/task.py
|
import os,os.path
import socket
import marshal
import cPickle
import logging
import struct
from dpark.util import compress, decompress, atomic_file
from dpark.serialize import marshalable, load_func, dump_func, dumps, loads
from dpark.shuffle import LocalFileShuffle
logger = logging.getLogger(__name__)
class Task:
def __init__(self):
self.id = Task.newId()
nextId = 0
@classmethod
def newId(cls):
cls.nextId += 1
return cls.nextId
def run(self, id):
raise NotImplementedError
def preferredLocations(self):
raise NotImplementedError
class DAGTask(Task):
def __init__(self, stageId):
Task.__init__(self)
self.stageId = stageId
def __repr__(self):
return '<task %d:%d>'%(self.stageId, self.id)
class ResultTask(DAGTask):
def __init__(self, stageId, rdd, func, partition, locs, outputId):
DAGTask.__init__(self, stageId)
self.rdd = rdd
self.func = func
self.partition = partition
self.split = rdd.splits[partition]
self.locs = locs
self.outputId = outputId
def run(self, attemptId):
logger.debug("run task %s with %d", self, attemptId)
return self.func(self.rdd.iterator(self.split))
def preferredLocations(self):
return self.locs
def __repr__(self):
return "<ResultTask(%d) of %s" % (self.partition, self.rdd)
def __getstate__(self):
d = dict(self.__dict__)
del d['func']
del d['rdd']
return d, dumps(self.rdd), dump_func(self.func)
def __setstate__(self, state):
d, rdd, func = state
self.__dict__.update(d)
self.rdd = loads(rdd)
self.func = load_func(func)
class ShuffleMapTask(DAGTask):
def __init__(self, stageId, rdd, dep, partition, locs):
DAGTask.__init__(self, stageId)
self.rdd = rdd
self.shuffleId = dep.shuffleId
self.aggregator = dep.aggregator
self.partitioner = dep.partitioner
self.partition = partition
self.split = rdd.splits[partition]
self.locs = locs
def __repr__(self):
return '<ShuffleTask(%d, %d) of %s>' % (self.shuffleId, self.partition, self.rdd)
def __getstate__(self):
d = dict(self.__dict__)
del d['rdd']
return d, dumps(self.rdd)
def __setstate__(self, state):
d, rdd = state
self.__dict__.update(d)
self.rdd = loads(rdd)
def preferredLocations(self):
return self.locs
def run(self, attempId):
logger.debug("shuffling %d of %s", self.partition, self.rdd)
for i, bucket in self.rdd._prepare_shuffle(self.split, self.partitioner, self.aggregator):
try:
if marshalable(bucket):
flag, d = 'm', marshal.dumps(bucket)
else:
flag, d = 'p', cPickle.dumps(bucket, -1)
except ValueError:
flag, d = 'p', cPickle.dumps(bucket, -1)
cd = compress(d)
for tried in range(1, 4):
try:
path = LocalFileShuffle.getOutputFile(self.shuffleId, self.partition, i, len(cd) * tried)
with atomic_file(path, bufsize=1024*4096) as f:
f.write(flag + struct.pack("I", 5 + len(cd)))
f.write(cd)
break
except IOError, e:
logger.warning("write %s failed: %s, try again (%d)", path, e, tried)
else:
raise
return LocalFileShuffle.getServerUri()
|
Python
| 0.000001
|
@@ -2138,32 +2138,179 @@
__repr__(self):%0A
+ shuffleId = getattr(self, 'shuffleId', None)%0A partition = getattr(self, 'partition', None)%0A rdd = getattr(self, 'rdd', None)%0A
return '
@@ -2327,13 +2327,13 @@
sk(%25
-d, %25d
+s, %25s
) of
@@ -2341,21 +2341,16 @@
%25s%3E' %25 (
-self.
shuffleI
@@ -2344,37 +2344,32 @@
' %25 (shuffleId,
-self.
partition, self.
@@ -2355,37 +2355,32 @@
eId, partition,
-self.
rdd)%0A%0A def __
|
cc17153f777a275065be9a08d7ecbcb480cd8759
|
add within_frame function to __init__.py
|
plantcv/plantcv/__init__.py
|
plantcv/plantcv/__init__.py
|
import os
import matplotlib
# If there is no display or a matplotlib backend already defined, use the non-GUI backend
if "DISPLAY" not in os.environ and "MPLBACKEND" not in os.environ:
matplotlib.use("Agg")
class Params:
"""PlantCV parameters class
Keyword arguments/parameters:
device = device number. Used to count steps in the pipeline. (default: 0)
debug = None, print, or plot. Print = save to file, Plot = print to screen. (default: None)
debug_outdir = Debug images output directory. (default: .)
:param device: int
:param debug: str
:param debug_outdir: str
:param line_thickness: numeric
:param dpi: int
"""
def __init__(self, device=0, debug=None, debug_outdir=".", line_thickness=5, dpi=100):
self.device = device
self.debug = debug
self.debug_outdir = debug_outdir
self.line_thickness = line_thickness
self.dpi = dpi
class Outputs:
"""PlantCV outputs class
"""
def __init__(self):
self.measurements = {}
self.images = []
# Add a method to clear out the
def clear(self):
self.measurements = {}
self.images = []
# Initialize an instance of the Params and Outputs class with default values
# params and outputs are available when plantcv is imported
params = Params()
outputs = Outputs()
from plantcv.plantcv.fatal_error import fatal_error
from plantcv.plantcv.print_image import print_image
from plantcv.plantcv.plot_image import plot_image
from plantcv.plantcv.color_palette import color_palette
from plantcv.plantcv.apply_mask import apply_mask
from plantcv.plantcv.readimage import readimage
from plantcv.plantcv.readbayer import readbayer
from plantcv.plantcv.laplace_filter import laplace_filter
from plantcv.plantcv.sobel_filter import sobel_filter
from plantcv.plantcv.scharr_filter import scharr_filter
from plantcv.plantcv.hist_equalization import hist_equalization
from plantcv.plantcv.image_add import image_add
from plantcv.plantcv.image_subtract import image_subtract
from plantcv.plantcv.erode import erode
from plantcv.plantcv.dilate import dilate
from plantcv.plantcv.watershed import watershed_segmentation
from plantcv.plantcv.rectangle_mask import rectangle_mask
from plantcv.plantcv.rgb2gray_hsv import rgb2gray_hsv
from plantcv.plantcv.rgb2gray_lab import rgb2gray_lab
from plantcv.plantcv.rgb2gray import rgb2gray
from plantcv.plantcv.median_blur import median_blur
from plantcv.plantcv.fill import fill
from plantcv.plantcv.invert import invert
from plantcv.plantcv.logical_and import logical_and
from plantcv.plantcv.logical_or import logical_or
from plantcv.plantcv.logical_xor import logical_xor
from plantcv.plantcv.find_objects import find_objects
from plantcv.plantcv.roi_objects import roi_objects
from plantcv.plantcv.object_composition import object_composition
from plantcv.plantcv.analyze_object import analyze_object
from plantcv.plantcv.analyze_bound_horizontal import analyze_bound_horizontal
from plantcv.plantcv.analyze_bound_vertical import analyze_bound_vertical
from plantcv.plantcv.analyze_color import analyze_color
from plantcv.plantcv.analyze_nir_intensity import analyze_nir_intensity
from plantcv.plantcv.fluor_fvfm import fluor_fvfm
from plantcv.plantcv.print_results import print_results
from plantcv.plantcv.resize import resize
from plantcv.plantcv.flip import flip
from plantcv.plantcv.crop_position_mask import crop_position_mask
from plantcv.plantcv.get_nir import get_nir
from plantcv.plantcv.report_size_marker_area import report_size_marker_area
from plantcv.plantcv.white_balance import white_balance
from plantcv.plantcv.acute_vertex import acute_vertex
from plantcv.plantcv.scale_features import scale_features
from plantcv.plantcv.landmark_reference_pt_dist import landmark_reference_pt_dist
from plantcv.plantcv.x_axis_pseudolandmarks import x_axis_pseudolandmarks
from plantcv.plantcv.y_axis_pseudolandmarks import y_axis_pseudolandmarks
from plantcv.plantcv.gaussian_blur import gaussian_blur
from plantcv.plantcv.cluster_contours import cluster_contours
from plantcv.plantcv.cluster_contour_splitimg import cluster_contour_splitimg
from plantcv.plantcv.rotate import rotate
from plantcv.plantcv.shift_img import shift_img
from plantcv.plantcv.output_mask_ori_img import output_mask
from plantcv.plantcv.auto_crop import auto_crop
from plantcv.plantcv.background_subtraction import background_subtraction
from plantcv.plantcv.naive_bayes_classifier import naive_bayes_classifier
from plantcv.plantcv.acute import acute
from plantcv.plantcv.distance_transform import distance_transform
from plantcv.plantcv.canny_edge_detect import canny_edge_detect
from plantcv.plantcv.opening import opening
from plantcv.plantcv.closing import closing
from plantcv.plantcv import roi
from plantcv.plantcv import threshold
from plantcv.plantcv import transform
from plantcv.plantcv import visualize
from plantcv.plantcv import morphology
# add new functions to end of lists
__all__ = ['fatal_error', 'print_image', 'plot_image', 'color_palette', 'apply_mask', 'readimage',
'readbayer', 'laplace_filter', 'sobel_filter', 'scharr_filter', 'hist_equalization', 'erode',
'image_add', 'image_subtract', 'dilate', 'watershed', 'rectangle_mask', 'rgb2gray_hsv', 'rgb2gray_lab',
'rgb2gray', 'median_blur', 'fill', 'invert', 'logical_and', 'logical_or', 'logical_xor',
'find_objects', 'roi_objects', 'transform', 'object_composition', 'analyze_object', 'morphology',
'analyze_bound_horizontal', 'analyze_bound_vertical', 'analyze_color', 'analyze_nir_intensity',
'fluor_fvfm', 'print_results', 'resize', 'flip', 'crop_position_mask', 'get_nir', 'report_size_marker_area',
'white_balance', 'acute_vertex', 'scale_features', 'landmark_reference_pt_dist', 'outputs',
'x_axis_pseudolandmarks', 'y_axis_pseudolandmarks', 'gaussian_blur', 'cluster_contours', 'visualize',
'cluster_contour_splitimg', 'rotate', 'shift_img', 'output_mask', 'auto_crop', 'canny_edge_detect',
'background_subtraction', 'naive_bayes_classifier', 'acute', 'distance_transform', 'params', 'opening',
'closing']
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
Python
| 0.000088
|
@@ -4772,24 +4772,78 @@
ort closing%0A
+from plantcv.plantcv.within_frame import within_frame%0A
from plantcv
@@ -6271,16 +6271,31 @@
closing'
+,'within_frame'
%5D%0A%0A%0Afrom
|
a477de34625f9fc4076eaa093b606463063e33b3
|
add check if TwitterAPI was installed and installed it if not
|
gimp_be/network/twitter.py
|
gimp_be/network/twitter.py
|
from gimp_be.settings.settings import *
from gimp_be.utils.string_tools import *
def tweetImage(message,image_file):
"""
Tweet image with message
:param message:
:param image_file:
:return:
"""
from TwitterAPI import TwitterAPI
global settings_data
CONSUMER_KEY = settings_data['twitter']['CONSUMER_KEY']
CONSUMER_SECRET = settings_data['twitter']['CONSUMER_SECRET']
ACCESS_TOKEN_KEY = settings_data['twitter']['ACCESS_TOKEN_KEY']
ACCESS_TOKEN_SECRET = settings_data['twitter']['ACCESS_TOKEN_SECRET']
api = TwitterAPI(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
file = open(image_file, 'rb')
data = file.read()
r = api.request('statuses/update_with_media', {'status':message}, {'media[]':data})
return str(str(r.status_code))
def tweetText(opt=0):
"""
return string of twitter message
:param opt:
:return:
"""
global settings_data
import datetime
now = datetime.datetime.now()
updateLocationData()
title = imageTitle(2)
city = settings_data["location"]["city"]
state = settings_data["location"]["state"]
host_name = settings_data["network"]["host_name"]
tempf = settings_data["location"]["tempf"]
weather = settings_data["location"]["weather"]
hashtags = settings_data["twitter"]["hashtags"]
time_stamp = str(datetime.datetime.now())
tweet_text = ''
if opt == 0:
tweet_text = title + '\nby ' + settings_data['user']['author'] + '\n' + city + ' ' + state + ' | ' + host_name + '\n' + tempf + 'F ' + weather + '\n' + now.strftime("%A %B %d - %I:%M%p")
elif opt == 1:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4] + '\n' + hashtags
else:
tweet_text = title + '\nby ' + settings_data['user']['author'] + ' ' + time_stamp[:4]
return tweet_text
def tweetHashtags(hashtags=('0', '1', '2')):
"""
hashtag string
:param hashtags:
:return:
"""
tag_string = ''
for tag in hashtags:
tag_string = tag_string + '#' + tag + ' '
return tag_string.strip()
|
Python
| 0
|
@@ -74,16 +74,111 @@
mport *%0A
+from gimp_be.utils.pip import *%0Atry:%0A import TwitterAPI%0Aexcept:%0A pipInstall(%22TwitterAPI%22)
%0A%0Adef tw
|
6138f02896bc865a98480be36300bf670a6defa8
|
Replace re by os.path utils
|
plugin/complete_database.py
|
plugin/complete_database.py
|
import vim
import re
import json
from os import path
current = vim.eval("expand('%:p')")
ccd = vim.eval("l:ccd")
opts = []
with open(ccd) as database:
data = json.load(database)
for d in data:
# hax for headers
fmatch = re.search(r'(.*)\.(\w+)$', current)
dmatch = re.search(r'(.*)\.(\w+)$', d['file'])
if fmatch.group(1) == dmatch.group(1):
for result in re.finditer(r'-D\s*[^\s]+', d['command']):
opts.append(result.group(0))
for result in re.finditer(r'-isystem\s*[^\s]+', d['command']):
opts.append(result.group(0))
for result in re.finditer(r'-I\s*([^\s]+)', d['command']):
opts.append('-I' + path.join(d['directory'], result.group(1)))
break
vim.command("let l:clang_options = '" + ' '.join(opts) + "'")
|
Python
| 0
|
@@ -51,19 +51,21 @@
th%0A%0Acurr
-ent
+_file
= vim.e
@@ -85,16 +85,63 @@
%25:p')%22)%0A
+curr_file_noext = path.splitext(curr_file)%5B0%5D%0A%0A
ccd = vi
@@ -203,178 +203,422 @@
-data = json.load(database)%0A%0A for d in data:%0A # hax for headers%0A fmatch = re.search(r'(.*)%5C.(%5Cw+)$', current)%0A dmatch = re.search(r'(.*)%5C.(%5Cw+)$',
+# Search for the right entry in the database matching file names%0A for d in json.load(database):%0A # This is an entry without a file attribute%0A if 'file' not in d:%0A continue%0A%0A # This entry is about a different file. We consider file names%0A # without extension to handle header files which do not have%0A # an entry in the database.%0A d_file_noext = path.splitext(
d%5B'f
@@ -623,17 +623,19 @@
'file'%5D)
-%0A
+%5B0%5D
%0A
@@ -642,48 +642,63 @@
if
-fmatch.group(1) == dmatch.group(1):%0A
+d_file_noext != curr_file_noext:%0A continue%0A%0A
@@ -762,36 +762,32 @@
%5D):%0A
-
opts.append(resu
@@ -791,36 +791,32 @@
esult.group(0))%0A
-
for resu
@@ -874,36 +874,32 @@
%5D):%0A
-
opts.append(resu
@@ -907,28 +907,24 @@
t.group(0))%0A
-
for
@@ -982,36 +982,32 @@
%5D):%0A
-
opts.append('-I'
@@ -1053,20 +1053,16 @@
up(1)))%0A
-
|
cf056c8840e224a1f15478832434c7df33fa97f8
|
Limit Urban Dictionary definition to 300 characters
|
plugins/urbandict/plugin.py
|
plugins/urbandict/plugin.py
|
import logging
from cardinal.decorators import command, help
import requests
from twisted.internet import defer
from twisted.internet.threads import deferToThread
URBANDICT_API_PREFIX = 'http://api.urbandictionary.com/v0/define'
class UrbanDictPlugin:
def __init__(self):
self.logger = logging.getLogger(__name__)
@defer.inlineCallbacks
@command(['ud', 'urbandict'])
@help('Returns the top Urban Dictionary definition for a given word.')
@help('Syntax: .ud <word>')
def get_ud(self, cardinal, user, channel, msg):
try:
word = msg.split(' ', 1)[1]
except IndexError:
cardinal.sendMsg(channel, 'Syntax: .ud <word>')
return
try:
url = URBANDICT_API_PREFIX
r = yield deferToThread(requests.get, url, params={'term': word})
data = r.json()
entry = data['list'].pop(0)
definition = entry['definition']
thumbs_up = entry['thumbs_up']
thumbs_down = entry['thumbs_down']
link = entry['permalink']
response = 'UD [%s]: %s [\u25b2%d|\u25bc%d] - %s' % (
word, definition, thumbs_up, thumbs_down, link
)
cardinal.sendMsg(channel, response)
except Exception:
self.logger.exception("Error with definition: %s", word)
cardinal.sendMsg(channel,
"Could not retrieve definition for %s" % word)
entrypoint = UrbanDictPlugin
|
Python
| 0.998394
|
@@ -949,16 +949,119 @@
nition'%5D
+%5B0:300%5D%0A if definition != entry%5B'definition'%5D:%0A definition = definition + %22%E2%80%A6%22
%0A
|
7903c7604a54a8786a5d4b658c224b6d28ed43af
|
Add iterator for lists
|
popeui/widgets/structure.py
|
popeui/widgets/structure.py
|
from .base import BaseContainer
from .abstract import HeadLink
class Document(BaseContainer):
"""
A document. Analogous to the HTML ``<html>`` element.
"""
html_tag = "html"
def __init__(self, id, view, classname=None, parent=None, **kwargs):
"""
:param view: :class:`~.application.View` in which the document is declared.
"""
super(Document, self).__init__(id, classname, parent, **kwargs)
self.view = view
class Head(BaseContainer):
html_tag = "head"
def load_script(self, id, path):
"""
Proper way to dynamically inject a script in a page.
:param path: Path of the script to inject.
"""
self.view.dispatch({'name': 'script', 'path': path})
def load_stylesheet(self, id, path):
"""
Proper way to dynamically inject a stylesheet in a page.
:param path: Path of the stylesheet to inject.
"""
self.add_child(HeadLink(id=id, link_type="stylesheet", path=path))
class Body(BaseContainer):
"""
Simple container analogous to the html ``<body>`` element.
"""
html_tag = "body"
class Panel(BaseContainer):
"""
Simple container analogous to the html ``<div>`` element.
"""
html_tag = "div"
class List(BaseContainer):
"""
Bridges python and HTML lists. :class:`List` exposes an interface similar to
python lists and takes care of updating the corresponding HTML ``<ul>`` when the python object is updated.
"""
html_tag = "ul"
def __init__(self, id, classname=None, parent=None, **kwargs):
super(List, self).__init__(id, classname, parent, **kwargs)
self._count = 0
self._items = []
def append(self, widget):
"""
Append a widget to the list.
:param widget: Object inheriting :class:`~.widgets.base.BaseElement`
"""
li_itm = _li(id=self.id + str(self._count))
li_itm.add_child(widget)
self.add_child(li_itm)
self._items.append((widget, li_itm))
self._count += 1
def remove(self, widget):
"""
Remove a widget from the list.
:param widget: Object inheriting :class:`~.widgets.base.BaseElement`
"""
raw = list(filter(lambda x: x[0] == widget, self._items))
if raw:
itm, wrapped = raw[0]
self._items.remove(raw[0])
self.remove_child(wrapped)
else:
raise ValueError("Child not in list.")
def __len__(self):
return len(self._items)
def __getitem__(self, index):
return self._items[index][0]
def __setitem__(self, index, widget):
old_li = self._items[index]
li_itm = _li(id=old_li[1].id)
li_itm.add_child(widget)
old_wid = self.children[index]
self.replace_child(old_wid, li_itm)
self._items[index] = (widget, li_itm)
class _li(BaseContainer):
html_tag = "li"
|
Python
| 0.000002
|
@@ -2300,16 +2300,90 @@
ist.%22)%0A%0A
+ def __iter__(self):%0A return iter(list(%5Bx%5B0%5D for x in self._items%5D))%0A%0A
def __
|
19b6aecd0cc2a1447c0f659d3aa5565e66c4a7e7
|
Handle uniqueness in generators
|
populous/generators/base.py
|
populous/generators/base.py
|
import random
from cached_property import cached_property
from faker import Factory
from populous.exceptions import ValidationError
from populous.generators.vars import Expression
fake = Factory.create()
class BaseGenerator(object):
def __init__(self, item, field_name, **kwargs):
self.item = item
self.field_name = field_name
self.blueprint = item.blueprint
self.get_arguments(**kwargs)
def __iter__(self):
return self
def next(self):
return next(self.iterator)
@cached_property
def iterator(self):
return iter(self.get_generator())
def get_generator(self):
return self.generate()
def generate(self):
raise NotImplementedError()
def get_arguments(self, shadow=False, **kwargs):
# should this field be written in the table?
self.shadow = shadow
if kwargs:
raise ValidationError(
"Item '{}', field '{}': Got extra param(s) for generator "
"'{}': {}".format(
self.item.name, self.field_name, type(self).__name__,
', '.join(kwargs.keys())
)
)
def evaluate(self, value):
if isinstance(value, Expression):
return value.evaluate(**self.blueprint.vars)
return value
class NullableMixin(object):
def get_arguments(self, nullable=0, **kwargs):
super(NullableMixin, self).get_arguments(**kwargs)
if not nullable:
self.nullable = 0
else:
self.nullable = 0.5 if nullable is True else nullable
def get_generator(self):
if self.nullable:
return self.generate_with_null()
return super(NullableMixin, self).get_generator()
def generate_with_null(self):
for value in super(NullableMixin, self).get_generator():
if random.random() <= self.nullable:
yield None
else:
yield value
class Generator(NullableMixin, BaseGenerator):
pass
|
Python
| 0.000001
|
@@ -1996,24 +1996,671 @@
eld value%0A%0A%0A
+class UniquenessMixin(object):%0A%0A def get_arguments(self, unique=False, **kwargs):%0A super(UniquenessMixin, self).get_arguments(**kwargs)%0A%0A self.unique = unique%0A%0A def get_generator(self):%0A if self.unique:%0A return self.generate_uniquely()%0A return super(UniquenessMixin, self).get_generator()%0A%0A def generate_uniquely(self):%0A seen = set() # TODO: use a bloom filter instead%0A for value in super(UniquenessMixin, self).get_generator():%0A if value in seen:%0A # TODO: avoid inifinite loops%0A continue%0A seen.add(value)%0A yield value%0A%0A%0A
class Genera
@@ -2677,16 +2677,33 @@
leMixin,
+ UniquenessMixin,
BaseGen
|
e43d0190c00e9b0b0f2cc72900ac288d33fae435
|
add missing names to easy:consume_args, PavementError
|
trunk/paver/easy.py
|
trunk/paver/easy.py
|
import subprocess
import sys
from paver import tasks
from paver.options import Bunch
def dry(message, func, *args, **kw):
"""Wraps a function that performs a destructive operation, so that
nothing will happen when a dry run is requested.
Runs func with the given arguments and keyword arguments. If this
is a dry run, print the message rather than running the function."""
info(message)
if tasks.environment.dry_run:
return
return func(*args, **kw)
def error(message, *args):
"""Displays an error message to the user."""
tasks.environment.error(message, *args)
def info(message, *args):
"""Displays a message to the user. If the quiet option is specified, the
message will not be displayed."""
tasks.environment.info(message, *args)
def debug(message, *args):
"""Displays a message to the user, but only if the verbose flag is
set."""
tasks.environment.debug(message, *args)
def sh(command, capture=False, ignore_error=False):
"""Runs an external command. If capture is True, the output of the
command will be captured and returned as a string. If the command
has a non-zero return code raise a BuildFailure. You can pass
ignore_error=True to allow non-zero return codes to be allowed to
pass silently, silently into the night.
If the dry_run option is True, the command will not
actually be run."""
def runpipe():
p = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE)
p.wait()
if p.returncode and not ignore_error:
raise BuildFailure(p.returncode)
return p.stdout.read()
if capture:
return dry(command, runpipe)
else:
returncode = dry(command, subprocess.call, command, shell=True)
if returncode and not ignore_error:
raise BuildFailure("Subprocess return code: %s" % returncode)
class _SimpleProxy(object):
__initialized = False
def __init__(self, rootobj, name):
self.__rootobj = rootobj
self.__name = name
self.__initialized = True
def __get_object(self):
return getattr(self.__rootobj, self.__name)
def __getattr__(self, attr):
return getattr(self.__get_object(), attr)
def __setattr__(self, attr, value):
if self.__initialized:
setattr(self.__get_object(), attr, value)
else:
super(_SimpleProxy, self).__setattr__(attr, value)
def __call__(self, *args, **kw):
return self.__get_object()(*args, **kw)
def __str__(self):
return str(self.__get_object())
def __repr__(self):
return repr(self.__get_object())
environment = _SimpleProxy(tasks, "environment")
options = _SimpleProxy(environment, "options")
call_pavement = tasks.call_pavement
task = tasks.task
needs = tasks.needs
cmdopts = tasks.cmdopts
BuildFailure = tasks.BuildFailure
# these are down here to avoid circular dependencies. Ideally, nothing would
# be using paver.easy other than pavements.
if sys.version_info > (2,5):
from paver.path25 import path, pushd
else:
from paver.path import path
import paver.misctasks
|
Python
| 0.99623
|
@@ -2924,41 +2924,111 @@
pts%0A
-BuildFailure = tasks.BuildFailure
+consume_args = tasks.consume_args%0ABuildFailure = tasks.BuildFailure%0APavementError = tasks.PavementError
%0A%0A#
|
a0e8acde31c32b23a453f73ec2996ce71d647dab
|
Add hard coded list_locations for VPS.net
|
libcloud/drivers/vpsnet.py
|
libcloud/drivers/vpsnet.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# libcloud.org licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
VPS.net driver
"""
from libcloud.providers import Provider
from libcloud.types import NodeState
from libcloud.base import Node, Response, ConnectionUserAndKey, NodeDriver, NodeSize, NodeImage
import base64
# JSON is included in the standard library starting with Python 2.6. For 2.5
# and 2.4, there's a simplejson egg at: http://pypi.python.org/pypi/simplejson
try: import json
except: import simplejson as json
API_HOST = 'vps.net'
API_VERSION = 'api10json'
RAM_PER_NODE = 256
DISK_PER_NODE = 10
BANDWIDTH_PER_NODE = 250
PRICE_PER_NODE = {1: 20,
2: 19,
3: 18,
4: 17,
5: 16,
6: 15,
7: 14,
15: 13,
30: 12,
60: 11,
100: 10}
class VPSNetResponse(Response):
def parse_body(self):
try:
js = json.loads(self.body)
return js
except ValueError:
return self.body
def parse_error(self):
try:
errors = json.loads(self.body)['errors'][0]
except ValueError:
return self.body
else:
return "\n".join(errors)
class VPSNetConnection(ConnectionUserAndKey):
host = API_HOST
responseCls = VPSNetResponse
def add_default_headers(self, headers):
user_b64 = base64.b64encode('%s:%s' % (self.user_id, self.key))
headers['Authorization'] = 'Basic %s' % (user_b64)
return headers
class VPSNetNodeDriver(NodeDriver):
type = Provider.VPSNET
name = "vps.net"
connectionCls = VPSNetConnection
def _to_node(self, vm):
if vm['running']:
state = NodeState.RUNNING
else:
state = NodeState.PENDING
n = Node(id=vm['id'],
name=vm['label'],
state=state,
public_ip=vm.get('primary_ip_address', None),
private_ip=None,
driver=self.connection.driver)
return n
def _to_image(self, image, cloud):
image = NodeImage(id=image['id'],
name="%s: %s" % (cloud, image['label']),
driver=self.connection.driver)
return image
def _to_size(self, num):
size = NodeSize(id=num,
name="%d Node" % (num,),
ram=RAM_PER_NODE * num,
disk=DISK_PER_NODE,
bandwidth=BANDWIDTH_PER_NODE * num,
price=self._get_price_per_node(num) * num,
driver=self.connection.driver)
return size
def _get_price_per_node(self, num):
keys = sorted(PRICE_PER_NODE.keys())
if num >= max(keys):
return PRICE_PER_NODE[keys[-1]]
for i in range(0,len(keys)):
if keys[i] <= num < keys[i+1]:
return PRICE_PER_NODE[keys[i]]
def create_node(self, name, image, size, **kwargs):
headers = {'Content-Type': 'application/json'}
request = {'virtual_machine':
{'label': name,
'fqdn': kwargs.get('fqdn', ''),
'system_template_id': image.id,
'backups_enabled': kwargs.get('backups_enabled', 0),
'slices_required': size.id}}
res = self.connection.request('/virtual_machines.%s' % (API_VERSION,),
data=json.dumps(request),
headers=headers,
method='POST')
node = self._to_node(res.object['virtual_machine'])
return node
def reboot_node(self, node):
res = self.connection.request('/virtual_machines/%s/%s.%s' %
(node.id, 'reboot', API_VERSION),
method="POST")
node = self._to_node(res.object['virtual_machine'])
return True
def list_sizes(self):
res = self.connection.request('/nodes.%s' % (API_VERSION,))
available_nodes = len([size for size in res.object
if not size['slice']["virtual_machine_id"]])
sizes = [self._to_size(i) for i in range(1,available_nodes + 1)]
return sizes
def destroy_node(self, node):
res = self.connection.request('/virtual_machines/%s.%s' % (node.id, API_VERSION),
method='DELETE')
return res.status == 200
def list_nodes(self):
res = self.connection.request('/virtual_machines.%s' % (API_VERSION,))
return [self._to_node(i['virtual_machine']) for i in res.object]
def list_images(self):
res = self.connection.request('/available_clouds.%s' % (API_VERSION,))
images = []
for cloud in res.object:
label = cloud['cloud']['label']
templates = cloud['cloud']['system_templates']
images.extend([self._to_image(image, label) for image in templates])
return images
|
Python
| 0.000001
|
@@ -974,16 +974,30 @@
odeImage
+, NodeLocation
%0A%0Aimport
@@ -5918,8 +5918,106 @@
images%0A
+%0A def list_locations(self):%0A return %5BNodeLocation(0, %22VPS.net Western US%22, 'us', self)%5D%0A
|
3dc0b7baee5640d12dc8a910810db4cd3931ac09
|
Update various docstrings.
|
colour/models/rgb/transfer_functions/red_log.py
|
colour/models/rgb/transfer_functions/red_log.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
RED Log Encodings
=================
Defines the *RED* log encodings:
- :func:`log_encoding_REDLog`
- :func:`log_decoding_REDLog`
See Also
--------
`RGB Colourspaces Jupyter Notebook
<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\
blob/master/notebooks/models/rgb.ipynb>`_
References
----------
.. [1] Sony Imageworks. (2012). make.py. Retrieved November 27, 2014, from
https://github.com/imageworks/OpenColorIO-Configs/\
blob/master/nuke-default/make.py
"""
from __future__ import division, unicode_literals
import numpy as np
from colour.models.rgb.transfer_functions import (
log_encoding_Cineon,
log_decoding_Cineon)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2016 - Colour Developers'
__license__ = 'New BSD License - http://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = 'colour-science@googlegroups.com'
__status__ = 'Production'
__all__ = ['log_encoding_REDLog',
'log_decoding_REDLog',
'log_encoding_REDLogFilm',
'log_decoding_REDLogFilm']
def log_encoding_REDLog(x,
black_offset=10 ** ((0 - 1023) / 511)):
"""
Defines the *REDLog* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Examples
--------
>>> log_encoding_REDLog(0.18) # doctest: +ELLIPSIS
0.6376218...
"""
x = np.asarray(x)
return ((1023 +
511 * np.log10(x * (1 - black_offset) + black_offset)) / 1023)
def log_decoding_REDLog(y,
black_offset=10 ** ((0 - 1023) / 511)):
"""
Defines the *REDLog* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Examples
--------
>>> log_decoding_REDLog(0.637621845988175) # doctest: +ELLIPSIS
0.1...
"""
y = np.asarray(y)
return (((10 **
((1023 * y - 1023) / 511)) - black_offset) /
(1 - black_offset))
def log_encoding_REDLogFilm(x,
black_offset=10 ** ((95 - 685) / 300)):
"""
Defines the *REDLogFilm* log encoding curve / opto-electronic transfer
function.
Parameters
----------
x : numeric or array_like
Linear data :math:`x`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Non-linear data :math:`y`.
Examples
--------
>>> log_encoding_REDLogFilm(0.18) # doctest: +ELLIPSIS
0.4573196...
"""
return log_encoding_Cineon(x, black_offset)
def log_decoding_REDLogFilm(y,
black_offset=10 ** ((95 - 685) / 300)):
"""
Defines the *REDLogFilm* log decoding curve / electro-optical transfer
function.
Parameters
----------
y : numeric or array_like
Non-linear data :math:`y`.
black_offset : numeric or array_like
Black offset.
Returns
-------
numeric or ndarray
Linear data :math:`x`.
Examples
--------
>>> log_decoding_REDLogFilm(0.457319613085418) # doctest: +ELLIPSIS
0.1799999...
"""
return log_decoding_Cineon(y, black_offset)
|
Python
| 0
|
@@ -178,16 +178,88 @@
_REDLog%60
+%0A- :func:%60log_encoding_REDLogFilm%60%0A- :func:%60log_decoding_REDLogFilm%60
%0A%0ASee Al
|
c4b408bdf84333a5e41d10ee3d46f926069b5548
|
Delete deprecated with_coverage task
|
lutrisweb/settings/test.py
|
lutrisweb/settings/test.py
|
from base import * # noqa
DEBUG = False
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS += (
'django_jenkins',
)
JENKINS_TASKS = (
'django_jenkins.tasks.with_coverage',
'django_jenkins.tasks.run_pylint',
'django_jenkins.tasks.run_pep8',
)
PROJECT_APPS = (
'games',
'accounts',
'common'
)
|
Python
| 0.000012
|
@@ -218,50 +218,8 @@
= (%0A
- 'django_jenkins.tasks.with_coverage',%0A
|
421ace15d779cc686aa83489c0e965bbeabe49b9
|
Update script to repeat test set experiment 10 times
|
cptm/experiment_testset_without_perspectives.py
|
cptm/experiment_testset_without_perspectives.py
|
"""Script to extract a document/topic matrix for a set of text documents.
The corpus is not divided in perspectives.
Used to calculate theta for the CAP vragenuurtje data.
"""
import logging
import argparse
import pandas as pd
import os
from CPTCorpus import CPTCorpus
from cptm.utils.experiment import get_sampler, thetaFileName, load_config, \
topicFileName
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(levelname)s : %(message)s', level=logging.DEBUG)
parser = argparse.ArgumentParser()
parser.add_argument('json', help='json file containing experiment '
'configuration.')
parser.add_argument('data_dir', help='dir containing the input data.')
parser.add_argument('out_dir', help='dir to write results to.')
args = parser.parse_args()
params = load_config(args.json)
input_dir = [args.data_dir]
topicDict = params.get('outDir').format('topicDict.dict')
opinionDict = params.get('outDir').format('opinionDict.dict')
phi_topic_file = topicFileName(params)
phi_topic = pd.read_csv(phi_topic_file, index_col=0, encoding='utf-8').values.T
#print phi_topic.shape
#print phi_topic
corpus = CPTCorpus(input=input_dir, topicDict=topicDict,
opinionDict=opinionDict, testSplit=100, file_dict=None,
topicLines=params.get('topicLines'),
opinionLines=params.get('opinionLines'))
print str(corpus)
params['outDir'] = args.out_dir
sampler = get_sampler(params, corpus, nTopics=params.get('nTopics'),
initialize=False)
sampler._initialize(phi_topic=phi_topic)
sampler.run()
sampler.estimate_parameters(start=params.get('sampleEstimateStart'),
end=params.get('sampleEstimateEnd'))
logger.info('saving files')
documents = []
for persp in corpus.perspectives:
print str(persp)
for f in persp.testFiles:
p, b = os.path.split(f)
documents.append(b)
theta = sampler.theta_to_df(sampler.theta, documents)
theta.to_csv(thetaFileName(params), encoding='utf8')
|
Python
| 0
|
@@ -1423,17 +1423,73 @@
out_dir%0A
-%0A
+nTopics = params.get('nTopics')%0A%0Afor i in range(10):%0A
sampler
@@ -1530,31 +1530,21 @@
ics=
-params.get('
nTopics
-')
,%0A
+
@@ -1579,16 +1579,20 @@
=False)%0A
+
sampler.
@@ -1624,16 +1624,20 @@
_topic)%0A
+
sampler.
@@ -1642,16 +1642,20 @@
r.run()%0A
+
sampler.
@@ -1711,24 +1711,28 @@
ateStart'),%0A
+
@@ -1785,16 +1785,20 @@
End'))%0A%0A
+
logger.i
@@ -1818,16 +1818,20 @@
iles')%0A%0A
+
document
@@ -1837,16 +1837,20 @@
ts = %5B%5D%0A
+
for pers
@@ -1879,16 +1879,20 @@
es:%0A
+
+
print st
@@ -1900,16 +1900,20 @@
(persp)%0A
+
for
@@ -1942,16 +1942,20 @@
+
+
p, b = o
@@ -1974,24 +1974,28 @@
(f)%0A
+
documents.ap
@@ -2002,16 +2002,20 @@
pend(b)%0A
+
theta =
@@ -2060,16 +2060,20 @@
uments)%0A
+
theta.to
@@ -2081,30 +2081,124 @@
csv(
-thetaFileName(params),
+os.path.join(params%5B'outDir'%5D,%0A 'theta_%7B%7D_%7B%7D.csv'.format(nTopics, i)),%0A
enc
|
e9064d4dda3cbf2abb7f2a16ebceee62d5b9d004
|
Remove last real reference to locations/by_name
|
custom/ilsgateway/tanzania/handlers/register.py
|
custom/ilsgateway/tanzania/handlers/register.py
|
import re
from django.contrib.auth.models import User
from corehq.apps.locations.models import Location
from corehq.apps.sms.mixin import PhoneNumberInUseException, VerifiedNumber
from corehq.apps.users.models import CommCareUser
from custom.ilsgateway.tanzania.handlers import get_location
from custom.ilsgateway.tanzania.handlers.keyword import KeywordHandler
from custom.ilsgateway.models import ILSGatewayConfig
from custom.ilsgateway.tanzania.reminders import REGISTER_HELP, Languages, \
REGISTRATION_CONFIRM_DISTRICT, REGISTRATION_CONFIRM, Roles
from custom.logistics.commtrack import add_location
DISTRICT_PREFIXES = ['d', 'm', 'tb', 'tg', 'dm', 'mz', 'mt', 'mb', 'ir', 'tb', 'ms']
class RegisterHandler(KeywordHandler):
DISTRICT_REG_DELIMITER = ":"
def help(self):
self.respond(REGISTER_HELP)
def _get_facility_location(self, domain, msd_code):
sp = get_location(domain, None, msd_code)
return sp['location']
def _get_district_location(self, domain, sp):
locs = Location.view('locations/by_name',
startkey=[domain, "DISTRICT", sp],
endkey=[domain, "DISTRICT", sp + "z"],
reduce=False,
include_docs=True)
if len(locs) > 1:
locs = Location.view('locations/by_name',
startkey=[domain, "DISTRICT", sp],
endkey=[domain, "DISTRICT", sp],
reduce=False,
include_docs=True)
return locs[0]
def handle(self):
text = ' '.join(self.msg.text.split()[1:])
is_district = False
sp = ""
msd_code = ""
if text.find(self.DISTRICT_REG_DELIMITER) != -1:
phrases = [x.strip() for x in text.split(":")]
if len(phrases) != 2:
self.respond(REGISTER_HELP)
return
name = phrases[0]
sp = phrases[1]
role = Roles.DISTRICT_PHARMACIST
message = REGISTRATION_CONFIRM_DISTRICT
params = {}
is_district = True
else:
names = []
msd_codes = []
location_regex = '^({prefs})\d+'.format(prefs='|'.join(p.lower() for p in DISTRICT_PREFIXES))
for the_string in self.args:
if re.match(location_regex, the_string.strip().lower()):
msd_codes.append(the_string.strip().lower())
else:
names.append(the_string)
name = " ".join(names)
if len(msd_codes) != 1:
self.respond(REGISTER_HELP)
return
else:
[msd_code] = msd_codes
role = Roles.IN_CHARGE
message = REGISTRATION_CONFIRM
params = {
"msd_code": msd_code
}
if not self.user:
domains = [config.domain for config in ILSGatewayConfig.get_all_configs()]
for domain in domains:
if is_district:
loc = self._get_district_location(domain, sp)
else:
loc = self._get_facility_location(domain, msd_code)
if not loc:
continue
splited_name = name.split(' ', 1)
first_name = splited_name[0]
last_name = splited_name[1] if len(splited_name) > 1 else ""
clean_name = name.replace(' ', '.')
username = "%s@%s.commcarehq.org" % (clean_name, domain)
password = User.objects.make_random_password()
user = CommCareUser.create(domain=domain, username=username, password=password,
commit=False)
user.first_name = first_name
user.last_name = last_name
try:
user.set_default_phone_number(self.msg.phone_number.replace('+', ''))
user.save_verified_number(domain, self.msg.phone_number.replace('+', ''), True, self.msg.backend_api)
except PhoneNumberInUseException as e:
v = VerifiedNumber.by_phone(self.msg.phone_number, include_pending=True)
v.delete()
user.save_verified_number(domain, self.msg.phone_number.replace('+', ''), True, self.msg.backend_api)
except CommCareUser.Inconsistent:
continue
user.language = Languages.DEFAULT
params.update({
'sdp_name': loc.name,
'contact_name': name
})
user.user_data = {
'role': role
}
dm = user.get_domain_membership(domain)
dm.location_id = loc._id
user.save()
add_location(user, loc._id)
if params:
self.respond(message, **params)
|
Python
| 0.000008
|
@@ -90,16 +90,19 @@
import
+SQL
Location
@@ -1025,404 +1025,102 @@
-locs = Location.view('locations/by_name',%0A startkey=%5Bdomain, %22DISTRICT%22, sp%5D,%0A endkey=%5Bdomain, %22DISTRICT%22, sp + %22z%22%5D,%0A reduce=False,%0A include_docs=True)%0A if len(locs) %3E 1:%0A locs = Location.view('locations/by_name',%0A startkey=%5Bdomain,
+return SQLLocation.objects.filter(%0A domain=domain,%0A location_type__name=
%22DIS
@@ -1118,37 +1118,32 @@
name=%22DISTRICT%22,
- sp%5D,
%0A
@@ -1143,60 +1143,15 @@
- endkey=%5Bdomain, %22DISTRICT%22,
+name=
sp
-%5D
,%0A
@@ -1160,118 +1160,9 @@
- reduce=False,%0A include_docs=True)%0A return locs
+)
%5B0%5D%0A
|
0fe7cd8cf316dc6d4ef547d733b634de64fc768c
|
Add more options on filters
|
dbaas/dbaas_services/analyzing/admin/analyze.py
|
dbaas/dbaas_services/analyzing/admin/analyze.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django_services import admin
from dbaas_services.analyzing.service import AnalyzeRepositoryService
from dbaas_services.analyzing.forms import AnalyzeRepositoryForm
class AnalyzeRepositoryAdmin(admin.DjangoServicesAdmin):
form = AnalyzeRepositoryForm
service_class = AnalyzeRepositoryService
search_fields = ("database_name", "engine_name",
"environment_name", "instance_name", "databaseinfra_name")
list_filter = ("analyzed_at", "memory_alarm", "cpu_alarm", "volume_alarm")
list_display = ("analyzed_at", "databaseinfra_name", "database_name", "engine_name",
"environment_name", "instance_name", "cpu_alarm",
"memory_alarm", "volume_alarm")
|
Python
| 0
|
@@ -590,16 +590,92 @@
e_alarm%22
+, %22engine_name%22,%0A %22environment_name%22, %22databaseinfra_name%22
)%0A li
|
4605dfb434d7a934e2fce39f96d73e66f17a682b
|
Handle missing settings file
|
i2vbot/settings.py
|
i2vbot/settings.py
|
#!/usr/bin/env python2
# Ampuni aku... :(
import pickle
SETTINGS_FILE = "data/settings.pickle"
def loadSettings(settingsFile = SETTINGS_FILE):
with open(settingsFile, 'r') as f:
try:
return pickle.load(f)
except:
return {}
def saveSettings(settings, settingsFile = SETTINGS_FILE):
with open(settingsFile, 'w') as f:
pickle.dump(settings, f, -1)
|
Python
| 0.000001
|
@@ -131,32 +131,45 @@
SETTINGS_FILE):%0A
+ try:%0A
with open(se
@@ -195,21 +195,8 @@
f:%0A
- try:%0A
@@ -225,20 +225,16 @@
load(f)%0A
-
exce
@@ -237,20 +237,16 @@
except:%0A
-
|
8bc3e371690ef28609f1999a4a3dabc0dd453850
|
Correct serve() to call serve_one() not listen_one()
|
uhttpsrv/uhttpsrv.py
|
uhttpsrv/uhttpsrv.py
|
import socket
class uHTTPsrv:
PROTECTED = [b'__init__', b'listen_once', b'listen', b'response_header', b'__qualname__', b'__module__', b'address', b'port', b'backlog', b'in_buffer_len', b'debug']
def __init__(self, address='', port=80, backlog=1, in_buffer_len=1024, debug=False):
self.address = address
self.port = port
self.backlog = backlog
self.in_buffer_len = in_buffer_len
self.debug = debug
self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._socket.bind((self.address, self.port))
self._socket.listen(self.backlog)
def serve_one(self):
conn,addr = self._socket.accept()
request = conn.recv(self.in_buffer_len)
request = request.rsplit(b'\r\n')
if self.debug:
for line in request:
print(line)
method = request[0].rsplit(b' ')[0].decode('utf-8')
print(method)
if method.lower() not in self.PROTECTED:
if hasattr(self, method):
response = self.response_header(200) + \
eval('self.'+method+'(self,request)')
else:
response=self.response_header(501)
else:
response = self.response_header(501)
if self.debug:
for line in response:
print(line)
conn.send(response)
conn.close()
def serve(self):
while True:
self.listen_once(self)
def response_header(self, code):
return b'HTTP/1.1 ' + str(code) + b'\nConnection: close\n\n'
|
Python
| 0.000169
|
@@ -1217,26 +1217,24 @@
%09%09%09self.
-listen
+serve
_on
-c
e(self)%0A
|
d69aa85c74482354ff8788fb7b9692f0aee6d311
|
Fix it.
|
iepy/preprocess.py
|
iepy/preprocess.py
|
import logging
logger = logging.getLogger(__name__)
class PreProcessPipeline(object):
"""Coordinates the pre-processing tasks on a set of documents"""
def __init__(self, step_runners, documents_manager):
"""Takes a list of callables and a documents-manager.
Step Runners may be any callable. It they have an attribute step,
then that runner will be treated as the responsible for
accomplishing such a PreProcessStep.
"""
self.step_runners = step_runners
self.documents = documents_manager
def walk_document(self, doc):
"""Computes all the missing pre-process steps for the given document"""
for step in self.step_runners:
step(doc)
return
def process_step_in_batch(self, runner):
"""Tries to apply the required step to all documents lacking it"""
logger.info('Starting preprocessing step %s', runner)
if hasattr(runner, 'step'):
docs = self.documents.get_documents_lacking_preprocess(runner.step)
else:
docs = self.documents # everything
for i, doc in enumerate(docs):
runner(doc)
logger.info('\tDone for %i documents', i + 1)
def process_everything(self):
"""Tries to apply all the steps to all documents"""
for runner in self.step_runners:
self.process_step_in_batch(runner)
class BasePreProcessStepRunner(object):
# If it's for a particular step, you can write
# step = PreProcessSteps.something
def __init__(self, override=False):
self.override = override
def __call__(self, doc):
# You'll have to:
# - Check if the document satisfies pre-conditions, and if not, do nothing
# - Explicitely store pre process results on the document
# - Based on the "override" paramenter, and on your checks to see if the step
# was already done or not, decide if you will
# - skip
# - re-do step.
raise NotImplementedError
|
Python
| 0.999982
|
@@ -971,16 +971,40 @@
'step')
+ and not runner.override
:%0A
|
e5f7f120a6574c0bf7d09c5188931a914ab6849b
|
Use the correct implementation of as_symbol
|
devito/dse/inspection.py
|
devito/dse/inspection.py
|
from collections import OrderedDict
from sympy import Function, Indexed, Number, Symbol, cos, preorder_traversal, sin
from devito.dimension import Dimension
from devito.dse.search import retrieve_indexed, retrieve_ops, search
from devito.dse.queries import q_timedimension
from devito.logger import warning
from devito.tools import flatten
__all__ = ['estimate_cost', 'estimate_memory', 'indexify', 'as_symbol']
def count(exprs, query):
"""
Return a mapper ``{(k, v)}`` where ``k`` is a sub-expression in ``exprs``
matching ``query`` and ``v`` is the number of its occurrences.
"""
mapper = OrderedDict()
for expr in exprs:
found = search(expr, query, 'all', 'bfs')
for i in found:
mapper.setdefault(i, 0)
mapper[i] += 1
return mapper
def estimate_cost(handle, estimate_functions=False):
"""Estimate the operation count of ``handle``.
:param handle: a SymPy expression or an iterator of SymPy expressions.
:param estimate_functions: approximate the operation count of known
functions (eg, sin, cos).
"""
external_functions = {sin: 50, cos: 50}
try:
# Is it a plain SymPy object ?
iter(handle)
except TypeError:
handle = [handle]
try:
# Is it a dict ?
handle = handle.values()
except AttributeError:
try:
# Must be a list of dicts then
handle = flatten([i.values() for i in handle])
except AttributeError:
pass
try:
# At this point it must be a list of SymPy objects
# We don't use SymPy's count_ops because we do not count integer arithmetic
# (e.g., array index functions such as i+1 in A[i+1])
# Also, the routine below is *much* faster than count_ops
handle = [i.rhs if i.is_Equality else i for i in handle]
operations = flatten(retrieve_ops(i) for i in handle)
flops = 0
for op in operations:
if op.is_Function:
if estimate_functions:
flops += external_functions.get(op.__class__, 1)
else:
flops += 1
else:
flops += len(op.args) - (1 + sum(True for i in op.args if i.is_Integer))
return flops
except:
warning("Cannot estimate cost of %s" % str(handle))
def estimate_memory(handle, mode='realistic'):
"""
Estimate the number of memory reads and writes.
:param handle: a SymPy expression or an iterator of SymPy expressions.
:param mode: Mode for computing the estimate:
Estimate ``mode`` might be any of: ::
* ideal: Also known as "compulsory traffic", which is the minimum
number of read/writes to be performed (ie, models an infinite cache).
* ideal_with_stores: Like ideal, but a data item which is both read.
and written is counted twice (ie both load an
store are counted).
* realistic: Assume that all datasets, even the time-independent ones,
need to be re-read at each time iteration.
"""
assert mode in ['ideal', 'ideal_with_stores', 'realistic']
def access(symbol):
assert isinstance(symbol, Indexed)
# Irregular accesses (eg A[B[i]]) are counted as compulsory traffic
if any(i.atoms(Indexed) for i in symbol.indices):
return symbol
else:
return symbol.base
try:
# Is it a plain SymPy object ?
iter(handle)
except TypeError:
handle = [handle]
if mode in ['ideal', 'ideal_with_stores']:
filter = lambda s: any(q_timedimension(i) for i in s.atoms())
else:
filter = lambda s: s
reads = set(flatten([retrieve_indexed(e.rhs) for e in handle]))
writes = set(flatten([retrieve_indexed(e.lhs) for e in handle]))
reads = set([access(s) for s in reads if filter(s)])
writes = set([access(s) for s in writes if filter(s)])
if mode == 'ideal':
return len(set(reads) | set(writes))
else:
return len(reads) + len(writes)
def as_symbol(obj):
"""
Turn ``obj`` into a :class:`sympy.Symbol`. Raise ``TypeError`` if no
conversion rule for the type of ``obj`` is known.
"""
try:
return Number(obj)
except (TypeError, ValueError):
pass
if isinstance(obj, str):
return Symbol(obj)
elif isinstance(obj, Dimension):
return Symbol(obj.name)
elif obj.is_Symbol:
return obj
elif isinstance(obj, Indexed):
return obj.base.label
elif isinstance(obj, Function):
try:
# Can be converted only if no args or if it's a Devito.Function
if len(obj.args) == 0 or obj.is_AbstractSymbol:
return Symbol(obj.__class__.__name__)
except AttributeError:
raise TypeError("Cannot symbolify Function w/ args %s" % str(obj.args))
else:
raise TypeError("Cannot symbolify from type %s" % type(obj))
def indexify(expr):
"""
Convert functions into indexed matrix accesses in sympy expression.
:param expr: sympy function expression to be converted.
"""
replacements = {}
for e in preorder_traversal(expr):
if hasattr(e, 'indexed'):
replacements[e] = e.indexify()
return expr.xreplace(replacements)
|
Python
| 0.006325
|
@@ -4176,19 +4176,20 @@
_symbol(
-obj
+expr
):%0A %22
@@ -4199,129 +4199,53 @@
-Turn %60%60obj%60%60 into a :class:%60sympy.Symbol%60. Raise %60%60TypeError%60%60 if no%0A conversion rule for the type of %60%60obj%60%60 is known
+Extract the %22main%22 symbol from a SymPy object
.%0A
@@ -4281,19 +4281,20 @@
Number(
-obj
+expr
)%0A ex
@@ -4354,19 +4354,20 @@
nstance(
-obj
+expr
, str):%0A
@@ -4388,19 +4388,20 @@
Symbol(
-obj
+expr
)%0A el
@@ -4414,19 +4414,20 @@
nstance(
-obj
+expr
, Dimens
@@ -4454,19 +4454,20 @@
Symbol(
-obj
+expr
.name)%0A
@@ -4474,19 +4474,20 @@
elif
-obj
+expr
.is_Symb
@@ -4505,19 +4505,20 @@
return
-obj
+expr
%0A eli
@@ -4530,19 +4530,20 @@
nstance(
-obj
+expr
, Indexe
@@ -4561,19 +4561,20 @@
return
-obj
+expr
.base.la
@@ -4601,181 +4601,25 @@
nce(
-obj, Function):%0A try:%0A # Can be converted only if no args or if it's a Devito.Function%0A if len(obj.args) == 0 or obj.is_AbstractSymbol:%0A
+expr, Function):%0A
@@ -4636,19 +4636,20 @@
Symbol(
-obj
+expr
.__class
@@ -4669,39 +4669,14 @@
- except AttributeError:%0A
+else:%0A
@@ -4707,104 +4707,22 @@
not
-symbolify Function w/ args %25s%22 %25 str(obj.args))%0A else:%0A raise TypeError(%22Canno
+extrac
t symbol
ify
@@ -4717,19 +4717,16 @@
t symbol
-ify
from ty
@@ -4739,19 +4739,20 @@
%25 type(
-obj
+expr
))%0A%0A%0Adef
|
f076e4d1fc13c1aea3000b19cd4b51f404378d0e
|
Update docs and bucket subdirectory path for each build.
|
.buildkite/upload_artifacts.py
|
.buildkite/upload_artifacts.py
|
"""
# Requirements:
* Generate access token in your Github account, then create environment variable GITHUB_ACCESS_TOKEN.
- e.g export GITHUB_ACCESS_TOKEN=1ns3rt-my-t0k3n-h3re.
* Generate a service account key for your Google API credentials, then create environment variable GOOGLE_APPLICATION_CREDENTIALS.
- e.g export GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json.
# Environment Variable/s:
* IS_KOLIBRI_RELEASE = Upload artifacts to the Google Cloud as a release candidate.
* GITHUB_ACCESS_TOKEN = Personal access token used to authenticate in your Github account via API.
* BUILDKITE_BUILD_NUMBER = Build identifier for each directory created.
* BUILDKITE_PULL_REQUEST = Pull request issue or the value is false.
*
"""
import json
import requests
import os
import sys
import logging
from os import listdir
from gcloud import storage
logging.getLogger().setLevel(logging.INFO)
ACCESS_TOKEN = os.getenv("GITHUB_ACCESS_TOKEN")
REPO_OWNER = "learningequality"
REPO_NAME = "kolibri"
ISSUE_ID = os.getenv("BUILDKITE_PULL_REQUEST")
RELEASE_DIR = 'release'
PROJECT_PATH = os.path.join(os.getcwd())
# Python packages artifact location
DIST_DIR = os.path.join(PROJECT_PATH, "dist")
# Installer artifact location
INSTALLER_DIR = os.path.join(PROJECT_PATH, "installer")
def create_github_comment(artifacts):
"""Create an comment on github.com using the given dict."""
url = 'https://api.github.com/repos/%s/%s/issues/%s/comments' % (REPO_OWNER, REPO_NAME, ISSUE_ID)
session = requests.Session()
exe_file, exe_url = None, None
pex_file, pex_url = None, None
whl_file, whl_url = None, None
zip_file, zip_url = None, None
tar_gz_file, tar_gz_url = None, None
for file_data in artifacts:
if file_data.get("name").endswith(".exe"):
exe_file = file_data.get("name")
exe_url = file_data.get("media_url")
if file_data.get("name").endswith(".pex"):
pex_file = file_data.get("name")
pex_url = file_data.get("media_url")
if file_data.get("name").endswith(".whl"):
whl_file = file_data.get("name")
whl_url = file_data.get("media_url")
if file_data.get("name").endswith(".zip"):
zip_file = file_data.get("name")
zip_url = file_data.get("media_url")
if file_data.get("name").endswith(".tar.gz"):
tar_gz_file = file_data.get("name")
tar_gz_url = file_data.get("media_url")
comment_message = {'body':
"## Build Artifacts\r\n"
"**Kolibri Installers**\r\n"
"Windows Installer: [%s](%s)\r\n\r\n"
# "Mac Installer: Mac.dmg\r\n"
# "Debian Installer: Debian.deb\r\n\r\n"
"**Python packages**\r\n"
"Pex: [%s](%s)\r\n"
"Whl file: [%s](%s)\r\n"
"Zip file: [%s](%s)\r\n"
"Tar file: [%s](%s)\r\n"
% (exe_file, exe_url, pex_file, pex_url, whl_file, whl_url, zip_file, zip_url,
tar_gz_file, tar_gz_url)}
headers = {'Authorization': 'token %s'% ACCESS_TOKEN}
r = session.post(url, json.dumps(comment_message), headers=headers)
if r.status_code == 201:
logging.info('Successfully created Github comment(%s).' % url)
else:
logging.info('Error encounter(%s). Now exiting!' % r.status_code)
sys.exit(1)
def collect_local_artifacts():
"""
Create a dict of the artifact name and the location.
"""
artifacts_dict = []
def create_artifact_data(artifact_dir):
for artifact in listdir(artifact_dir):
data = {"name": artifact,
"file_location": "%s/%s" % (artifact_dir, artifact)}
logging.info("Collect file data: (%s)" % data)
artifacts_dict.append(data)
create_artifact_data(DIST_DIR)
create_artifact_data(INSTALLER_DIR)
return artifacts_dict
def upload_artifacts():
"""
Upload the artifacts on the Google Cloud Storage.
Create a comment on the pull requester with artifact media link.
"""
client = storage.Client()
bucket = client.bucket("le-downloads")
artifacts = collect_local_artifacts()
is_release = os.getenv("IS_KOLIBRI_RELEASE")
build_id = os.getenv("BUILDKITE_BUILD_NUMBER")
if os.getenv("BUILDKITE_PULL_REQUEST") != "false":
build_id = ISSUE_ID
for file_data in artifacts:
logging.info("Uploading file (%s)" % (file_data.get("name")))
if is_release:
blob = bucket.blob('kolibri/%s/%s' % (RELEASE_DIR, file_data.get("name")))
else:
blob = bucket.blob('kolibri/buildkite/build-%s/%s' % (build_id, file_data.get("name")))
blob.upload_from_filename(filename=file_data.get("file_location"))
blob.make_public()
file_data.update({'media_url': blob.media_link})
if os.getenv("BUILDKITE_PULL_REQUEST") != "false":
create_github_comment(artifacts)
def main():
upload_artifacts()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -770,17 +770,75 @@
.%0A *
+GOOGLE_APPLICATION_CREDENTIALS = Your service account key.
%0A
-
%22%22%22%0Aimpo
@@ -1142,16 +1142,64 @@
QUEST%22)%0A
+BUILD_ID = os.getenv(%22BUILDKITE_BUILD_NUMBER%22)%0A%0A
%0ARELEASE
@@ -1473,16 +1473,21 @@
%0A %22%22%22
+%0A
Create a
@@ -1531,16 +1531,21 @@
en dict.
+%0A
%22%22%22%0A
@@ -4629,91 +4629,8 @@
R%22)%0A
- if os.getenv(%22BUILDKITE_PULL_REQUEST%22) != %22false%22:%0A build_id = ISSUE_ID%0A
@@ -4791,24 +4791,27 @@
olibri/%25s/%25s
+/%25s
' %25 (RELEASE
@@ -4815,16 +4815,26 @@
ASE_DIR,
+ BUILD_ID,
file_da
@@ -4929,21 +4929,34 @@
s/%25s
+/%25s
' %25 (
-build_id
+ISSUE_ID, BUILD_ID
, fi
|
42743ac90ede1d9d78c892f1cee033c5e5a66c9b
|
fix typo in docker update script
|
.travis/docker/update_image.py
|
.travis/docker/update_image.py
|
#!/usr/bin/env python3
import os
import subprocess
import sys
cc_mapping = {'gcc': 'g++', 'clang': 'clang++'}
thisdir = os.path.dirname(os.path.abspath(__file__))
def update(commit, cc):
gdt_super_dir = os.path.join(thisdir, '..', '..',)
dockerfile = os.path.join(thisdir, 'dune-gdt-testing', 'Dockerfile')
os.chdir(gdt_super_dir)
cxx = cc_mapping[cc]
commit = commit.replace('/', '_')
repo = 'dunecommunity/dune-gdt-testing_{}'.format(cc)
subprocess.check_call(['docker', 'build', '--no-cache=true', '-f', dockerfile,
'-t', '{}:{}'.format(repo, commit), '--build-arg', 'cc={}'.format(cc),
'--build-arg', 'cxx={}'.format(cxx), '--build-arg', 'commit={}'.format(commit),
'.'])
subprocess.check_call(['docker', '--log-level="debug"', 'push', repo])
if __name__ == '__main__':
if len(sys.argv) > 2:
ccs = [sys.argv[1]]
commmits = [sys.argv[2]]
else:
ccs = list(cc_mapping.keys())
commits = ['master']
subprocess.check_call(['docker', 'pull', 'dunecommunity/testing-base:latest'])
for b in commits:
for c in ccs:
update(b, c)
subprocess.check_call(['docker', '--log-level="debug"', 'images'])
|
Python
| 0.000005
|
@@ -160,16 +160,17 @@
le__))%0A%0A
+%0A
def upda
@@ -852,16 +852,17 @@
repo%5D)%0A%0A
+%0A
if __nam
@@ -946,17 +946,16 @@
comm
-m
its = %5Bs
|
4bddcadd7b177b764d2ee12370b635ec31f12288
|
change jumping
|
server/server1.py
|
server/server1.py
|
import socket, sys, commands, re
import random, time, math
import xlrd
#controller default PORT and IP
UDP_IP = 'controller-host'
UDP_PORT_HELLO = 7777
UDP_PORT_INFO = 7778
UDP_OUT_PORT=5005
def getGreenEnergyValue(location_id, worksheet, row):
energyValue = worksheet.cell(row,location_id).value
return float(energyValue)
def getDelayValue():
return 10
def getCPUValue():
return 10
def main():
print "Excel parsing"
workbook = xlrd.open_workbook('Solar_Test.xlsx')
worksheet = workbook.sheet_by_index(0)
date = int(sys.argv[2]) + 2
location_id = int(sys.argv[1])
print "Location ID is", location_id
date_counter = 1
row = 1
print "Get the date"
while True:
cell_data = getGreenEnergyValue(0,worksheet,date_counter)
if not cell_data:
date_counter = date_counter + 1
elif(int(cell_data) == date):
row = date_counter
break
else:
date_counter = date_counter + 1
#
#Server hello message
message = "Hello"# + ";" + hostname + ";" + mac_address
print "send the hello message"
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
sock.bind(('', UDP_OUT_PORT))
sock.settimeout(5)
while True:
sock.sendto(message, (UDP_IP, UDP_PORT_HELLO))
print "Sending hello message"
time.sleep(1)
try:
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
except socket.timeout:
print "No data available"
continue
if data=="404":
continue
break
print "received message:", data
split_line = re.split(';',data)
parameters = tuple(re.split(',',split_line[0]))
timeout = int(split_line[1])
id = split_line[2]
print "Timeout", timeout
print "Parameters", parameters
sock.settimeout(100)
v = 0.0
metrics = 0
while True:
message=''
for p in parameters:
if (p == 'GE'):
v_temp = getGreenEnergyValue(location_id, worksheet, row)
v = float(v_temp)
print v
elif (p == 'DL'):
v = getDelayValue()
elif (p == 'CPU'):
v = getCPUValue()
message += str(v) + ',' #sending only value for now
message = message[:-1] + ";" + id
sock.sendto(message, (UDP_IP, UDP_PORT_INFO))
try:
data, addr = sock.recvfrom(1024) # buffer size is 1024 bytes
except socket.timeout:
print "Send it again"
continue
if data == "404":
return
print "OK received"
row += 2 #jumping
time.sleep(int(timeout))
if __name__ == "__main__":
while True:
main()
|
Python
| 0.000003
|
@@ -2422,17 +2422,17 @@
row +=
-2
+1
#jumpin
|
45ee385204d4a38ea904228d2648d266309332ab
|
fix shutdown command
|
server/sockets.py
|
server/sockets.py
|
import asyncio
from aiohttp import web
import socketio
import hexdump
from log import logname
import frame
from hardware import Hardware
from version import version_info
import os
logger = logname("sockets")
class WSnamespace(socketio.AsyncNamespace):
def __init__(self, namespace='/sockets'):
super().__init__(namespace)
self.sio = None
self.hw = Hardware()
async def on_connect(self, sid, environ):
logger.info("connected %s", sid)
await self.sio.emit('connected', {
'tcs_ver' : version_info,
'firmware_ver' : self.hw.getFirmwareVersion(),
'wifi_dongle' : self.hw.getWirelessAdapterInfo(),
'video_devices': self.hw.getCameraInfo()
}, namespace="/sockets")
async def on_motors(self, sid, payload):
self.hw.setMotors(payload)
await self.sio.emit('response', "motors set", namespace="/sockets")
async def on_manipulator(self, sid, payload):
self.hw.setManipulator(payload)
await self.sio.emit('response', 'manipulator set', namespace="/sockets")
async def on_gripper(self, sid, payload):
self.hw.setGripper(payload)
await self.sio.emit('response', 'gripper set', namespace="/sockets")
async def on_battery(self, sid):
battery_status = self.hw.getBattery()
await self.sio.emit('battery', battery_status, namespace="/sockets")
async def on_signal(self, sid):
signal_strength = self.hw.getSignal()
await self.sio.emit('signal', signal_strength, namespace="/sockets")
async def on_temperature(self, sid):
temperature = self.hw.getTemperature()
await self.sio.emit('temperature', temperature, namespace="/sockets")
async def on_system_shutdown(self, sid):
os.system('poweroff')
class WSserver():
def __init__(self, app):
super().__init__()
self.sio = None
self.app = app
self.namespace = WSnamespace('/sockets')
def start(self):
self.sio = socketio.AsyncServer(async_mode='aiohttp')
self.sio.register_namespace(self.namespace)
self.namespace.sio = self.sio
self.sio.attach(self.app)
|
Python
| 0.000005
|
@@ -172,16 +172,34 @@
mport os
+%0Aimport subprocess
%0A%0Alogger
@@ -1779,15 +1779,8 @@
on_
-system_
shut
@@ -1808,18 +1808,24 @@
-os.system(
+subprocess.run(%5B
'pow
@@ -1834,10 +1834,10 @@
off'
+%5D
)
-
%0A%0A%0Ac
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.