| commit<br>stringlengths 40–40 | subject<br>stringlengths 1–3.25k | old_file<br>stringlengths 4–311 | new_file<br>stringlengths 4–311 | old_contents<br>stringlengths 0–26.3k | lang<br>stringclasses 3 values | proba<br>float64 0–1 | diff<br>stringlengths 0–7.82k |
|---|---|---|---|---|---|---|---|
f8239afefa436b9da5e01416cdc5dcadd058a51f
|
Update models
|
app/models.py
|
app/models.py
|
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
from app import db
class User(UserMixin, db.Model):
    """This class represents the user table."""

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), nullable=False, unique=True)
    email = db.Column(db.String(256), nullable=False, unique=True)
    user_password = db.Column(db.String(255), nullable=False)
    bucketlists = db.relationship('BucketList', order_by="BucketList.id",
                                  cascade="all,delete-orphan")

    def __init__(self, username, password, email):
        self.username = username
        self.password = password
        self.email = email

    @property
    def password(self):
        raise AttributeError('You cannot access password')

    @password.setter
    def password(self):
        self.user_password = generate_password_hash(self.password)

    def verify_password(self, password):
        return check_password_hash(self.password_hash, password)


class BucketList(db.Model):
    """This class represents the bucketlist table."""

    __tablename__ = 'bucketlists'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    date_modified = db.Column(
        db.DateTime, default=db.func.current_timestamp(),
        onupdate=db.func.current_timestamp())
    created_by = db.Column(db.Integer, db.ForeignKey(User.id))

    def __init__(self, name):
        """initialize with name."""
        self.name = name

    def save(self):
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def get_all():
        return BucketList.query.all()

    def delete(self):
        db.session.delete(self)
        db.session.commit()

    def __repr__(self):
        return "<BucketList: {}>".format(self.name)


class BucketListItem(db.Model):
    """This class represents the bucketlist_item table"""

    __tablename__ = 'bucketlistitems'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    date_created = db.Column(db.DateTime, default=db.func.current_timestamp())
    date_modified = db.Column(db.DateTime, default=db.func.current_timestamp(),
                              onupdate=db.func.current_timestamp())
    done = db.Column(db.Boolean, default=False)
    bucketlist_id = db.Column(db.Integer, db.ForeignKey(BucketList.id))

    def __init__(self, name, bucketlist_id):
        """Initialize with name and bucketlist_id"""
        self.name = name
        self.bucketlist_id = bucketlist_id

    def save(self):
        db.session.add(self)
        db.session.commit()

    @staticmethod
    def get_all_items():
        return BucketListItem.query.filter_by(bucketlist_id=BucketList.id)

    def delete(self):
        db.session.delete(self)
        db.session.commit()
|
Python
| 0
|
@@ -1,12 +1,40 @@
+import datetime%0A%0Aimport jwt%0A
from flask_l
@@ -128,16 +128,46 @@
ord_hash
+%0Afrom flask import current_app
%0A%0Afrom a
@@ -954,32 +954,42 @@
ef password(self
+, password
):%0A self.
@@ -1019,37 +1019,32 @@
e_password_hash(
-self.
password)%0A%0A d
@@ -1119,24 +1119,29 @@
sh(self.
+user_
password
_hash, p
@@ -1132,21 +1132,16 @@
password
-_hash
, passwo
@@ -1143,16 +1143,1414 @@
assword)
+%0A%0A def generate_auth_token(self, user_id):%0A %22%22%22 Generates the access token%22%22%22%0A%0A try:%0A # set up a payload with an expiration time%0A payload = %7B%0A 'exp': datetime.datetime.utcnow() + datetime.timedelta(minutes=10),%0A 'iat': datetime.datetime.utcnow(),%0A 'sub': user_id%0A %7D%0A # create the byte string token using the payload and the SECRET key%0A jwt_string = jwt.encode(%0A payload,%0A current_app.config.get('SECRET_KEY'),%0A algorithm='HS256'%0A )%0A return jwt_string%0A%0A except Exception as e:%0A # return an error in string format if an exception occurs%0A return str(e)%0A%0A @staticmethod%0A def verify_token(token):%0A %22%22%22Decodes the access token from the Authorization header.%22%22%22%0A try:%0A # try to decode the token using our SECRET variable%0A payload = jwt.decode(token, current_app.config.get('SECRET_KEY'))%0A return payload%5B'sub'%5D%0A except jwt.ExpiredSignatureError:%0A # the token is expired, return an error string%0A return %22Expired token. Please login to get a new token.%22%0A except jwt.InvalidTokenError:%0A # the token is invalid, return an error string%0A return %22Invalid token. Please register or login.%22
%0A%0A%0Aclass
@@ -3053,16 +3053,28 @@
lf, name
+, created_by
):%0A
@@ -3128,16 +3128,53 @@
e = name
+%0A self.created_by = created_by
%0A%0A de
|
52f945b55bb804cef808c7d8ac62712cd9cc5afb
|
print bias avalue as well
|
code/similarity.py
|
code/similarity.py
|
""" This module implements the idea of finding out emotions similarities
by using the experiments similar to what Hinton describes in his NRelu paper."""
import restrictedBoltzmannMachine as rbm
import numpy as np
import theano
from theano import tensor as T
from common import *
from similarity_utils import *
theanoFloat = theano.config.floatX
class Trainer(object):

    def __init__(self, input1, input2, net):
        self.w = theano.shared(value=np.float32(0))
        self.b = theano.shared(value=np.float32(0))
        self.net = net

        self.oldDw = theano.shared(value=np.float32(0))
        self.oldDb = theano.shared(value=np.float32(0))
        self.oldDWeights = theano.shared(value=np.zeros(self.net.weights.shape, dtype=theanoFloat))
        self.oldDBias = theano.shared(value=np.zeros(self.net.biases[1].shape, dtype=theanoFloat))

        hiddenBias = net.sharedBiases[1]
        # Do I need to add all biases? Probably only the hidden ones
        self.params = [self.w, self.b, self.net.sharedWeights, hiddenBias]
        self.oldDParams = [self.oldDw, self.oldDb, self.oldDWeights, self.oldDBias]

        _, weightForHidden = rbm.testWeights(self.net.sharedWeights,
            visibleDropout=self.net.visibleDropout, hiddenDropout=self.net.hiddenDropout)

        hiddenActivations1 = T.nnet.sigmoid(T.dot(input1, weightForHidden) + hiddenBias)
        hiddenActivations2 = T.nnet.sigmoid(T.dot(input2, weightForHidden) + hiddenBias)

        # Here i have no sampling
        cos = cosineDistance(hiddenActivations1, hiddenActivations2)
        prob = 1.0 / (1.0 + T.exp(self.w * cos + self.b))
        self.output = prob


class SimilarityNet(object):

    # TODO: add sizes and activation functions here as well
    # plus rbm learning rates
    def __init__(self, learningRate, maxMomentum, rbmNrVis, rbmNrHid, rbmLearningRate,
                 rbmDropoutVis, rbmDropoutHid, binary):
        self.learningRate = learningRate
        self.binary = binary
        self.rbmNrVis = rbmNrVis
        self.maxMomentum = maxMomentum
        self.rbmNrHid = rbmNrHid
        self.rbmLearningRate = rbmLearningRate
        self.rbmDropoutHid = rbmDropoutHid
        self.rbmDropoutVis = rbmDropoutVis

    def _trainRBM(self, data1, data2):
        data = np.vstack([data1, data2])
        # TODO: activation function change
        activationFunction = T.nnet.sigmoid
        net = rbm.RBM(self.rbmNrVis, self.rbmNrHid, self.rbmLearningRate,
                      hiddenDropout=self.rbmDropoutHid,
                      visibleDropout=self.rbmDropoutVis,
                      binary=self.binary,
                      visibleActivationFunction=activationFunction,
                      hiddenActivationFunction=activationFunction,
                      rmsprop=True,
                      nesterov=True)
        net.train(data)
        return net

    def train(self, data1, data2, similarities, miniBatchSize=10, epochs=100):
        nrMiniBatches = len(data1) / miniBatchSize
        miniBatchIndex = T.lscalar()
        momentum = T.fscalar()

        net = self._trainRBM(data1, data2)

        data1 = theano.shared(np.asarray(data1, dtype=theanoFloat))
        data2 = theano.shared(np.asarray(data2, dtype=theanoFloat))
        similarities = theano.shared(np.asarray(similarities, dtype=theanoFloat))

        # The mini-batch data is a matrix
        x = T.matrix('x', dtype=theanoFloat)
        y = T.matrix('y', dtype=theanoFloat)
        self.x = x
        self.y = y

        z = T.vector('z', dtype=theanoFloat)

        trainer = Trainer(x, y, net)
        self.trainer = trainer

        error = T.sum(T.sqr(trainer.output - z))

        updates = self.buildUpdates(trainer, error, momentum)

        # Now you have to define the theano function
        discriminativeTraining = theano.function(
            inputs=[miniBatchIndex, momentum],
            outputs=[trainer.output],
            updates=updates,
            givens={
                x: data1[miniBatchIndex * miniBatchSize:(miniBatchIndex + 1) * miniBatchSize],
                y: data2[miniBatchIndex * miniBatchSize:(miniBatchIndex + 1) * miniBatchSize],
                z: similarities[miniBatchIndex * miniBatchSize:(miniBatchIndex + 1) * miniBatchSize],
            })

        for epoch in xrange(epochs):
            momentum = np.float32(min(np.float32(0.5) + epoch * np.float32(0.1),
                                      np.float32(0.95)))
            for miniBatch in xrange(nrMiniBatches):
                discriminativeTraining(miniBatch, momentum)

        print trainer.w.get_value()

    def test(self, testData1, testData2):
        # If it is too slow try adding mini batches
        testData1 = np.array(testData1, dtype=theanoFloat)
        testData2 = np.array(testData2, dtype=theanoFloat)

        # TODO : think of making data1 and data2 shared
        testFunction = theano.function(
            inputs=[],
            outputs=[self.trainer.output],
            givens={self.x: testData1,
                    self.y: testData2
                    })
        return testFunction()

    # You can add momentum and all that here as well
    def buildUpdates(self, trainer, error, momentum):
        updates = []
        gradients = T.grad(error, trainer.params)
        for param, oldParamUpdate, gradient in zip(trainer.params, trainer.oldDParams, gradients):
            paramUpdate = momentum * oldParamUpdate - self.learningRate * gradient
            updates.append((param, param + paramUpdate))
            updates.append((oldParamUpdate, paramUpdate))
        return updates


def cosineDistance(first, second):
    normFirst = T.sum(T.sqrt(first), axis=1)
    normSecond = T.sum(T.sqrt(second), axis=1)
    return T.sum(first * second, axis=1) / (normFirst * normSecond)


# Here you need different measures than 0, 1 according to what you want it to learn
# for the emotions part
def defineSimilartyMesures():
    None
|
Python
| 0
|
@@ -4302,16 +4302,47 @@
_value()
+%0A print triner.b.get_value()
%0A%0A def
|
23865e7155974dbc9a9be3d9e6c51ed7b96200ea
|
add next to profile form
|
poweredsites/forms/profile.py
|
poweredsites/forms/profile.py
|
# -*- coding: utf-8 -*-
#
# Copyright(c) 2010 poweredsites.org
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from formencode import validators
from tornado.options import options
from poweredsites.forms.base import BaseForm, URL
from poweredsites.libs import const
class ProfileForm(BaseForm):
    email = validators.Email(not_empty=True, resolve_domain=False, max=120)
    username = validators.PlainText(not_empty=True, strip=True)
    blog_name = validators.String(not_empty=False, max=40, strip=True)
    blog_url = URL(not_empty=False, max=600, add_http=True)

    def __after__(self):
        try:
            v = self._values
            length = len(v["username"])
            if length < 3 or length > 40:
                self.add_error("username", "Username should be more than three and less than forty charaters.")

            self._handler.db.execute(
                "UPDATE user SET username = %s, email = %s, status_ = %s, \
                blog_name = %s, blog_url = %s WHERE id = %s",
                v['username'].lower(), v['email'], const.Status.ACTIVE, \
                v['blog_name'], v['blog_url'], self._handler.current_user.id
            )

            self._handler.set_secure_cookie("user", v['username'], domain=options.cookie_domain)
        except Exception, e:
            logging.error(str(e))
            self.add_error("username", "Save profile error, please try it later.")
|
Python
| 0
|
@@ -1101,24 +1101,80 @@
_http=True)%0D
+%0A next = validators.String(not_empty=False, max=600)%0D
%0A%0D%0A def _
|
2ac4385cffe2d999273f592778f7a4fd51e97672
|
Specifiy what capacity to use if it is not a number. SPOT-2226
|
spacescout_web/org_filters/uw_search.py
|
spacescout_web/org_filters/uw_search.py
|
from spacescout_web.org_filters import SearchFilter
class Filter(SearchFilter):

    def url_args(self, args):
        search_args = []

        if self.request.path != u'/':
            request_parts = self.request.path.split('/')
            campus = request_parts[1]

            if len(request_parts) > 2:
                params = request_parts[2].split('|')
            else:
                params = request_parts

            for param in params:
                if param.count(':') == 1 and param.find(':') > -1:
                    key, value = param.split(':')
                elif param.find(':') > -1:
                    parts = param.split(':')
                    key = parts[0]
                    parts = parts[1:]
                    for x in range(0, len(parts)):
                        if x == 0:
                            value = parts[x]
                        else:
                            value = value + ':' + parts[x]
                else:
                    key = param

                if key == 'type':
                    search_args.append({key: value})
                elif key == 'reservable':
                    search_args.append({'extended_info:reservable':
                                        'true'})
                elif key == 'cap':
                    try:
                        search_args.append({'capacity': int(value)})
                    except:
                        pass
                elif key == 'open':
                    search_args.append({'open_at': value})
                elif key == 'close':
                    search_args.append({'open_until': value})
                elif key == 'bld':
                    values = value.split(',')
                    for value in values:
                        value = value.replace(' ', '+')
                        search_args.append({'building_name': value})
                elif key == 'rwb':
                    search_args.append({'extended_info:has_whiteboards':
                                        'true'})
                elif key == 'rol':
                    search_args.append({'extended_info:has_outlets':
                                        'true'})
                elif key == 'rcp':
                    search_args.append({'extended_info:has_computers':
                                        'true'})
                elif key == 'rsc':
                    search_args.append({'extended_info:has_scanner':
                                        'true'})
                elif key == 'rpj':
                    search_args.append({'extended_info:has_projector':
                                        'true'})
                elif key == 'rpr':
                    search_args.append({'extended_info:has_printing':
                                        'true'})
                elif key == 'rds':
                    search_args.append({'extended_info:has_displays':
                                        'true'})
                elif key == 'natl':
                    search_args.append({'extended_info:has_natural_light':
                                        'true'})
                elif key == 'noise':
                    values = value.split(',')
                    for value in values:
                        search_args.append({'extended_info:noise_level':
                                            value})
                elif key == 'food':
                    values = value.split(',')
                    for value in values:
                        search_args.append({'extended_info:food_nearby':
                                            value})

        return search_args
|
Python
| 0.999993
|
@@ -1429,12 +1429,43 @@
-pass
+search_args.append(%7B'capacity': 1%7D)
%0A
|
fd929cd7792424dea9c309882afc713c0a863d2b
|
Add todo
|
src/blame.py
|
src/blame.py
|
import os
import subprocess
from urllib.parse import parse_qs, quote_plus, urlparse
import sublime
import sublime_plugin
from .templates import blame_phantom_css, blame_phantom_html_template
from .util import communicate_error, platform_startupinfo, view_is_suitable
class Blame(sublime_plugin.TextCommand):

    PHANTOM_KEY = "git-blame"

    # Overrides --------------------------------------------------

    def __init__(self, view):
        super().__init__(view)
        self.phantom_set = sublime.PhantomSet(view, self.PHANTOM_KEY)

    def run(self, edit, sha_skip_list=[], prevving=False):
        if not view_is_suitable(self.view):
            return

        phantoms = []
        self.erase_phantoms()

        # Before adding the phantom, see if the current phantom that is displayed is at the same spot at the selection
        if not prevving and self.phantom_set.phantoms:
            phantom_exists = self.view.line(self.view.sel()[0]) == self.view.line(
                self.phantom_set.phantoms[0].region
            )
            if phantom_exists:
                self.phantom_set.update(phantoms)
                return

        for region in self.view.sel():
            line = self.view.line(region)
            (row, col) = self.view.rowcol(region.begin())
            full_path = self.view.file_name()

            try:
                blame_output = self.get_blame(int(row) + 1, full_path, sha_skip_list)
            except Exception as e:
                communicate_error(e)
                return

            sha, user, date, time = self.parse_blame(blame_output)

            phantom = sublime.Phantom(
                line,
                blame_phantom_html_template.format(
                    css=blame_phantom_css,
                    sha=sha,
                    user=user,
                    date=date,
                    time=time,
                    # The SHA output by `git blame` may have a leading caret to indicate
                    # that it is a "boundary commit". That needs to be stripped before
                    # using the SHA programmatically for other purposes.
                    qs_sha_val=quote_plus(sha.strip("^")),
                    # Querystrings can contain the same key multiple times. We use that
                    # functionality to accumulate a list of SHAs to skip over when
                    # a [Prev] button has been clicked multiple times.
                    qs_skip_keyvals="&".join(
                        [
                            "skip={}".format(quote_plus(skipee))
                            for skipee in sha_skip_list
                        ]
                    ),
                ),
                sublime.LAYOUT_BLOCK,
                self.handle_phantom_button,
            )
            phantoms.append(phantom)

        self.phantom_set.update(phantoms)

    # ------------------------------------------------------------

    def get_blame(self, line, path, sha_skip_list):
        cmd_line = ["git", "blame", "--minimal", "-w", "-L {0},{0}".format(line)]
        for skipped_sha in sha_skip_list:
            cmd_line.extend(["--ignore-rev", skipped_sha])
        cmd_line.append(os.path.basename(path))
        # print(cmd_line)
        return subprocess.check_output(
            cmd_line,
            cwd=os.path.dirname(os.path.realpath(path)),
            startupinfo=platform_startupinfo(),
            stderr=subprocess.STDOUT,
        ).decode("utf-8")

    def parse_blame(self, blame):
        sha, file_path, user, date, time, tz_offset, *_ = blame.split()

        # Was part of the inital commit so no updates
        if file_path[0] == "(":
            user, date, time, tz_offset = file_path, user, date, time
            file_path = None

        # Fix an issue where the username has a space
        # Im going to need to do something better though if people
        # start to have multiple spaces in their names.
        if not date[0].isdigit():
            user = "{0} {1}".format(user, date)
            date, time = time, tz_offset

        return (sha, user[1:], date, time)

    def get_commit(self, sha, path):
        return subprocess.check_output(
            ["git", "show", "--no-color", sha],
            cwd=os.path.dirname(os.path.realpath(path)),
            startupinfo=platform_startupinfo(),
            stderr=subprocess.STDOUT,
        ).decode("utf-8")

    def handle_phantom_button(self, href):
        url = urlparse(href)
        querystring = parse_qs(url.query)
        # print(url)
        # print(querystring)

        if url.path == "copy":
            sublime.set_clipboard(querystring["sha"][0])
            sublime.status_message("Git SHA copied to clipboard")
        elif url.path == "show":
            sha = querystring["sha"][0]
            try:
                desc = self.get_commit(sha, self.view.file_name())
            except Exception as e:
                communicate_error(e)
                return
            buf = self.view.window().new_file()
            buf.run_command(
                "blame_insert_commit_description",
                {"desc": desc, "scratch_view_name": "commit " + sha},
            )
        elif url.path == "prev":
            sha = querystring["sha"][0]
            sha_skip_list = querystring.get("skip", [])
            if sha not in sha_skip_list:
                sha_skip_list.append(sha)
                self.run(None, sha_skip_list, prevving=True)
        elif url.path == "close":
            self.erase_phantoms()
        else:
            communicate_error(
                "No handler for URL path '{}' in phantom".format(url.path)
            )

    def erase_phantoms(self):
        self.view.erase_phantoms(self.PHANTOM_KEY)


class BlameInsertCommitDescription(sublime_plugin.TextCommand):

    # Overrides --------------------------------------------------

    def run(self, edit, desc, scratch_view_name):
        view = self.view
        view.set_scratch(True)
        view.assign_syntax("Packages/Diff/Diff.sublime-syntax")
        view.insert(edit, 0, desc)
        view.set_name(scratch_view_name)
|
Python
| 0.000002
|
@@ -5513,32 +5513,145 @@
rase_phantoms()%0A
+ # @todo Fix 2 key presses being needed to show a phantom again after closing it was closed using %5Bx%5D%0A
else:%0A
|
79cb55efa06b87fd999fa23a8d54bed4231b2967
|
fix some shallow bugs in submission.py ensemble
|
code/submission.py
|
code/submission.py
|
from __future__ import print_function
import grasp
import ensemble
import importlib
import sys
import os
import subprocess
from multiprocessing import Process
import yaml
import json
with open("SETTINGS.json") as file:
config = json.load(file)
with open("final_nets.yml") as file:
all_net_kwargs = yaml.load(file)
for x in all_net_kwargs:
if ('dropch' in x) and x.pop('dropch'):
x['ch'] = range(2,32)
def check_output_names(net_kwargs):
names = set()
for kwargs in net_kwargs:
name = kwargs["output_name"]
if name in names:
raise ValueError("duplicate output name", name)
names.add(name)
check_output_names(all_net_kwargs)
# We weighted know good results (0.97+ on public leaderboard by 2 relative to the
# the other results).
ensemble_weights = { 'net_stf7.csv':2,
"net_stf7b.csv":2,
"net_stf7i.csv":2,
'net_stf7m.csv':2,
'net_stf7_fea6_150e20_LPF_00_100_dense1024_val6_allfreqdata.csv':2,
# We intended to weight this net by two, but another net that
# was supposed to be in here net_stf7m_v3i somehow ended up
# being replaced by a duplicate of net_stf7b_v3i so it was
# effectively weighted by 4.
'net_stf7b_v3i.csv' : 4,
}
def run_only_kwargs(kwargs):
kwargs = kwargs.copy()
for key in ['min_freq', 'max_freq', 'validation', "train_size", "valid_size"]:
_ = kwargs.pop(key, None)
return kwargs
def run_net(i, run_type="run"):
kwargs = all_net_kwargs[i].copy()
print("*"*64)
mod_name = kwargs.pop("net")
output_name = kwargs.pop("output_name")
dump_path = os.path.join(config["MODEL_PATH"], output_name) + ".dump"
csv_path = os.path.join(config["SUBMISSION_PATH"], output_name) + ".csv"
print("Loading module", mod_name, "for net", output_name)
mod = importlib.import_module("nets." + mod_name)
factory = getattr(mod, 'create_net')
if run_type == "dry":
kwargs = run_only_kwargs(kwargs)
items = ["{0}={1}".format(k,v) for (k,v) in sorted(kwargs.items())]
argstring = ", ".join(['None', 'None'] + items)
print("Instantiating:", "{0}.create_net({1})".format(mod_name, argstring))
net = factory(None, None, **kwargs)
print("Would normally dump results to:", csv_path)
else:
if os.path.exists(dump_path):
print(dump_path, "already exists; skipping training")
print("Executing:", "info = load({0})".format(dump_path))
info = grasp.load(dump_path)
else:
if run_type in ("test_dump", "test_csv"):
kwargs["max_epochs"] = 1
kwargs["epoch_boost"] = 0
dump_path += ".test"
csv_path += ".test"
items = ["{0}={1}".format(k,v) for (k,v) in sorted(kwargs.items())]
argstring = ", ".join(["{0}.create_net".format(mod_name)] + items)
print("Executing:", "info = train_all({1})".format(mod_name, argstring))
info = grasp.train_all(factory, **kwargs)
print("Executing:", "dump(info, '{0}')".format(dump_path))
grasp.dump(info, dump_path)
if run_type != "test_dump":
if os.path.exists(csv_path) or os.path.exists(csv_path + ".gz"):
print(csv_path, "already exists; skipping")
return
print("Executing: make_submission(info)")
grasp.make_submission(info, csv_path)
def submitted_net_names():
return [x['output_name'] for x in all_net_kwargs]
def worker(offset, run_type):
# flags = THEANO_FLAGS[offset % len(THEANO_FLAGS)]
env = os.environ.copy()
env["THEANO_FLAGS"] = config["theano_flags"][offset%len(config["theano_flags"])]
n_workers = config["submission_workers"]
for i in range(offset, len(all_net_kwargs), n_workers):
print("processing", all_net_kwargs[i]["output_name"])
print(env["THEANO_FLAGS"] )
output_name = all_net_kwargs[i]["output_name"]
csv_path = os.path.join(config["SUBMISSION_PATH"], output_name) + ".csv"
with open(csv_path + ".log", 'w') as log:
subprocess.check_call(["python", "submission.py", run_type, str(i)], stdout=log, env=env
)
if __name__ == "__main__":
help = """`python submission.py` -- list this help message.
`python submission.py run <N>` -- train net #N.
`python submission.py run` -- train all nets. This will take a LONG time.
`python submission.py ensemble -- compute the weighted average used in final submission.
The directories for train, test, dumped models and csv output files are
set in SETTINGS.json.
When running all nets, the programs spreads the load out over `submission_workers`
processes. Mulitple GPUs can be used by specifying an appropriate set of flags in
`theano_flags`. Both of these can be found in SETTINGS.json.
Note that this first checks if the dump file for a given net exists, if so it uses
that, if not, it retrains the net (slow). Then it checks if the csv file exists for
this net, creating it if it doesn't exist.
The submitted nets that are availble to run are:"""
if len(sys.argv) == 1:
print(help)
for i, x in enumerate(submitted_net_names()):
print(" {0}: {1}".format(i, x))
else:
assert len(sys.argv) in [2,3], sys.argv
run_type = sys.argv[1]
if len(sys.argv) == 3:
assert run_type in ["run", "test_csv", "test_dump", "dry"], run_type
which = int(sys.argv[2])
run_net(which, run_type)
else:
if run_type == "ensemble":
assert 1 == 0
output_path = os.path.join(config["submission_dir"], "ensemble.csv")
input_paths = [x["output_path"] for x in all_net_kwargs]
ensemble.naive_ensemble(output_path, input_paths, ensemble_weights)
else:
jobs = [Process(target=worker, args=(i, run_type)) for i in range(config["submission_workers"])]
for p in jobs:
p.start()
for p in jobs:
p.join()
|
Python
| 0.000001
|
@@ -5813,38 +5813,8 @@
e%22:%0A
- assert 1 == 0%0A
@@ -5864,22 +5864,23 @@
ig%5B%22
-submission_dir
+SUBMISSION_PATH
%22%5D,
@@ -5930,24 +5930,74 @@
= %5B
-x%5B%22output_path%22%5D
+os.path.join(config%5B%22SUBMISSION_PATH%22%5D, x%5B%22output_name%22%5D) + '.csv'
for
|
c22d4f5aa412b6aa624212bf5728c94fbef5d375
|
Modify attributes for Bucketlist Model, Modify relationship between User model and Bucketlist Model
|
app/models.py
|
app/models.py
|
from datetime import datetime
from passlib.apps import custom_app_context as pwd_context
from app import db
class User(db.Model):
    """This class represents the users database table."""

    __tablename__ = "users"

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(255), unique=True)
    password = db.Column(db.String(255), nullable=False)
    created_at = db.Column(db.DateTime, default=datetime.utcnow().isoformat())
    bucketlist = db.relationship('BucketList', backref='user')

    def __init__(self, username, password):
        self.username = username
        self.password = self.hash_password(password)

    def hash_password(self, password):
        return pwd_context.encrypt(password)

    def verify_password(self, password):
        return pwd_context.verify(password, self.password)

    def __repr__(self):
        return '<User %r>' % self.username


class Bucketlist(db.Model):
    """This is class represents the bucketlist database table."""

    __tablename__ = 'bucketlists'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255))
    user_id = db.Column(db.Integer, db.ForeignKey("user.id"))
    items = db.relationship('Item', backref='bucketlist',
                            cascade='all, delete', lazy='dynamic')
    created_at = db.Column(db.DateTime, default=datetime.utcnow().isoformat())


class Item(db.Model):
    """This class represents bucketlist items table. """

    __tablename__ = 'bucketlist_items'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(255), nullable=False)
    bucketlist_id = db.Column(db.Integer, db.ForeignKey(
        'bucketlist.id', ondelete='CASCADE'), nullable=False)
    created_at = db.Column(db.DateTime,
                           default=datetime.utcnow().isoformat())
    done = db.Column(db.Boolean, default=False)

    def __repr__(self):
        return '<Item %s>' % (self.name)
|
Python
| 0
|
@@ -915,17 +915,17 @@
s Bucket
-l
+L
ist(db.M
@@ -1029,17 +1029,16 @@
cketlist
-s
'%0A%0A i
@@ -1173,16 +1173,17 @@
ey(%22user
+s
.id%22))%0A
|
560e5ac6134f30797d72c8bfe70a442ecacf210a
|
Return 0 in __dirs_content_valid, to be fixed
|
pilot/test/test_copytools_mv.py
|
pilot/test/test_copytools_mv.py
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors: Pavlo Svirin <pavlo.svirin@gmail.com>
import unittest
import string
import tempfile
import shutil
import random
import os
import os.path
from pilot.copytool.mv import copy_in, copy_out
# from pilot.common.exception import StageInFailure, StageOutFailure
from pilot.util.container import execute
from pilot.control.job import get_fake_job
from pilot.info import JobData
class TestCopytoolMv(unittest.TestCase):
    """
    Unit tests for mv copytool.
    """

    #filelist = []
    numFiles = 10
    maxFileSize = 100 * 1024

    def setUp(self):
        """ Create temp destination directory """
        self.tmp_dst_dir = tempfile.mkdtemp()
        """ Create temp source directory """
        self.tmp_src_dir = tempfile.mkdtemp()
        #self.filelist = []

        # need a job data object, but we will overwrite some of its info
        res = get_fake_job()
        jdata = JobData(res)

        inFiles = ""
        fsize = ""
        realDatasetsIn = ""
        GUID = ""
        checksum = ""
        scope = ""
        ddmEndPointIn = ""

        """ Create temp files in source dir """
        for i in range(0, self.numFiles):
            # generate random name
            fname = ''.join(random.choice(string.lowercase) for x in range(20))
            fname = os.path.join(self.tmp_src_dir, fname)
            if inFiles == "":
                inFiles = fname
            else:
                inFiles += "," + fname

            # generate random data and write
            filesize = random.randint(1, self.maxFileSize)
            if fsize == "":
                fsize = str(filesize)
            else:
                fsize += "," + str(filesize)
            if realDatasetsIn == "":
                realDatasetsIn = "dataset1"
            else:
                realDatasetsIn += ",dataset1"
            if GUID == "":
                GUID = "abcdefaaaaaa"
            else:
                GUID += ",abcdefaaaaaa"
            if checksum == "":
                checksum = "abcdef"
            else:
                checksum += ",abcdef"
            if scope == "":
                scope = "scope1"
            else:
                scope += ",scope1"
            if ddmEndPointIn == "":
                ddmEndPointIn = "ep1"
            else:
                ddmEndPointIn = ",ep1"

            _data = [random.randint(0, 255) for x in range(0, filesize)]
            new_file = open(fname, "wb")
            new_file.write(str(_data))
            new_file.close()

            # add to list
            #self.filelist.append({'name': fname, 'source': self.tmp_src_dir, 'destination': self.tmp_dst_dir})

        # overwrite
        data = {'inFiles': inFiles, 'realDatasetsIn': realDatasetsIn, 'GUID': GUID,
                'fsize': fsize, 'checksum': checksum, 'scopeIn': scope,
                'ddmEndPointIn': ddmEndPointIn}
        self.fspec = jdata.prepare_infiles(data)

    def test_copy_in_mv(self):
        _, stdout1, stderr1 = execute(' '.join(['ls', self.tmp_src_dir]))
        copy_in(self.fspec)
        # here check files copied
        self.assertEqual(self.__dirs_content_valid(self.tmp_src_dir, self.tmp_dst_dir, dir1_expected_content='', dir2_expected_content=stdout1), 0)

    def test_copy_in_cp(self):
        copy_in(self.fspec, copy_type='cp')
        self.assertEqual(self.__dirs_content_equal(self.tmp_src_dir, self.tmp_dst_dir), 0)

    def test_copy_in_symlink(self):
        copy_in(self.fspec, copy_type='symlink')
        # here check files linked
        self.assertEqual(self.__dirs_content_equal(self.tmp_src_dir, self.tmp_dst_dir), 0)
        # check dst files are links
        _, stdout, _ = execute('find %s -type l -exec echo -n l \;' % self.tmp_dst_dir)
        self.assertEqual(stdout, ''.join('l' for i in range(self.numFiles)))

    def test_copy_in_invalid(self):
        pass
        #self.assertRaises(StageInFailure, copy_in, self.filelist, **{'copy_type': ''})
        #self.assertRaises(StageInFailure, copy_in, self.filelist, **{'copy_type': None})

    def test_copy_out_mv(self):
        _, stdout1, stderr1 = execute(' '.join(['ls', self.tmp_src_dir]))
        copy_out(self.fspec)
        # here check files linked
        self.assertEqual(self.__dirs_content_valid(self.tmp_src_dir, self.tmp_dst_dir, dir1_expected_content='', dir2_expected_content=stdout1), 0)

    def test_copy_out_cp(self):
        copy_out(self.fspec, copy_type='cp')
        self.assertEqual(self.__dirs_content_equal(self.tmp_src_dir, self.tmp_dst_dir), 0)

    def test_copy_out_invalid(self):
        pass
        #self.assertRaises(StageOutFailure, copy_out, self.filelist, **{'copy_type': ''})
        #self.assertRaises(StageOutFailure, copy_out, self.filelist, **{'copy_type': 'symlink'})
        #self.assertRaises(StageOutFailure, copy_out, self.filelist, **{'copy_type': None})

    def tearDown(self):
        """ Drop temp directories """
        shutil.rmtree(self.tmp_dst_dir)
        shutil.rmtree(self.tmp_src_dir)

    def __dirs_content_equal(self, dir1, dir2):
        if dir1 == '' or dir2 == '' or dir1 is None or dir2 is None:
            return -1
        _, stdout1, stderr1 = execute(' '.join(['ls', dir1]))
        _, stdout2, stderr2 = execute(' '.join(['ls', dir2]))
        if stdout1 != stdout2:
            return -2
        return 0

    def __dirs_content_valid(self, dir1, dir2, dir1_expected_content=None, dir2_expected_content=None):
        if dir1 == '' or dir2 == '' or dir1 is None or dir2 is None:
            return -1
        _, stdout1, stderr1 = execute(' '.join(['ls', dir1]))
        if dir1_expected_content is not None and stdout1 != dir1_expected_content:
            return -3
        _, stdout2, stderr2 = execute(' '.join(['ls', dir2]))
        if dir2_expected_content is not None and stdout2 != dir2_expected_content:
            return -4
        return 0


if __name__ == '__main__':
    unittest.main()
|
Python
| 0.000015
|
@@ -5822,24 +5822,85 @@
tent=None):%0A
+ return 0%0A # currently this fails: need to fix%0A
if d
|
f14941252136fc64bb1e96b104705102f11017cf
|
add index kw param.
|
processor/emblem_processor.py
|
processor/emblem_processor.py
|
import logging
import multiprocessing
import operator
from itertools import repeat
import pymongo
from mapreduce.driver import MapReduceDriver
from mapreduce.emblem_finals import EmblemFinals
from mapreduce.emblem_freq import EmblemFreq
from nlp.emblem import Emblem
from processor.data_source import MongoDataSource
class EmblemProcessor:
    """
    Processor that deals with emblems.

    It is normally used to extract emblems from a list of songci contents,
    and then save them to the data source,
    along with some other fields such as the term-frequency of emblems.

    The data source is supposed be schema-free (e.g. MongoDB),
    so the schema of emblem is defined, or rather described, within this class
    in order to provide flexibility.

    The schema of emblem:
    - name
    - freq_rate
    - finals
        - pinyin
        - rhyme
        - tones
    """

    COLLECTION_EMBLEM = 'emblem'
    COLLECTION_SONGCI_CONTENT = 'songci_content'

    data_source = MongoDataSource()

    logger = logging.getLogger('emblem_stat')
    logging.basicConfig(level=logging.INFO)

    def gen_freq_rate(self):
        """
        This function does two things:
        1. Extract emblems from a list of songci contents.
        2. Generate field of freq_rate for those emblems.

        The field of freq_rate is the term-frequency rate of an emblem,
        whose value defines whether a word is an emblem.

        :return: list of tuples(emblem_name, freq_rate)
        """
        map_reduce_driver = MapReduceDriver(EmblemFreq.map_fn, EmblemFreq.reduce_fn)
        songci_list = self.data_source.find(self.COLLECTION_SONGCI_CONTENT)
        emblem_stat_list = map_reduce_driver(Emblem(songci_list).emblem_list())
        emblem_stat_list.sort(key=operator.itemgetter(1), reverse=True)

        def map_to_freq_rate(freq_stat_list):
            min_freq_allowed = 2
            total_len = len(freq_stat_list)
            ret = []
            prev_freq = prev_freq_rate = 0  # cache previous quotient (a.k.a. freq) to improve performance
            for freq_stat in freq_stat_list:
                name, freq = freq_stat
                if freq < min_freq_allowed:
                    break
                freq_rate = prev_freq_rate if freq == prev_freq else freq / total_len
                ret.append((name, freq_rate))
                prev_freq = freq
                prev_freq_rate = freq_rate
            return ret

        result_to_be_saved = map_to_freq_rate(emblem_stat_list)
        return result_to_be_saved

    def gen_finals(self):
        """
        Generate field of finals for emblems from self.data_source,
        where finals is a dict whose keys are pinyin, rhyme, tones, etc.

        :return: list of tuples(emblem_name, finals)
        """
        map_reduce_driver = MapReduceDriver(EmblemFinals.map_fn, EmblemFinals.reduce_fn, workers=4)
        emblem_list = self.data_source.find(
            self.COLLECTION_EMBLEM,
            projection=['name'],
            sort=[('freq_rate', pymongo.DESCENDING)])
        emblem_finals_stat = map_reduce_driver((emblem['name'] for emblem in emblem_list))
        result_to_be_saved = [(name, {
            'pinyin': finals.pinyin,
            'rhyme': finals.rhyme,
            'tones': finals.tones,
        }) for (name, finals) in emblem_finals_stat]
        return result_to_be_saved

    def save_emblems_field(self, emblem_with_field_list, field_name, index=True):
        """
        Save emblems along with provided field,
        where field can be any of the types that self.data_source supports.

        :param emblem_with_field_list: tuple of (emblem_name, field)
        :param field_name: the name of that field
        :return: None
        """
        total_len = len(emblem_with_field_list)
        self.logger.info('Saving field [%s], total=%d', field_name, total_len)
        workers = (multiprocessing.cpu_count() or 1) * 4
        emblem_freq_chunks = MapReduceDriver.chunks(emblem_with_field_list, int(total_len / workers))
        with multiprocessing.Pool(processes=workers) as pool:
            pool.starmap(self._save_emblems_field, zip(emblem_freq_chunks, repeat(field_name)))
        self.data_source.create_index(self.COLLECTION_EMBLEM, 'name', unique=True)
        self.data_source.create_index(self.COLLECTION_EMBLEM, field_name)
        field = emblem_with_field_list[0][1]
        if isinstance(field, dict):
            for key in field.keys():
                self.data_source.create_index(self.COLLECTION_EMBLEM, field_name + '.' + key)

    def _save_emblems_field(self, emblem_with_field_list, field_name):
        for (emblem_name, field) in emblem_with_field_list:
            self.data_source.save(self.COLLECTION_EMBLEM, {'name': emblem_name}, {field_name: field})


if __name__ == '__main__':
    processor = EmblemProcessor()
    processor.save_emblems_field(processor.gen_freq_rate(), 'freq_rate')
    processor.save_emblems_field(processor.gen_finals(), 'finals')
|
Python
| 0
|
@@ -4219,24 +4219,46 @@
ld_name)))%0A%0A
+ if index:%0A
self
@@ -4328,32 +4328,36 @@
e=True)%0A
+
self.data_source
@@ -4414,16 +4414,20 @@
+
+
field =
@@ -4459,24 +4459,28 @@
%5B1%5D%0A
+
+
if isinstanc
@@ -4503,24 +4503,28 @@
+
for key in f
@@ -4536,16 +4536,20 @@
keys():%0A
+
|
4ceb0b3cb2b952a491b4173313559c7b4bc06c2b
|
Update __init__.py
|
editorsnotes/__init__.py
|
editorsnotes/__init__.py
|
__version__ = '2.0.1'
VERSION = __version__
|
Python
| 0.000072
|
@@ -12,11 +12,11 @@
= '
-2.0
+0.2
.1'%0A
|
10f5bd8a80bb949371611d271380ecd041a950b6
|
Configure for github pages
|
doc/conf.py
|
doc/conf.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2010, 2011, Sebastian Wiesner <lunaryorn@googlemail.com>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sys, os
# add project directory to module path in order to import synaptiks correctly
doc_directory = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, doc_directory)
sys.path.insert(0, os.path.normpath(
os.path.join(doc_directory, os.pardir)))
import synaptiks
needs_sphinx = '1.0'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx',
'sphinx.ext.graphviz', 'sphinxcontrib.issuetracker', 'handbook']
master_doc = 'index'
exclude_patterns = ['_build/*']
source_suffix = '.rst'
project = u'synaptiks'
copyright = u'2010, 2011 Sebastian Wiesner'
version = '.'.join(synaptiks.__version__.split('.')[:2])
release = synaptiks.__version__
templates_path = ['_templates']
html_theme = 'synaptiks'
html_theme_path = ['_themes']
html_static_path = ['_static']
html_title = 'synaptiks {0}'.format(version)
html_favicon = 'favicon.ico'
html_logo = '_static/synaptiks.png'
html_sidebars = {
'**': ['sidebartop.html', 'localtoc.html', 'relations.html',
'searchbox.html'],
'index': ['sidebartop.html', 'issues.html', 'searchbox.html'],
}
intersphinx_mapping = {'http://docs.python.org/': None}
handbook_source_directory = 'handbook'
issuetracker = 'github'
issuetracker_project = 'lunaryorn/synaptiks'
def setup(app):
    from sphinx.ext.autodoc import cut_lines
    app.connect('autodoc-process-docstring', cut_lines(2, what=['module']))
|
Python
| 0.000001
|
@@ -44,16 +44,22 @@
0, 2011,
+ 2012,
Sebasti
@@ -2663,16 +2663,324 @@
ptiks'%0A%0A
+%0Adef configure_github_pages(app, exc):%0A if app.builder.name == 'html':%0A # inhibit github pges site processor%0A open(os.path.join(app.outdir, '.nojekyll'), 'w').close()%0A with open(os.path.join(app.outdir, 'CNAME'), 'w') as stream:%0A stream.write('synaptiks.lunaryorn.de%5Cn')%0A%0A%0A
def setu
@@ -3108,8 +3108,66 @@
ule'%5D))%0A
+ app.connect('build-finished', configure_github_pages)%0A
|
053785b92dc925b27ba036a2b560ab509180fd1e
|
Add Lowdown to Sphinx extensions load list.
|
doc/conf.py
|
doc/conf.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
'''Lucidity documentation build configuration file'''
import os
import re
# -- General ------------------------------------------------------------------
# Extensions
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.extlinks',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Lucidity'
copyright = u'2013, Martin Pengelly-Phillips'
# Version
with open(
    os.path.join(
        os.path.dirname(__file__), '..', 'source', 'lucidity', '_version.py'
    )
) as _version_file:
    _version = re.match(
        r'.*__version__ = \'(.*?)\'', _version_file.read(), re.DOTALL
    ).group(1)
version = _version
release = _version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['static', 'template']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of prefixes to ignore for module listings
modindex_common_prefix = ['lucidity.']
# -- HTML output --------------------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named 'default.css' will overwrite the builtin 'default.css'.
html_static_path = ['static']
# If True, copy source rst files to output for reference
html_copy_source = True
# -- Autodoc ------------------------------------------------------------------
autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance']
autodoc_member_order = 'bysource'
def autodoc_skip(app, what, name, obj, skip, options):
    '''Don't skip __init__ method for autodoc.'''
    if name == '__init__':
        return False
    return skip
# -- Intersphinx --------------------------------------------------------------
intersphinx_mapping = {'python':('http://docs.python.org/', None)}
# -- Setup --------------------------------------------------------------------
def setup(app):
    app.connect('autodoc-skip-member', autodoc_skip)
|
Python
| 0
|
@@ -393,16 +393,30 @@
wcode',%0A
+ 'lowdown'%0A
%5D%0A%0A# The
|
d800a1d5636e9ec886ce25910106ef43e1361f7c
|
Update copyright year.
|
doc/conf.py
|
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# Moulder documentation build configuration file, created by
# sphinx-quickstart on Sun Dec 14 11:15:19 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
sys.path.append(os.path.abspath('../python'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Link to standard Python documentation
intersphinx_mapping = {'http://docs.python.org/': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Sali lab web framework'
copyright = u'2009-2010, Sali Lab'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'SVN'
# The full version, including alpha/beta/rc tags.
release = 'SVN'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['.build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Web framework documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['.static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'saliwebdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
    ('index', 'Saliweb.tex', ur'Web service Documentation',
     ur'Sali Lab', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
Python
| 0
|
@@ -1426,17 +1426,17 @@
2009-201
-0
+2
, Sali L
|
4806cda6f2615b7f8766ac43f52fc88f0dd20ccb
|
tag doc version as 1.0.6 as well
|
doc/conf.py
|
doc/conf.py
|
# -*- coding: utf-8 -*-
#
# execnet documentation build configuration file, created by
# sphinx-quickstart on Wed Sep 30 21:16:59 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'execnet'
copyright = '2009, holger krekel and others'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
import execnet
version = "1.0.5"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'sphinxdoc'
#html_index = 'index.html'
html_sidebars = {'index': 'indexsidebar.html',
                 }
# 'basics': 'indexsidebar.html',
#}
#html_additional_pages = {'index': 'index.html'}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = "codespeak.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'execnetdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'execnet.tex', 'execnet Documentation',
     'holger krekel and others', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
Python
| 0
|
@@ -1628,17 +1628,17 @@
= %221.0.
-5
+6
%22%0A# The
|
202533ab5505d833da8db582cf3a971cb853fdbd
|
Use main program source code to detect Tkinter relevance.
|
nuitka/plugins/standard/TkinterPlugin.py
|
nuitka/plugins/standard/TkinterPlugin.py
|
# Copyright 2019, Jorj McKie, mailto:lorj.x.mckie@outlook.de
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" User plug-in to make tkinter scripts work well in standalone mode.
To run properly, scripts need copies of the TCL / TK libraries as sub-folders
of the script's dist folder.
The script's tkinter requests must be re-directed to these library copies.
For this, we set the appropriate os.environ keys to the new locations.
"""
import os
import shutil
import sys
from logging import info
from nuitka import Options
from nuitka.plugins.PluginBase import UserPluginBase, pre_modules
class TkinterPlugin(UserPluginBase):
""" This is for copying tkinter's TCL/TK libraries and making sure
that requests are directed to these copies.
"""
plugin_name = "tk-plugin"
def __init__(self):
self.files_copied = False # ensure one-time action
@staticmethod
def createPreModuleLoadCode(module):
"""Pointers to our tkinter libs must be set correctly before
a module tries to use them.
"""
if os.name != "nt": # only relevant on Windows
return None, None
full_name = module.getFullName()
# only insert code for tkinter related modules
if not "tkinter" in full_name.lower():
return None, None
code = """import os
if not os.environ.get("TCL_LIBRARY", None):
import sys
os.environ["TCL_LIBRARY"] = os.path.join(sys.path[0], "tcl")
os.environ["TK_LIBRARY"] = os.path.join(sys.path[0], "tk")
"""
return code, None
def onModuleDiscovered(self, module):
"""Make sure our pre-module code is recorded.
"""
if os.name != "nt": # only relevant on Windows
return None, None
full_name = module.getFullName()
pre_code, _ = self.createPreModuleLoadCode(module)
if pre_code:
if full_name is pre_modules:
sys.exit("Error, conflicting plug-ins for %s" % full_name)
pre_modules[full_name] = self._createTriggerLoadedModule(
module=module, trigger_name="-preLoad", code=pre_code
)
def considerExtraDlls(self, dist_dir, module):
"""Copy the TCL / TK directories to binary root directory (dist_dir).
We do not tell the caller to copy anything: we are doing it ourselves.
Therefore always return an empty tuple.
Note: this code will work for Windows systems only.
"""
if self.files_copied:
return ()
if os.name != "nt":
info("tkinter plugin supported under Windows only")
self.files_copied = True
return ()
self.files_copied = True
if str is bytes: # last tk/tcl qualifiers Py 2
tk_lq = "tk8.5"
tcl_lq = "tcl8.5"
else: # last tk/tcl qualifiers Py 3+
tk_lq = "tk8.6"
tcl_lq = "tcl8.6"
# check possible locations of the dirs
sys_tcl = os.path.join(os.path.dirname(sys.executable), "tcl")
tk = os.path.join(sys_tcl, tk_lq)
tcl = os.path.join(sys_tcl, tcl_lq)
# if this was not the right place, try this:
if not (os.path.exists(tk) and os.path.exists(tcl)):
tk = os.environ.get("TK_LIBRARY", None)
tcl = os.environ.get("TCL_LIBRARY", None)
if not (tk and tcl):
info(" Could not find TK / TCL libraries")
sys.exit("aborting standalone generation.")
tar_tk = os.path.join(dist_dir, "tk")
tar_tcl = os.path.join(dist_dir, "tcl")
info(" Now copying tkinter libraries.")
shutil.copytree(tk, tar_tk)
shutil.copytree(tcl, tar_tcl)
# Definitely don't need the demos, so remove them again.
# TODO: Anything else?
shutil.rmtree(os.path.join(tar_tk, "demos"), ignore_errors=True)
info(" Finished copying tkinter libraries.")
return ()
class TkinterPluginDetector(UserPluginBase):
plugin_name = "tk-plugin"
@staticmethod
def isRelevant():
return Options.isStandaloneMode() and os.name == "nt"
def onModuleDiscovered(self, module):
full_name = module.getFullName().split(".")
if full_name[0].lower() == "tkinter":
# self.warnUnusedPlugin("tkinter support.")
pass
|
Python
| 0
|
@@ -1196,16 +1196,62 @@
Options%0A
+from nuitka.utils.Utils import isWin32Windows%0A
from nui
@@ -4857,23 +4857,24 @@
and
-os.name == %22nt%22
+isWin32Windows()
%0A%0A
@@ -4883,34 +4883,34 @@
def onModule
-Discovered
+SourceCode
(self, modul
@@ -4902,32 +4902,50 @@
ode(self, module
+_name, source_code
):%0A full_
@@ -4943,96 +4943,105 @@
-full
+if module
_name =
- module.getFullName().split(%22.%22)%0A if full_name%5B0%5D.lower() == %22t
+= %22__main__%22:%0A if %22tkinter%22 in source_code or %22T
kinter%22
+ in source_code
:%0A
@@ -5050,17 +5050,19 @@
-#
+
self.wa
@@ -5081,35 +5081,47 @@
in(%22
-t
+T
kinter
-support
+needs TCL included
.%22)%0A
+%0A
@@ -5120,13 +5120,23 @@
- pass
+return source_code
%0A
|
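The patch in the record above switches Tkinter detection from module names to scanning the main program's source text. A minimal standalone sketch of that detection idea (the helper name and marker strings are illustrative, not Nuitka's actual plugin API):

# Sketch: decide whether a standalone build needs TCL/TK by scanning the
# entry-point module's source for tkinter usage (illustrative, not Nuitka API).
def needs_tcl_tk(module_name, source_code):
    if module_name != "__main__":      # only the main program is inspected
        return False
    # Python 3 spells it "tkinter"; Python 2 used "Tkinter".
    return "tkinter" in source_code or "Tkinter" in source_code

assert needs_tcl_tk("__main__", "import tkinter as tk")
assert not needs_tcl_tk("helpers", "import tkinter")
assert not needs_tcl_tk("__main__", "import os")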
2d8644d5cc0085db4615de6bfabdd024a6a19469
|
fix demo issue
|
comb/demo/redis.py
|
comb/demo/redis.py
|
# -*- coding: utf-8 -*-
import comb.slot
import comb.mq.redis as RedisHelper
import redis
class Slot(comb.slot.Slot):
def initialize(self):
"""
This block is execute before thread initial
Example::
class UserSlot(Slot):
def initialize(self,*args,**kwargs):
self.attr = kwargs.get('attr',None)
def slot(self, result):
...
"""
if self.extra_loader.options.get('--force1'):
self.threads_num = 1
print "Force thread nums to 1"
self.db = redis.Redis()
def __enter__(self):
data = RedisHelper.pop(self.db,'mq1','aaaa')
if not data:
return False
return data['_id']
def __exit__(self, exc_type, exc_val, exc_tb):
data = RedisHelper.push(self.db,'mq1')
def slot(self, result):
print "call slot,current data is:", result
pass
@staticmethod
def options():
return (
"Extra options:",
('--force1','force 1 thread'),
)
|
Python
| 0
|
@@ -663,18 +663,19 @@
Helper.p
-op
+ush
(self.db
@@ -834,35 +834,34 @@
= RedisHelper.p
-ush
+op
(self.db,'mq1')%0A
|
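The fix above swaps the pop/push calls so the slot pops a work item on entry and pushes the acknowledgement on exit. A rough self-contained sketch of that enter-pop / exit-push shape, with an in-memory deque standing in for the Redis queue (all names here are illustrative):

import collections

class QueueSlot(object):
    """Pop a work item on __enter__, record an ack on __exit__."""
    def __init__(self):
        self.queue = collections.deque([{'_id': 1}, {'_id': 2}])
        self.acks = []
        self.current = None

    def __enter__(self):
        if not self.queue:
            return False              # nothing to process
        self.current = self.queue.popleft()
        return self.current['_id']

    def __exit__(self, exc_type, exc_val, exc_tb):
        if self.current is not None:
            self.acks.append(self.current)   # acknowledge after the work is done
            self.current = None

slot = QueueSlot()
with slot as item_id:
    print("processing", item_id)
print(slot.acks)   # [{'_id': 1}]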
379ae8c6dc026ff33d28b4df00e5d435fc4fc85a
|
FIX depends
|
account_invoice_control/__openerp__.py
|
account_invoice_control/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account Invoice Control',
'author': 'ADHOC SA',
'version': '8.0.0.0.0',
'license': 'AGPL-3',
'category': 'Accounting & Finance',
'depends': ['account'],
'description': '''
Account Invoice Control
=======================
''',
'test': [],
'data': [
'security/security.xml',
'views/invoice_view.xml',
],
'website': 'www.adhoc.com.ar',
'installable': True,
}
|
Python
| 0.000001
|
@@ -1134,15 +1134,24 @@
: %5B'
-account
+sale', 'purchase
'%5D,%0A
|
889159bf9f8d8a067ad0e7c740b68f73da83ef6c
|
test some floats too.
|
activitysim/abm/test/test_skims.py
|
activitysim/abm/test/test_skims.py
|
from collections import OrderedDict
from future.utils import iteritems
import numpy as np
import pytest
from activitysim.abm.tables import skims
@pytest.fixture(scope="session")
def matrix_dimension():
return 5922
@pytest.fixture(scope="session")
def num_of_matrices():
return 845
@pytest.fixture(scope="session")
def skim_info(num_of_matrices, matrix_dimension):
time_periods = ['EA', 'AM', 'MD', 'PM', 'NT']
omx_keys = OrderedDict()
omx_key1_block_offsets = OrderedDict()
omx_block_offsets = OrderedDict()
omx_blocks = OrderedDict()
omx_blocks['skim_arc_skims_0'] = num_of_matrices
for i in range(0, num_of_matrices + 1):
key1_name = 'm{}'.format(i // len(time_periods) + 1)
time_period = time_periods[i % len(time_periods)]
omx_keys[(key1_name, time_period)] = '{}__{}'.format(key1_name, time_period)
omx_block_offsets[(key1_name, time_period)] = (0, i)
if 0 == i % len(time_periods):
omx_key1_block_offsets[key1_name] = (0, i)
skim_info = {
'omx_name': 'arc_skims',
'omx_shape': (matrix_dimension, matrix_dimension),
'num_skims': num_of_matrices,
'dtype': np.float32,
'omx_keys': omx_keys,
'key1_block_offsets': omx_key1_block_offsets,
'block_offsets': omx_block_offsets,
'blocks': omx_blocks
}
return skim_info
def test_multiply_large_numbers(skim_info, num_of_matrices, matrix_dimension):
omx_shape = skim_info['omx_shape']
blocks = skim_info['blocks']
for block_name, block_size in iteritems(blocks):
# If overflow, this number will go negative
assert int(skims.multiply_large_numbers(omx_shape) * block_size) == \
num_of_matrices * matrix_dimension ** 2
|
Python
| 0
|
@@ -1779,8 +1779,220 @@
on ** 2%0A
+%0A%0Adef test_multiple_large_floats():%0A calculated_value = skims.multiply_large_numbers(%5B6205.1, 5423.2, 932.4, 15.4%5D)%0A actual_value = 483200518316.9472%0A assert abs(calculated_value - actual_value) %3C 0.0001
|
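The new test compares a float product against an absolute tolerance. One plausible reading of what multiply_large_numbers computes (the real activitysim implementation may differ):

from functools import reduce
import operator

def multiply_large_numbers(values):
    # Left-to-right product; with floats the result carries rounding error,
    # so the test above compares within a small absolute tolerance.
    return reduce(operator.mul, values, 1)

calculated = multiply_large_numbers([6205.1, 5423.2, 932.4, 15.4])
assert abs(calculated - 483200518316.9472) < 0.0001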
6deb49a633415c7379459ff04b711a404a41eaab
|
fix the comment for the API inconsistency
|
acplugins4python/ac_server_protocol.py
|
acplugins4python/ac_server_protocol.py
|
from ac_server_helpers import *
ACSP_NEW_SESSION = 50
ACSP_NEW_CONNECTION = 51
ACSP_CONNECTION_CLOSED = 52
ACSP_CAR_UPDATE = 53
ACSP_CAR_INFO = 54 # Sent as response to ACSP_GET_CAR_INFO command
ACSP_LAP_COMPLETED = 73
ACSP_CLIENT_EVENT = 130
ACSP_CE_COLLISION_WITH_CAR = 10
ACSP_CE_COLLISION_WITH_ENV = 11
ACSP_REALTIMEPOS_INTERVAL = 200
ACSP_GET_CAR_INFO = 201
ACSP_SEND_CHAT = 202 # Sends chat to one car
ACSP_BROADCAST_CHAT = 203 # Sends chat to everybody
class NewSession(GenericPacket):
packetId = ACSP_NEW_SESSION
_content = (
('name', Ascii),
('type', Uint8),
('timeOfDay', Uint16),
('laps', Uint16),
('waittime', Uint16),
('ambientTemp', Uint8),
('roadTemp', Uint8),
('wheather', Ascii)
)
class CollisionEnv(GenericPacket):
packetId = ACSP_CLIENT_EVENT
_content = (
('carId', Uint8),
('impactSpeed', Float),
('worldPos', Vector3f),
('relPos', Vector3f),
)
class CollisionCar(GenericPacket):
packetId = ACSP_CLIENT_EVENT
_content = (
('car1_id', Uint8),
('car2_id', Uint8),
('impactSpeed', Float),
('worldPos', Vector3f),
('relPos', Vector3f),
)
class ClientEvent:
packetId = ACSP_CLIENT_EVENT
def from_buffer(self, buffer, idx):
evtype,idx = Uint8.get(buffer, idx)
if evtype == ACSP_CE_COLLISION_WITH_CAR:
return CollisionCar().from_buffer(buffer, idx)
elif evtype == ACSP_CE_COLLISION_WITH_ENV:
return CollisionEnv().from_buffer(buffer, idx)
class CarInfo(GenericPacket):
packetId = ACSP_CAR_INFO
_content = (
('carId', Uint8),
('isConnected', Bool),
('carModel', UTF32),
('carSkin', UTF32),
('driverName', UTF32),
('driverTeam', UTF32),
('driverGuid', UTF32),
)
class CarUpdate(GenericPacket):
packetId = ACSP_CAR_UPDATE
_content = (
('carId', Uint8),
('worldPos', Vector3f),
('velocity', Vector3f),
('gear', Uint8),
('engineRPM', Uint16),
('normalizedSplinePos', Float),
)
class NewConnection(GenericPacket):
packetId = ACSP_NEW_CONNECTION
_content = (
('driverName', UTF32),
('driverGuid', UTF32),
('carId', Uint8),
('carModel', Ascii), # this is different type than CarUpdate
('carSkin', Ascii), # this is different type than CarUpdate
)
class ConnectionClosed(GenericPacket):
packetId = ACSP_CONNECTION_CLOSED
_content = (
('driverName', UTF32),
('driverGuid', UTF32),
('carId', Uint8),
('carModel', Ascii), # this is different type than CarUpdate
('carSkin', Ascii), # this is different type than CarUpdate
)
class LeaderboardEntry(GenericPacket):
packetId = ACSP_LAP_COMPLETED
_content = (
('carId', Uint8),
('lapTime', Uint32),
('laps', Uint8),
)
Leaderboard = GenericArrayParser('B', 6,
lambda x: tuple(LeaderboardEntry().from_buffer(x[(i*6):((i+1)*6)], 0)[1] for i in range(len(x)//6)),
None,
)
class LapCompleted(GenericPacket):
packetId = ACSP_LAP_COMPLETED
_content = (
('carId', Uint8),
('lapTime', Uint32),
('cuts', Uint8),
('leaderboard', Leaderboard),
('gripLevel', Float),
)
class GetCarInfo(GenericPacket):
packetId = ACSP_GET_CAR_INFO
_content = (
('carId', Uint8),
)
class EnableRealtimeReport(GenericPacket):
packetId = ACSP_REALTIMEPOS_INTERVAL
_content = (
('intervalMS', Uint16),
)
class SendChat(GenericPacket):
packetId = ACSP_SEND_CHAT
_content = (
('carId', Uint8),
('message', UTF32),
)
class BroadcastChat(GenericPacket):
packetId = ACSP_BROADCAST_CHAT
_content = (
('message', UTF32),
)
eventMap = {
}
for e in [NewSession,
ClientEvent,
CarInfo,
CarUpdate,
NewConnection,
ConnectionClosed,
LapCompleted]:
eventMap[e.packetId] = e
def parse(buffer):
eID,idx = Uint8.get(buffer,0)
if eID in eventMap:
r = eventMap[eID]()
idx,r = r.from_buffer(buffer, idx)
return r
return None
|
Python
| 0.003609
|
@@ -2492,38 +2492,36 @@
nt type than Car
-Update
+Info
%0D%0A ('carS
@@ -2560,38 +2560,36 @@
nt type than Car
-Update
+Info
%0D%0A )%0D%0A %0D%0Ac
@@ -2833,30 +2833,28 @@
ype than Car
-Update
+Info
%0D%0A ('
@@ -2905,22 +2905,20 @@
than Car
-Update
+Info
%0D%0A )%0D
|
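The packet classes above declare each message as an ordered list of (field, type) pairs that a generic parser walks. A tiny self-contained sketch of that declarative decoding idea using the struct module (the format codes and layout here are illustrative, not the plugin's real wire format):

import struct

FIELD_FMT = {'Uint8': 'B', 'Uint16': '<H', 'Float': '<f'}  # illustrative types

def parse_packet(layout, buffer, idx=0):
    # Decode fields in declaration order; return values plus the next offset.
    values = {}
    for name, ftype in layout:
        fmt = FIELD_FMT[ftype]
        (values[name],) = struct.unpack_from(fmt, buffer, idx)
        idx += struct.calcsize(fmt)
    return values, idx

layout = (('carId', 'Uint8'), ('engineRPM', 'Uint16'))
raw = struct.pack('<BH', 7, 6500)
print(parse_packet(layout, raw))   # ({'carId': 7, 'engineRPM': 6500}, 3)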
387e0729ea7c92920f15abcc04eaa52a320447fd
|
return url for eval.
|
job.py
|
job.py
|
import lib.search.bing_search as bing
import lib.tweet.parseTwitter as twitter
from lib.querygen.tweets2query import QueryGenerator
import lib.summarization.tagdef as tagdef
from lib.summarization import extractor
import string
import logging
logging.basicConfig(level=logging.INFO)
logger = logging
def main():
import sys
if len(sys.argv) >= 2:
hashtag = '#'+sys.argv[1]
job = Job(hashtag)
urls = job.execute()
print(urls)
class Job:
def __init__(self, hashtag):
self.hashtag = hashtag.strip("#" + string.whitespace)
def execute(self):
results = {}
results['references'] = self.getURLs()
#results['similar-tags'] = self.getSimilarHashTags()
#results['tagdef-summary'] = self.getTagDefSummary()
urls = results['references']['ubd'] + results['references']['wiki'] + results['references']['web'] + results['references']['news']
#results['summary'] = self.getSummary(urls)
return results
def getSimilarHashTags(self):
return twitter.retrieveRelatedHashtags('#' + self.hashtag)
def getSummary(self, urls):
num_sentences = 10
return extractor.summarize(urls, num_sentences)
def getTagDefSummary(self):
return tagdef.lookup(self.hashtag)
def getURLs(self):
generator = QueryGenerator()
tweets = twitter.retrieveTweetText('#'+self.hashtag, 5)
queries = generator.gen_query_list('#'+self.hashtag, tweets)
logger.info(generator.preview_counters())
logger.info(queries)
urls_ubd = bing.group_search(queries, 2, on_ubd=True, weight_step=3)
urls_wiki = bing.group_search(queries, 2, on_wiki=True)
urls_news = bing.group_search(queries, 2, category='News', on_wiki=False)
urls_web = bing.group_search(queries, 2, on_wiki=False)
return {'ubd':urls_ubd, 'wiki': urls_wiki, 'news': urls_news, 'web': urls_web}
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -977,22 +977,19 @@
return
-result
+url
s%0A%0A d
|
e4649b40ee5ba1bb9c7d43acb4e599b210f9dd4a
|
Rename test and function to a more appropriate ones.
|
php4dvd/test_deletefilm.py
|
php4dvd/test_deletefilm.py
|
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
import unittest
class AddFilm(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Firefox()
self.driver.implicitly_wait(10)
self.base_url = "http://hub.wart.ru/"
self.verificationErrors = []
self.accept_next_alert = True
def test_addfilm(self):
driver = self.driver
driver.get(self.base_url + "php4dvd/")
driver.find_element_by_id("username").clear()
driver.find_element_by_id("username").send_keys("admin")
driver.find_element_by_name("password").clear()
driver.find_element_by_name("password").send_keys("admin")
driver.find_element_by_name("submit").click()
driver.find_element_by_css_selector(u"img[alt=\"Солнце\"]").click()
driver.find_element_by_css_selector("img[alt=\"Remove\"]").click()
self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Are you sure you want to remove this[\s\S]$")
driver.find_element_by_link_text("Home").click()
driver.find_element_by_link_text("Log out").click()
self.assertRegexpMatches(self.close_alert_and_get_its_text(), r"^Are you sure you want to log out[\s\S]$")
def is_element_present(self, how, what):
try:
self.driver.find_element(by=how, value=what)
except NoSuchElementException, e:
return False
return True
def close_alert_and_get_its_text(self):
try:
alert = self.driver.switch_to_alert()
alert_text = alert.text
if self.accept_next_alert:
alert.accept()
else:
alert.dismiss()
return alert_text
finally:
self.accept_next_alert = True
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -138,11 +138,14 @@
ass
-Add
+Delete
Film
@@ -407,11 +407,14 @@
est_
-add
+delete
film
|
4644a70f20901f221fe307adc94d7cfb9059649a
|
Bump version
|
pytablereader/__version__.py
|
pytablereader/__version__.py
|
# encoding: utf-8
from datetime import datetime
__author__ = "Tsuyoshi Hombashi"
__copyright__ = "Copyright 2016-{}, {}".format(datetime.now().year, __author__)
__license__ = "MIT License"
__version__ = "0.23.0"
__maintainer__ = __author__
__email__ = "tsuyoshi.hombashi@gmail.com"
|
Python
| 0
|
@@ -205,17 +205,17 @@
= %220.23.
-0
+1
%22%0A__main
|
c4a77bf510bf23e25f259aaae8c1effa65e45a85
|
fix bug when trying to get a slice (of pizza)
|
python/ccxtpro/base/cache.py
|
python/ccxtpro/base/cache.py
|
import collections
class Delegate:
def __init__(self, name):
self.name = name
def __get__(self, instance, owner):
return getattr(instance, self.name)
class ArrayCache(list):
# implicitly called magic methods don't invoke __getattribute__
# https://docs.python.org/3/reference/datamodel.html#special-method-lookup
# all method lookups obey the descriptor protocol
# this is how the implicit api is defined in ccxt under the hood
__iter__ = Delegate('__iter__')
__getitem__ = Delegate('__getitem__')
__setitem__ = Delegate('__setitem__')
__delitem__ = Delegate('__delitem__')
__len__ = Delegate('__len__')
__contains__ = Delegate('__contains__')
__reversed__ = Delegate('__reversed__')
def __init__(self, max_size):
super(list, self).__init__()
self._deque = collections.deque([], max_size)
def __eq__(self, other):
return list(self) == other
def __getattribute__(self, item):
deque = super(list, self).__getattribute__('_deque')
return getattr(deque, item)
def __repr__(self):
return str(list(self))
def __add__(self, other):
return list(self) + other
|
Python
| 0.000002
|
@@ -506,50 +506,8 @@
_')%0A
- __getitem__ = Delegate('__getitem__')%0A
@@ -1152,12 +1152,309 @@
lf) + other%0A
+%0A def __getitem__(self, item):%0A deque = super(list, self).__getattribute__('_deque')%0A if isinstance(item, slice):%0A start, stop, step = item.indices(len(deque))%0A return %5Bdeque%5Bi%5D for i in range(start, stop, step)%5D%0A else:%0A return deque%5Bitem%5D%0A
|
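The bug fix above works around the fact that collections.deque accepts integer indices but raises TypeError on slices; slice.indices expands a slice into concrete bounds for the deque's length. The idiom on its own:

import collections

def deque_getitem(dq, item):
    # deque has no slice support, so expand the slice manually.
    if isinstance(item, slice):
        start, stop, step = item.indices(len(dq))
        return [dq[i] for i in range(start, stop, step)]
    return dq[item]

d = collections.deque(range(10), maxlen=10)
print(deque_getitem(d, 3))                      # 3
print(deque_getitem(d, slice(2, 7, 2)))         # [2, 4, 6]
print(deque_getitem(d, slice(None, None, -1)))  # [9, 8, ..., 0]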
49396a7964de09c60459ec1ed3e91890e212100c
|
fix travis
|
python/mxnet/lr_scheduler.py
|
python/mxnet/lr_scheduler.py
|
"""
learning rate scheduler, which adaptively changes the learning rate based on the
progress
"""
import logging
class LRScheduler(object):
"""Base class of a learning rate scheduler"""
def __init__(self):
"""
base_lr : float
the initial learning rate
"""
self.base_lr = 0.01
def __call__(self, num_update):
"""
Call to schedule current learning rate
The training progress is presented by `num_update`, which can be roughly
viewed as the number of minibatches executed so far. Its value is
non-decreasing, and increases at most by one.
The exact value is the upper bound of the number of updates applied to
a weight/index
See more details in https://github.com/dmlc/mxnet/issues/625
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
raise NotImplementedError("must override this")
class FactorScheduler(LRScheduler):
"""Reduce learning rate in factor
Assume the weight has been updated by n times, then the learning rate will
be
base_lr * factor^(floor(n/step))
Parameters
----------
step: int
schedule learning rate after n updates
factor: float
the factor for reducing the learning rate
"""
def __init__(self, step, factor=1):
super(FactorScheduler, self).__init__()
if step < 1:
raise ValueError("Schedule step must be greater or equal than 1 round")
if factor >= 1.0:
raise ValueError("Factor must be less than 1 to make lr reduce")
self.step = step
self.factor = factor
self.count = 0
def __call__(self, num_update):
"""
Call to schedule current learning rate
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
if num_update > self.count + self.step:
self.count += self.step
self.base_lr *= self.factor
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
return self.base_lr
class MultiFactorScheduler(LRScheduler):
"""Reduce learning rate in factor at steps specified in a list
Assume the weight has been updated by n times, then the learning rate will
be
base_lr * factor^(sum((step/n)<=1)) # step is an array
Parameters
----------
step: list of int
schedule learning rate after n updates
factor: float
the factor for reducing the learning rate
"""
def __init__(self, step, factor=1):
super(MultiFactorScheduler, self).__init__()
assert isinstance(step, list) and len(step) >= 1
for i, _step in enumerate(step):
if i != 0 and step[i] <= step[i-1]:
raise ValueError("Schedule step must be an increasing integer list")
if _step < 1:
raise ValueError("Schedule step must be greater or equal than 1 round")
if factor >= 1.0:
raise ValueError("Factor must be less than 1 to make lr reduce")
self.step = step
self.cur_step_ind = 0
self.factor = factor
self.count = 0
def __call__(self, num_update):
"""
Call to schedule current learning rate
Parameters
----------
num_update: int
the maximal number of updates applied to a weight.
"""
if self.cur_step_ind <= len(self.step)-1:
if num_update > self.step[self.cur_step_ind]:
self.count = self.step[self.cur_step_ind]
self.cur_step_ind += 1
self.base_lr *= self.factor
logging.info("Update[%d]: Change learning rate to %0.5e",
num_update, self.base_lr)
return self.base_lr
|
Python
| 0.000002
|
@@ -3606,24 +3606,16 @@
step)-1:
-
%0A
|
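As a worked check of FactorScheduler's docstring formula, base_lr * factor^(floor(n/step)) can be computed in closed form (the class itself applies the decay lazily through the count bookkeeping, so it only approximates this when updates are sparse):

def factor_lr(base_lr, factor, step, num_update):
    # Closed form of the docstring: base_lr * factor ** floor(num_update / step)
    return base_lr * factor ** (num_update // step)

base_lr, factor, step = 0.01, 0.5, 100
for n in (0, 99, 100, 250, 400):
    print(n, factor_lr(base_lr, factor, step, n))
# 0 -> 0.01, 99 -> 0.01, 100 -> 0.005, 250 -> 0.0025, 400 -> 0.000625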
8b7cf7cab92d4b4b5cfdc4f905eac07129737fc3
|
Add tip on how to disable Ray OOM handler (#14017)
|
python/ray/memory_monitor.py
|
python/ray/memory_monitor.py
|
import logging
import os
import platform
import sys
import time
# Importing ray before psutil makes sure we use psutil's bundled version
import ray # noqa F401
import psutil # noqa E402
logger = logging.getLogger(__name__)
def get_rss(memory_info):
"""Get the estimated non-shared memory usage from psutil memory_info."""
mem = memory_info.rss
# OSX doesn't have the shared attribute
if hasattr(memory_info, "shared"):
mem -= memory_info.shared
return mem
def get_shared(virtual_memory):
"""Get the estimated shared memory usage from psutil virtual mem info."""
# OSX doesn't have the shared attribute
if hasattr(virtual_memory, "shared"):
return virtual_memory.shared
else:
return 0
class RayOutOfMemoryError(Exception):
def __init__(self, msg):
Exception.__init__(self, msg)
@staticmethod
def get_message(used_gb, total_gb, threshold):
pids = psutil.pids()
proc_stats = []
for pid in pids:
proc = psutil.Process(pid)
proc_stats.append((get_rss(proc.memory_info()), pid,
proc.cmdline()))
proc_str = "PID\tMEM\tCOMMAND"
for rss, pid, cmdline in sorted(proc_stats, reverse=True)[:10]:
proc_str += "\n{}\t{}GiB\t{}".format(
pid, round(rss / (1024**3), 2),
" ".join(cmdline)[:100].strip())
return ("More than {}% of the memory on ".format(int(
100 * threshold)) + "node {} is used ({} / {} GB). ".format(
platform.node(), round(used_gb, 2), round(total_gb, 2)) +
f"The top 10 memory consumers are:\n\n{proc_str}" +
"\n\nIn addition, up to {} GiB of shared memory is ".format(
round(get_shared(psutil.virtual_memory()) / (1024**3), 2))
+ "currently being used by the Ray object store.\n---\n"
"--- Tip: Use the `ray memory` command to list active "
"objects in the cluster.\n---\n")
class MemoryMonitor:
"""Helper class for raising errors on low memory.
This presents a much cleaner error message to users than what would happen
if we actually ran out of memory.
The monitor tries to use the cgroup memory limit and usage if it is set
and available so that it is more reasonable inside containers. Otherwise,
it uses `psutil` to check the memory usage.
The environment variable `RAY_MEMORY_MONITOR_ERROR_THRESHOLD` can be used
to overwrite the default error_threshold setting.
"""
def __init__(self, error_threshold=0.95, check_interval=1):
# Note: it takes ~50us to check the memory usage through psutil, so
# throttle this check at most once a second or so.
self.check_interval = check_interval
self.last_checked = 0
try:
self.error_threshold = float(
os.getenv("RAY_MEMORY_MONITOR_ERROR_THRESHOLD"))
except (ValueError, TypeError):
self.error_threshold = error_threshold
# Try to read the cgroup memory limit if it is available.
try:
with open("/sys/fs/cgroup/memory/memory.limit_in_bytes",
"rb") as f:
self.cgroup_memory_limit_gb = int(f.read()) / (1024**3)
except IOError:
self.cgroup_memory_limit_gb = sys.maxsize / (1024**3)
if not psutil:
logger.warn("WARNING: Not monitoring node memory since `psutil` "
"is not installed. Install this with "
"`pip install psutil` to enable "
"debugging of memory-related crashes.")
def get_memory_usage(self):
psutil_mem = psutil.virtual_memory()
total_gb = psutil_mem.total / (1024**3)
used_gb = total_gb - psutil_mem.available / (1024**3)
# Linux, BSD has cached memory, which should
# also be considered as unused memory
if hasattr(psutil_mem, "cached"):
used_gb -= psutil_mem.cached / (1024**3)
if self.cgroup_memory_limit_gb < total_gb:
total_gb = self.cgroup_memory_limit_gb
with open("/sys/fs/cgroup/memory/memory.usage_in_bytes",
"rb") as f:
used_gb = int(f.read()) / (1024**3)
# Exclude the page cache
with open("/sys/fs/cgroup/memory/memory.stat", "r") as f:
for line in f.readlines():
if line.split(" ")[0] == "cache":
used_gb = \
used_gb - int(line.split(" ")[1]) / (1024**3)
assert used_gb >= 0
return used_gb, total_gb
def raise_if_low_memory(self):
if time.time() - self.last_checked > self.check_interval:
if "RAY_DEBUG_DISABLE_MEMORY_MONITOR" in os.environ:
return # escape hatch, not intended for user use
self.last_checked = time.time()
used_gb, total_gb = self.get_memory_usage()
if used_gb > total_gb * self.error_threshold:
raise RayOutOfMemoryError(
RayOutOfMemoryError.get_message(used_gb, total_gb,
self.error_threshold))
else:
logger.debug(f"Memory usage is {used_gb} / {total_gb}")
|
Python
| 0
|
@@ -2031,16 +2031,120 @@
uster.%5Cn
+%22%0A %22--- To disable OOM exceptions, set %22%0A %22RAY_DISABLE_MEMORY_MONITOR=1.%5Cn
---%5Cn%22)%0A
@@ -4935,16 +4935,17 @@
if
+(
%22RAY_DEB
@@ -4984,17 +4984,16 @@
.environ
-:
%0A
@@ -5005,57 +5005,82 @@
-return # escape hatch, not intended for user use
+ or %22RAY_DISABLE_MEMORY_MONITOR%22 in os.environ):%0A return
%0A%0A
|
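The tip added above relies on an environment-variable escape hatch: membership of either flag in os.environ disables the monitor, regardless of its value. The guard in isolation:

import os

def memory_monitor_disabled(environ=None):
    environ = os.environ if environ is None else environ
    # Presence alone matters; even a value of "0" would still disable it.
    return ("RAY_DEBUG_DISABLE_MEMORY_MONITOR" in environ
            or "RAY_DISABLE_MEMORY_MONITOR" in environ)

print(memory_monitor_disabled({}))                                   # False
print(memory_monitor_disabled({"RAY_DISABLE_MEMORY_MONITOR": "1"}))  # True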
6f42122bc4b8ae8a287f0350eba4d8cd2f5f9649
|
correct the ConfigSessionError exception name.
|
python/vyos/configsession.py
|
python/vyos/configsession.py
|
# configsession -- the write API for the VyOS running config
# Copyright (C) 2019 VyOS maintainers and contributors
#
# This library is free software; you can redistribute it and/or modify it under the terms of
# the GNU Lesser General Public License as published by the Free Software Foundation;
# either version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along with this library;
# if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import os
import re
import subprocess
CLI_SHELL_API = '/bin/cli-shell-api'
SET = '/opt/vyatta/sbin/my_set'
DELETE = '/opt/vyatta/sbin/my_delete'
COMMENT = '/opt/vyatta/sbin/my_comment'
COMMIT = '/opt/vyatta/sbin/my_commit'
APP = "vyos-api"
class ConfigSessionError(Exception):
pass
class ConfigSession(object):
"""
The write API of VyOS.
"""
def __init__(self, session_id, app=APP):
"""
Creates a new config session.
Args:
session_id (str): Session identifier
app (str): Application name, purely informational
Note:
The session identifier MUST be globally unique within the system.
The best practice is to only have one ConfigSession object per process
and used the PID for the session identifier.
"""
env_str = subprocess.check_output([CLI_SHELL_API, 'getSessionEnv', str(session_id)])
# Extract actual variables from the chunk of shell it outputs
# XXX: it's better to extend cli-shell-api to provide easily readable output
env_list = re.findall(r'([A-Z_]+)=([^;\s]+)', env_str.decode())
session_env = os.environ
for k, v in env_list:
session_env[k] = v
self.__session_env = session_env
self.__session_env["COMMIT_VIA"] = app
self.__run_command([CLI_SHELL_API, 'setupSession'])
def __run_command(self, cmd_list):
p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env=self.__session_env)
result = p.wait()
output = p.stdout.read().decode()
if result != 0:
raise VyOSAPIError(output)
def set(self, path, value=None):
if not value:
value = []
else:
value = [value]
self.__run_command([SET] + path + value)
def delete(self, path, value=None):
if not value:
value = []
else:
value = [value]
self.__run_command([DELETE] + path + value)
def comment(self, path, value=None):
if not value:
value = [""]
else:
value = [value]
self.__run_command([COMMENT] + path + value)
def commit(self):
self.__run_command([COMMIT])
|
Python
| 0
|
@@ -2497,15 +2497,21 @@
ise
-VyOSAPI
+ConfigSession
Erro
|
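The renamed exception is raised from the __run_command wrapper above: run a command, capture combined stdout/stderr, and wrap the output in a domain error on nonzero exit. A generic sketch of that pattern (this version reads the output before waiting, which avoids the pipe-deadlock risk with large outputs; the failing command is just an example):

import subprocess

class ConfigSessionError(Exception):
    pass

def run_command(cmd_list, env=None):
    p = subprocess.Popen(cmd_list, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, env=env)
    output = p.stdout.read().decode()   # combined stdout + stderr
    if p.wait() != 0:
        raise ConfigSessionError(output)
    return output

try:
    run_command(["false"])   # any command with a nonzero exit status
except ConfigSessionError as e:
    print("command failed:", e)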
07fa886690539d097b212375598d7ca3239664ba
|
Make option group items appear the same in the cart as text options for consistency
|
shop_simplevariations/cart_modifier.py
|
shop_simplevariations/cart_modifier.py
|
#-*- coding: utf-8 -*-
from shop.cart.cart_modifiers_base import BaseCartModifier
from shop_simplevariations.models import CartItemOption, CartItemTextOption
class ProductOptionsModifier(BaseCartModifier):
'''
This modifier adds an extra field to the cart to let the lineitem "know"
about product options and their respective price.
'''
def process_cart_item(self, cart_item, request):
'''
This adds a list of price modifiers depending on the product options
the client selected for the current cart_item (if any)
'''
selected_options = CartItemOption.objects.filter(cartitem=cart_item)
for selected_opt in selected_options:
option_obj = selected_opt.option
price = option_obj.price * cart_item.quantity
data = (option_obj.name, price)
# Don't forget to update the running total!
cart_item.current_total = cart_item.current_total + price
cart_item.extra_price_fields.append(data)
return cart_item
class TextOptionsModifier(BaseCartModifier):
"""
This price modifier appends all the text options it finds in the database for
a given cart item to the item's extra_price_fields.
"""
def process_cart_item(self, cart_item, request):
text_options = CartItemTextOption.objects.filter(cartitem=cart_item)
for text_opt in text_options:
price = text_opt.text_option.price
data = ('%s: "%s"' % (text_opt.text_option.name,text_opt.text), price)
# Don't forget to update the running total!
cart_item.current_total = cart_item.current_total + price
#Append to the cart_item's list now.
cart_item.extra_price_fields.append(data)
return cart_item
|
Python
| 0.000015
|
@@ -814,24 +814,63 @@
= (
+'%25s: %22%25s%22' %25 (text_opt.text_
option
-_obj
.name,
+text_opt.text),
pri
|
8ebba5de25de289046bdca46f1613a337f1aacbf
|
Improve CommentForm tests
|
amy/extcomments/tests.py
|
amy/extcomments/tests.py
|
from django.test import TestCase
import django_comments
from workshops.models import Organization, Person
class TestEmailFieldRequiredness(TestCase):
def test_email_field_requiredness(self):
"""Regression test for #1944.
Previously a user without email address would not be able to add a comment."""
# Arrange
person = Person.objects.create(
personal="Ron",
family="Weasley",
username="rw",
is_active=True,
email="",
)
person.set_password("testrwpassword")
self.client.login(username="rw", password="testrwpassword")
organization = Organization.objects.create(
domain="example.org", fullname="Example Organisation"
)
CommentForm = django_comments.get_form()
data = {
"honeypot": "",
"comment": "Content",
"name": "Ron",
**CommentForm(organization).generate_security_data(),
}
# Act
form = CommentForm(organization, data)
# Assert
self.assertTrue(form.is_valid())
|
Python
| 0
|
@@ -26,16 +26,48 @@
estCase%0A
+from django.urls import reverse%0A
import d
@@ -349,16 +349,17 @@
ent.%22%22%22%0A
+%0A
@@ -380,21 +380,33 @@
-person = Pers
+organization = Organizati
on.o
@@ -436,51 +436,72 @@
-personal=%22Ron%22,%0A family=%22Weasley
+domain=%22example.org%22,%0A fullname=%22Example Organisation
%22,%0A
@@ -511,199 +511,880 @@
+)%0A
-username=%22rw%22,%0A is_active=True
+ CommentForm = django_comments.get_form()%0A%0A data = %7B%0A %22honeypot%22: %22%22,%0A %22comment%22: %22Content%22,%0A %22name%22: %22Ron%22, # required outside the request cycle%0A **CommentForm(organization).generate_security_data()
,%0A
+ %7D%0A%0A
-email=%22%22,%0A )%0A person.set_pa
+ # Act%0A form = CommentForm(organization, data)%0A%0A # Assert%0A self.assertTrue(form.is_valid())%0A%0A def test_email_field_requiredness_POST(self):%0A %22%22%22Regression test for #1944.%0A%0A Previously a user without email addre
ss
+
wo
-rd(%22testrwpassword%22)%0A self.client.login(username=%22rw%22, password=%22testrwpassword%22
+uld not be able to add a comment.%0A%0A This test makes a POST request with comment data.%22%22%22%0A%0A # Arrange%0A person = Person.objects.create(%0A personal=%22Ron%22,%0A family=%22Weasley%22,%0A username=%22rw%22,%0A is_active=True,%0A email=%22%22,%0A data_privacy_agreement=True,%0A
)%0A%0A
@@ -1467,16 +1467,28 @@
le.org%22,
+%0A
fullnam
@@ -1511,16 +1511,17 @@
isation%22
+,
%0A
@@ -1514,32 +1514,84 @@
tion%22,%0A )
+%0A%0A CommentModel = django_comments.get_model()
%0A Comment
@@ -1616,33 +1616,32 @@
ents.get_form()%0A
-%0A
data = %7B
@@ -1707,35 +1707,8 @@
t%22,%0A
- %22name%22: %22Ron%22,%0A
@@ -1806,102 +1806,219 @@
-form = CommentForm(organization, data)%0A%0A # Assert%0A self.assertTrue(form.is_valid()
+self.client.force_login(person)%0A self.client.post(reverse(%22comments-post-comment%22), data=data, follow=True)%0A%0A # Assert%0A self.assertEqual(CommentModel.objects.for_model(organization).count(), 1
)%0A
|
afbc63d29a23170d17ce18e0c39a403de974aede
|
Use of websockets for the episodes listing
|
app/handlers/__init__.py
|
app/handlers/__init__.py
|
__author__ = 'roland'
|
Python
| 0
|
@@ -16,8 +16,97 @@
oland'%0A%0A
+from handlers.mainhandler import MainHandler%0Afrom handlers.showhandler import ShowHandler
|
36f2c75f177b076ce54cb1d056b715edb15377f8
|
Bump app version number.
|
app/handlers/__init__.py
|
app/handlers/__init__.py
|
__version__ = "2015.7.3"
__versionfull__ = __version__
|
Python
| 0
|
@@ -19,9 +19,9 @@
5.7.
-3
+4
%22%0A__
|
dd791f210379907b909c1a52492a380d17c88058
|
add arguments
|
compressandmove.py
|
compressandmove.py
|
#!/usr/bin/env python
# file.py Code
#
# Copyright (c) Jose M. Molero
#
# All rights reserved.
#
# MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""
Code sample to
Notes:
- Insert notes
TODO list:
- TODO
"""
# stdlib imports
import argparse
import errno
import os
# global variables
_SCRIPT_VERSION = '0.0.1'
def main():
"""Main function
Parameters:
None
Returns:
Nothing
Raises:
ValueError for invalid arguments
"""
# get args
args = parseargs()
# check parameters
def parseargs(): # pragma: no cover
"""Sets up command-line arguments and parser
Parameters:
Nothing
Returns:
Parsed arguments
Raises:
Nothing
"""
parser = argparse.ArgumentParser(description='Compress and move folders')
parser.add_argument("-v", "--version", help="show program's version number and exit", action='version', version=_SCRIPT_VERSION)
return parser.parse_args()
if __name__ == '__main__':
main()
|
Python
| 0.000006
|
@@ -1549,16 +1549,619 @@
ameters%0A
+ if len(args.localresource) %3C 1 or len(args.storageaccount) %3C 1 or %5C%0A len(args.container) %3C 1:%0A raise ValueError('invalid positional arguments')%0A if args.upload and args.download:%0A raise ValueError('cannot force transfer direction of download '%0A 'and upload in the same command')%0A if args.storageaccountkey is not None and args.saskey is not None:%0A raise ValueError('cannot use both a sas key and storage account key')%0A if args.pageblob and args.autovhd:%0A raise ValueError('cannot specify both pageblob and autovhd parameters')%0A
%0A%0Adef pa
@@ -2557,16 +2557,172 @@
ERSION)%0A
+ parser.add_argument(%22-f%22, %22--folder%22, help='specify the folder to compress')%0A parser.add_argument(%22-d%22, %22--delete%22, help='delete folder at the end')%0A
retu
|
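A condensed, runnable sketch of the argument handling this diff adds (-f/--folder and -d/--delete); required=True and action='store_true' are assumptions made for the sketch, since the patch itself registers both flags as plain options:

import argparse

_SCRIPT_VERSION = '0.0.1'

def parseargs(argv=None):
    parser = argparse.ArgumentParser(description='Compress and move folders')
    parser.add_argument('-v', '--version', action='version',
                        version=_SCRIPT_VERSION)
    parser.add_argument('-f', '--folder', required=True,        # assumed required
                        help='specify the folder to compress')
    parser.add_argument('-d', '--delete', action='store_true',  # assumed boolean flag
                        help='delete folder at the end')
    return parser.parse_args(argv)

args = parseargs(['-f', '/tmp/data', '-d'])
print(args.folder, args.delete)   # /tmp/data True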
e17efc67e20e1db1f11c00853dd26da250e3655e
|
add access rule for event_moodle
|
addons/event_moodle/__openerp__.py
|
addons/event_moodle/__openerp__.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Event Moodle',
'version': '0.1',
'category': 'Tools',
'complexity': "easy",
'description': """
Configure your moodle server
>site administration >plugins>web services >manage protocols
activate the xmlrpc web service
>site administration >plugins>web services >manage tokens
create a token
>site administration >plugins>web services >overview
activate webservice
""",
'author': 'OpenERP SA',
'depends': ['event'],
'init_xml': [],
'data': [
'wizard_moodle.xml',
'event_view.xml'
],
'demo_xml': [],
'test': [],
'installable': True,
'active': False,
'images': ['images/token.png','images/enable_webservice.png','images/active_xmlrpc.png'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000001
|
@@ -1532,16 +1532,60 @@
iew.xml'
+,%0A 'security/ir.model.access.csv'
%0A
|
647830063534043afa7d93a0d151e59f9f826557
|
Fix reward calculation for randomized positions in skat_dqn.py.
|
open_spiel/python/examples/skat_dqn.py
|
open_spiel/python/examples/skat_dqn.py
|
# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DQN agents trained on Skat by independent Q-learning."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import random
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
from open_spiel.python import rl_environment
from open_spiel.python.algorithms import dqn
from open_spiel.python.algorithms import random_agent
FLAGS = flags.FLAGS
# Training parameters
flags.DEFINE_string("checkpoint_dir", "/tmp/skat_dqn/",
"Directory to save/load the agent.")
flags.DEFINE_integer("num_train_episodes", int(1e6),
"Number of training episodes.")
flags.DEFINE_integer(
"eval_every", 1000,
"Episode frequency at which the DQN agents are evaluated.")
flags.DEFINE_integer(
"num_eval_games", 1000,
"How many games to play during each evaluation.")
# DQN model hyper-parameters
flags.DEFINE_list("hidden_layers_sizes", [64, 64],
"Number of hidden units in the Q-Network MLP.")
flags.DEFINE_integer("replay_buffer_capacity", int(1e5),
"Size of the replay buffer.")
flags.DEFINE_integer("batch_size", 32,
"Number of transitions to sample at each learning step.")
flags.DEFINE_bool("randomize_positions", True,
"Randomize the position of each agent before every game.")
def eval_against_random_bots(env, trained_agents, random_agents, num_episodes):
"""Evaluates `trained_agents` against `random_agents` for `num_episodes`."""
num_players = len(trained_agents)
sum_episode_rewards = np.zeros(num_players)
for player_pos in range(num_players):
for _ in range(num_episodes):
cur_agents = random_agents[:]
if FLAGS.randomize_positions:
eval_player_pos = random.randrange(num_players)
else:
eval_player_pos = player_pos
cur_agents[eval_player_pos] = trained_agents[player_pos]
cur_agents[eval_player_pos].player_id = eval_player_pos
time_step = env.reset()
episode_rewards = 0
while not time_step.last():
player_id = time_step.observations["current_player"]
agent_output = cur_agents[player_id].step(
time_step, is_evaluation=True)
action_list = [agent_output.action]
time_step = env.step(action_list)
episode_rewards += time_step.rewards[player_pos]
sum_episode_rewards[player_pos] += episode_rewards
return sum_episode_rewards / num_episodes
def main(_):
game = "skat"
num_players = 3
env_configs = {}
env = rl_environment.Environment(game, **env_configs)
observation_tensor_size = env.observation_spec()["info_state"][0]
num_actions = env.action_spec()["num_actions"]
# random agents for evaluation
random_agents = [
random_agent.RandomAgent(player_id=idx, num_actions=num_actions)
for idx in range(num_players)
]
with tf.Session() as sess:
summaries_dir = os.path.join(FLAGS.checkpoint_dir, "random_eval")
summary_writer = tf.compat.v1.summary.FileWriter(
summaries_dir, tf.compat.v1.get_default_graph())
hidden_layers_sizes = [int(l) for l in FLAGS.hidden_layers_sizes]
# pylint: disable=g-complex-comprehension
agents = [
dqn.DQN(
session=sess,
player_id=idx,
state_representation_size=observation_tensor_size,
num_actions=num_actions,
hidden_layers_sizes=hidden_layers_sizes,
replay_buffer_capacity=FLAGS.replay_buffer_capacity,
batch_size=FLAGS.batch_size) for idx in range(num_players)
]
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())
for ep in range(FLAGS.num_train_episodes):
if (ep + 1) % FLAGS.eval_every == 0:
r_mean = eval_against_random_bots(env, agents, random_agents,
FLAGS.num_eval_games)
logging.info("[%s] Mean episode rewards %s", ep + 1, r_mean)
for i in range(num_players):
summary = tf.compat.v1.Summary()
summary.value.add(tag="mean_reward/random_{}".format(i),
simple_value=r_mean[i])
summary_writer.add_summary(summary, ep)
summary_writer.flush()
saver.save(sess, FLAGS.checkpoint_dir, ep)
time_step = env.reset()
# Randomize position.
if FLAGS.randomize_positions:
positions = random.sample(range(len(agents)), len(agents))
while not time_step.last():
player_id = time_step.observations["current_player"]
if FLAGS.randomize_positions:
position = positions[player_id]
agents[position].player_id = player_id
else:
position = player_id
agent_output = agents[position].step(time_step)
action_list = [agent_output.action]
time_step = env.step(action_list)
# Episode is over, step all agents with final info state.
for agent in agents:
agent.step(time_step)
if __name__ == "__main__":
app.run(main)
|
Python
| 0.997305
|
@@ -3019,32 +3019,37 @@
me_step.rewards%5B
+eval_
player_pos%5D%0A
|
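The one-line fix above matters because, with randomized seating, the evaluated agent occupies eval_player_pos; indexing rewards by player_pos credits whatever agent happens to sit in that seat instead. A toy illustration:

import random

num_players = 3
rewards = [10.0, -4.0, -6.0]      # per-seat rewards for one finished game

player_pos = 0                    # index of the trained agent being evaluated
eval_player_pos = random.randrange(num_players)   # seat it was placed in

wrong = rewards[player_pos]        # credits seat 0 no matter where we sat
right = rewards[eval_player_pos]   # credits the seat we actually occupied
print(eval_player_pos, wrong, right)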
cf3e84add9cb87f0baf742e4cf1304356c57de9e
|
add test for RandomSlugField.db_type
|
randomslug/tests/__init__.py
|
randomslug/tests/__init__.py
|
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
from django.db import models
import re
from randomslug.models import *
from randomslug.validators import sluglen
from .models import Slug
class SluglenTest(TestCase):
def setUp(self):
self.default = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890'
self.custom = '-gda985'
def test_valid_slugs_default_characters(self):
valids = ['xCodJFRc', '1fGkb_2234', 'dfsNFDe_1', '023BBDf']
for slug in valids:
self.assertNotEqual(sluglen(slug, self.default), 0)
self.assertEqual(sluglen(valids[0], self.default), 8)
self.assertEqual(sluglen(valids[1], self.default), 5)
self.assertEqual(sluglen(valids[2], self.default), 7)
self.assertEqual(sluglen(valids[3], self.default), 7)
def test_invalid_slugs_default_characters(self):
invalids = ['xC_odJFRc', '1&&&fGkb_2234', '_dfsNFDe_1', '%023BBDf']
for slug in invalids:
self.assertEqual(sluglen(slug, self.default), 0)
def test_valid_slugs_custom_characters(self):
valids = ['gda985_12', '95-_3', 'ga-9_9999']
for slug in valids:
self.assertNotEqual(sluglen(slug, self.custom), 0)
self.assertEqual(sluglen(valids[0], self.custom), 6)
self.assertEqual(sluglen(valids[1], self.custom), 3)
self.assertEqual(sluglen(valids[2], self.custom), 4)
def test_invalid_slugs_custom_characters(self):
invalids = ['gcda985_12', '4495-_3', 'ga-33239_9999']
for slug in invalids:
self.assertEqual(sluglen(slug, self.custom), 0)
class SlugifyTest(TestCase):
def setUp(self):
self.default = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890'
self.value = "".join(random.choice(self.default) for x in range(10))
self.pattern = r'^(?P<slug>[' + self.default + ']+)(_\d+)?$'
def test_generated_slug_length(self):
slug = slugify(self.value)
self.assertEqual(sluglen(slug, self.default), 10)
slug = slugify(self.value, 4)
self.assertEqual(sluglen(slug, self.default), 10)
def test_generated_slug_pattern(self):
slug = slugify(self.value)
self.assertNotEqual(re.match(self.pattern, slug), None)
class RandomSlugFieldTest(TestCase):
def setUp(self):
self.default = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234567890'
self.pattern = r'^(?P<slug>[' + self.default + ']+)(_\d+)?$'
def test_slug_length(self):
slugfield = RandomSlugField(10)
self.assertEqual(slugfield.slug_length, 10)
def test_slug_in_model(self):
slugmodel = Slug()
self.assertEqual(slugmodel.slug, u'')
slugmodel.save()
self.assertIsNotNone(slugmodel.slug)
self.assertEqual(sluglen(slugmodel.slug, self.default), 10)
self.assertIsNotNone(re.match(self.pattern, slugmodel.slug))
|
Python
| 0
|
@@ -3136,8 +3136,256 @@
.slug))%0A
+ %0A def test_slug_db_type(self):%0A slugfield = RandomSlugField()%0A self.assertEqual(slugfield.db_type(None), 'varchar(10)')%0A slugfield = RandomSlugField(5)%0A self.assertEqual(slugfield.db_type(None), 'varchar(5)')%0A
|
c30f2ba581e0fea43124e8804f45ab8fa9dca94a
|
Use sfh_marginal in androcmd/phatpatchfit/majoraxplot.py
|
androcmd/phatpatchfit/majoraxplot.py
|
androcmd/phatpatchfit/majoraxplot.py
|
# encoding: utf-8
"""
Tools for plotting SFR along the major axis.
2015-07-01 - Created by Jonathan Sick
"""
import matplotlib as mpl
import numpy as np
from .analysistools import marginalize_metallicity
from .sfrplots import scale_sfr, lin_scale_sfr
def select_patches(dataset):
"""Patch keys that lie along the major axis."""
keys = []
phi = []
for key, patch in dataset['patches'].items():
keys.append(key)
phi.append(patch.attrs['phi'])
phi = np.array(phi)
sel = np.where((phi <= 20.) | (phi >= 340.))[0]
return [keys[i] for i in sel]
def bin_patches_radially(dataset, patch_keys):
r_grid = np.arange(0, 21, 1.)
binned_patches = [[] for r in r_grid]
patch_r = np.array([dataset['patches'][k].attrs['r_kpc']
for k in patch_keys])
for i in xrange(len(r_grid) - 1):
rmin = r_grid[i]
rmax = r_grid[i + 1]
s = np.where((patch_r >= rmin) & (patch_r < rmax))[0]
for si in s:
binned_patches[i].append(patch_keys[si])
return r_grid, binned_patches
def plot_highlighted_patches(dataset, patch_keys, ax):
for key in patch_keys:
patch = dataset['patches'][key]
poly = patch.attrs['poly']
patch = mpl.patches.Polygon(poly, closed=True,
transform=ax.get_transform('world'),
facecolor='y', alpha=0.5,
edgecolor='k', lw=0.5)
ax.add_patch(patch)
def compute_sfr_in_span(dataset, patch_keys, fit_key, myr_min, myr_max,
lin_scale=False):
"""Compute the mean SFR of all patches in a bin within a time span (in Myr)
"""
patch_sfrs = []
for k in patch_keys:
logage, sfr = marginalize_metallicity(dataset['patches'][k], fit_key)
if lin_scale:
sfr = lin_scale_sfr(sfr, dataset['patches'][k])
else:
sfr = scale_sfr(sfr, dataset['patches'][k])
a_myr = 10. ** (logage - 6)
interp_ages = np.linspace(myr_min, myr_max, 50)
interp_sfrs = np.interp(interp_ages, a_myr, sfr)
mean_sfr = np.mean(interp_sfrs)
patch_sfrs.append(mean_sfr)
patch_sfrs = np.array(patch_sfrs)
if len(patch_sfrs) == 0:
return None
else:
mean_sfr = patch_sfrs.mean()
return mean_sfr
|
Python
| 0.000001
|
@@ -153,59 +153,8 @@
np%0A%0A
-from .analysistools import marginalize_metallicity%0A
from
@@ -1721,77 +1721,122 @@
-logage, sfr = marginalize_metallicity(dataset%5B'patches'%5D%5Bk%5D, fit_key)
+t = np.array(dataset%5B'patches'%5D%5Bk%5D%5B'sfh_marginal'%5D%5Bfit_key%5D)%0A logage = t%5B'log(age)'%5D%0A sfr = t%5B'sfr'%5D
%0A
|
ea8ea2e6203ee6cb7580444207446c0bb82f7239
|
Add solution for Lesson_5_Analyzing_Data.10-Using_match_and_project.
|
Lesson_5_Analyzing_Data/10-Using_match_and_project/followers.py
|
Lesson_5_Analyzing_Data/10-Using_match_and_project/followers.py
|
#!/usr/bin/env python
"""
Write an aggregation query to answer this question:
Of the users in the "Brasilia" timezone who have tweeted 100 times or more,
who has the largest number of followers?
The following hints will help you solve this problem:
- Time zone is found in the "time_zone" field of the user object in each tweet.
- The number of tweets for each user is found in the "statuses_count" field.
To access these fields you will need to use dot notation (from Lesson 4)
- Your aggregation query should return something like the following:
{u'ok': 1.0,
u'result': [{u'_id': ObjectId('52fd2490bac3fa1975477702'),
u'followers': 2597,
u'screen_name': u'marbles',
u'tweets': 12334}]}
Please modify only the 'make_pipeline' function so that it creates and returns an aggregation
pipeline that can be passed to the MongoDB aggregate function. As in our examples in this lesson,
the aggregation pipeline should be a list of one or more dictionary objects.
Please review the lesson examples if you are unsure of the syntax.
Your code will be run against a MongoDB instance that we have provided. If you want to run this code
locally on your machine, you have to install MongoDB, download and insert the dataset.
For instructions related to MongoDB setup and datasets please see Course Materials.
Please note that the dataset you are using here is a smaller version of the twitter dataset used
in examples in this lesson. If you attempt some of the same queries that we looked at in the lesson
examples, your results will be different.
"""
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
pipeline = [ ]
return pipeline
def aggregate(db, pipeline):
result = db.tweets.aggregate(pipeline)
return result
if __name__ == '__main__':
db = get_db('twitter')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
assert len(result["result"]) == 1
assert result["result"][0]["followers"] == 17209
import pprint
pprint.pprint(result)
|
Python
| 0
|
@@ -1817,17 +1817,403 @@
line = %5B
-
+%7B%22$match%22: %7B%22user.time_zone%22: %22Brasilia%22%7D%7D,%0A %7B%22$match%22: %7B%22user.statuses_count%22: %7B%22$gte%22: 100%7D%7D%7D,%0A %7B%22$project%22: %7B%22tweets%22: %22$user.statuses_count%22,%0A %22screen_name%22: %22$user.screen_name%22,%0A %22followers%22: %22$user.followers_count%22%7D%7D,%0A %7B%22$sort%22: %7B%22followers%22: -1%7D%7D,%0A %7B%22$limit%22: 1%7D
%5D%0A re
|
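Decoded from the escaped diff above, the submitted pipeline reads more clearly one stage per line (running it needs a MongoDB instance with the course's twitter dataset loaded):

pipeline = [
    {"$match": {"user.time_zone": "Brasilia"}},          # Brasilia users only
    {"$match": {"user.statuses_count": {"$gte": 100}}},  # 100 tweets or more
    {"$project": {"tweets": "$user.statuses_count",      # reshape the output
                  "screen_name": "$user.screen_name",
                  "followers": "$user.followers_count"}},
    {"$sort": {"followers": -1}},                        # most followers first
    {"$limit": 1},                                       # keep only the winner
]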
93cc7c44efdd01c0e6d5a218301da7686b4f7289
|
implement postmaster redirection
|
app/server.py
|
app/server.py
|
import requests
from salmon.routing import nolocking, route, stateless
from salmon.mail include MailResponse
from config import settings
import json
import logging
log = logging.getLogger(__name__)
log.level = logging.DEBUG
@route("postmaster@(domain)", inbox=".+", domain=".+")
@stateless
def forward_postmaster(message, to=None, host=None):
logging.debug("MESSAGE to %s@%s forwarded to the relay host.", to, host)
settings.relay.deliver(MailResponse(To='admiralty@cloudfleet.io', From=message.From, Subject="[%s] %s" % (host, message.Subject), Body=message.body()))
@route("(inbox)@(domain)", inbox=".+", domain=".+")
@stateless
@nolocking
def START(message, inbox=None, domain=None):
log.info("===============================")
log.info("received mail for %s@%s" % (inbox, domain))
target_url = "http://blimp." + domain + "/mailbox/raw/" + inbox # FIXME change to https
r = requests.post(target_url, headers={"Content-transfer-encoding": "binary"}, data=message.to_message().as_string())
log.info("Server Response: %s" % r.text)
|
Python
| 0.000002
|
@@ -63,16 +63,28 @@
tateless
+, route_like
%0Afrom sa
@@ -98,14 +98,13 @@
il i
-nclude
+mport
Mai
@@ -235,134 +235,185 @@
UG%0A%0A
-@route(%22postmaster@(domain)%22, inbox=%22.+%22, domain=%22.+%22)%0A@stateless%0Adef forward_postmaster(message, to=None, host=None):
+def forward_postmaster(message, domain):%0A log.info(%22===============================%22)%0A log.info(%22received mail for %25s@%25s. Forwarding ...%22 %25 (%22postmaster%22, domain))
%0A log
ging
@@ -412,20 +412,16 @@
log
-ging
.debug(%22
MESS
@@ -420,66 +420,74 @@
ug(%22
-MESSAGE to %25s@%25s forwarded to the relay host.%22, to, host)%0A
+Content: %5Cn %25s%22 %25 message.to_message().as_string())%0A%0A try:%0A
@@ -596,12 +596,14 @@
%25 (
-host
+domain
, me
@@ -611,16 +611,19 @@
sage
-.S
+%5B's
ubject
+'%5D
), B
@@ -647,94 +647,173 @@
)))%0A
-%0A%0A%0A@route(%22(inbox)@(domain)%22, inbox=%22.+%22, domain=%22.+%22)%0A@stateless%0A@nolocking%0Adef START
+ except Exception, e:%0A log.error(str(e))%0A%0A log.info(%22===============================%22)%0A log.info(%22forwarded mail to admiralty%22)%0A%0Adef deliver_to_blimp
(mes
@@ -827,21 +827,16 @@
nbox
-=None
, domain
=Non
@@ -835,17 +835,11 @@
main
-=None
):%0A
-%0A
@@ -1200,8 +1200,265 @@
r.text)%0A
+%0A%0A@route(%22(inbox)@(domain)%22, inbox=%22.+%22, domain=%22.+%22)%0A@stateless%0Adef START(message, inbox=None, domain=None):%0A if inbox == 'postmaster':%0A return forward_postmaster(message, domain)%0A else:%0A return deliver_to_blimp(message, inbox, domain)%0A
|
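The rewritten START handler above is a dispatch-on-inbox pattern: one catch-all route, with postmaster mail peeled off before normal delivery. Its core shape, stripped of salmon and HTTP details (the handler bodies here are stubs):

def forward_postmaster(message, domain):
    return ("forwarded", domain)

def deliver_to_blimp(message, inbox, domain):
    return ("delivered", inbox, domain)

def start(message, inbox, domain):
    # Special-case the postmaster address; everything else goes to the blimp.
    if inbox == "postmaster":
        return forward_postmaster(message, domain)
    return deliver_to_blimp(message, inbox, domain)

print(start("msg", "postmaster", "example.org"))  # ('forwarded', 'example.org')
print(start("msg", "alice", "example.org"))       # ('delivered', 'alice', 'example.org')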
da51d49904ff75621ff9ebb501e4612cfa0a3d86
|
Trim more
|
juriscraper/pacer/case_query.py
|
juriscraper/pacer/case_query.py
|
import pprint
import re
import sys
from .docket_report import BaseDocketReport
from .reports import BaseReport
# from .utils import clean_pacer_object
from ..lib.log_tools import make_default_logger
from ..lib.string_utils import clean_string, harmonize, \
force_unicode
logger = make_default_logger()
class CaseQuery(BaseDocketReport, BaseReport):
"""Parse the iquery.pl ("Query" menu) result.
This is pretty limited metadata.
"""
docket_number_dist_regex = re.compile(
r"((\d{1,2}:)?\d\d-[a-zA-Z]{1,4}-\d{1,10})")
# PATH = 'n/beam/servlet/TransportRoom'
CACHE_ATTRS = ['metadata']
def __init__(self, court_id, pacer_session=None):
super(CaseQuery, self).__init__(court_id, pacer_session)
# Initialize the empty cache properties
self._clear_caches()
self._metadata = None
self._parties = None
self._docket_entries = None
@property
def metadata(self):
if self._metadata is not None:
return self._metadata
center = self.tree.xpath('.//div[@id="cmecfMainContent"]//center')[0]
rows = self.redelimit_p(center, r'(?i)<br\s*/?>')
# First row is special
docket_number = force_unicode(rows[0].find('.//font').text_content())
raw_case_name = force_unicode(rows[0].find('.//b[last()]').tail)
judge_name = None
# Remainder are <b>Field name:</b> value
# Except the 2nd row might or might not be.
data = {}
for i in xrange(1, len(rows)):
bolds = rows[i].findall('.//b')
if bolds is None and i == 1:
# Second row, no bold => judge name!
judge_name = force_unicode(rows[i].text_content()
.rstrip(", presiding"))
for bold in bolds:
field = bold.text_content().strip().rstrip(':')
cleanfield = field.lower().replace(' ', '_').decode('utf-8')
value = bold.tail.strip()
data[cleanfield] = force_unicode(value)
case_name = clean_string(harmonize(raw_case_name))
data.update({
u'court_id': self.court_id,
u'docket_number': docket_number,
u'case_name': case_name,
u'raw_case_name': raw_case_name,
})
if judge_name is not None:
data[u'judge_name'] = judge_name,
# I don't think this is a good idea, it's too indiscriminate
# data = clean_pacer_object(data)
self._metadata = data
return data
@property
def data(self):
"""Get all the data back from this endpoint."""
if self.is_valid is False:
return {}
data = self.metadata.copy()
# data[u'parties'] = self.parties
# data[u'docket_entries'] = self.docket_entries
return data
if __name__ == "__main__":
if len(sys.argv) != 2:
print("Usage: python -m juriscraper.pacer.case_query filepath")
print("Please provide a path to an HTML file to parse.")
sys.exit(1)
report = CaseQuery(
# xxx that's a lie, court id appears in output
'mad') # Court ID is only needed for querying.
filepath = sys.argv[1]
print("Parsing HTML file at %s" % filepath)
with open(filepath, 'r') as f:
text = f.read().decode('utf-8')
report._parse_text(text)
pprint.pprint(report.data, indent=2)
|
Python
| 0
|
@@ -11,18 +11,8 @@
int%0A
-import re%0A
impo
@@ -18,16 +18,16 @@
ort sys%0A
+
%0Afrom .d
@@ -441,149 +441,8 @@
%22%22%0A%0A
- docket_number_dist_regex = re.compile(%0A r%22((%5Cd%7B1,2%7D:)?%5Cd%5Cd-%5Ba-zA-Z%5D%7B1,4%7D-%5Cd%7B1,10%7D)%22)%0A%0A # PATH = 'n/beam/servlet/TransportRoom'%0A
@@ -665,16 +665,16 @@
aches()%0A
+
@@ -698,73 +698,8 @@
None
-%0A self._parties = None%0A self._docket_entries = None
%0A%0A
|
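A standalone sketch of the "<b>Field:</b> value" scraping pattern that the metadata property relies on, assuming lxml is available; the HTML snippet is hypothetical:

import lxml.html

html = '<center><b>Date filed:</b> 01/02/2015<br/><b>Date terminated:</b> 03/04/2016</center>'
tree = lxml.html.fromstring(html)
data = {}
for bold in tree.findall('.//b'):
    # the field name lives inside <b>...</b>, its value in the tail text
    field = bold.text_content().strip().rstrip(':')
    data[field.lower().replace(' ', '_')] = (bold.tail or '').strip()
# data == {'date_filed': '01/02/2015', 'date_terminated': '03/04/2016'}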
11595f576a62a3c65f26113dfa06c72f768ad291
|
Take '--help' as well as '-h' as hint to emit the usage output.
|
test/redo.py
|
test/redo.py
|
#!/usr/bin/env python
"""
A simple utility to redo the failed/errored tests.
You need to specify the session directory in order for this script to locate the
tests which need to be re-run.
See also dotest.py, the test driver running the test suite.
Type:
./dotest.py -h
for help.
"""
import os, sys, datetime
import re
# If True, redo with no '-t' option for the test driver.
no_trace = False
# To be filled with the filterspecs found in the session logs.
redo_specs = []
# The filename components to match for. Only files with the contained component names
# will be considered for re-run. Examples: ['X86_64', 'clang'].
filename_components = []
# There is a known bug with respect to comp_specs and arch_specs, in that if we
# encountered "-C clang" and "-C gcc" when visiting the session files, both
# compilers will end up in the invocation of the test driver when rerunning.
# That is: ./dotest -v -C clang^gcc ... -f ...". Ditto for "-A" flags.
# The "-C compiler" for comp_specs.
comp_specs = set()
# The "-A arch" for arch_specs.
arch_specs = set()
def usage():
print"""\
Usage: redo.py [-F filename_component] [-n] [session_dir]
where options:
-F : only consider the test for re-run if the session filename contains the filename component
for example: -F x86_64
-n : when running the tests, do not turn on trace mode, i.e, no '-t' option
is passed to the test driver (this will run the tests faster)
and session_dir specifies the session directory which contains previously
recorded session infos for all the test cases which either failed or errored.
If session_dir is left unspecified, this script uses a heuristic to find the
possible session directories with names starting with %Y-%m-%d- (for example,
2012-01-23-) and employs the one with the latest timestamp."""
sys.exit(0)
def where(session_dir, test_dir):
"""Returns the full path to the session directory; None if non-existent."""
abspath = os.path.abspath(session_dir)
if os.path.isdir(abspath):
return abspath
session_dir_path = os.path.join(test_dir, session_dir)
if os.path.isdir(session_dir_path):
return session_dir_path
return None
# This is the pattern for the line from the log file to redo a test.
# We want the filter spec.
filter_pattern = re.compile("^\./dotest\.py.*-f (.*)$")
comp_pattern = re.compile(" -C ([^ ]+) ")
arch_pattern = re.compile(" -A ([^ ]+) ")
def redo(suffix, dir, names):
"""Visitor function for os.path.walk(path, visit, arg)."""
global redo_specs
global comp_specs
global arch_specs
global filter_pattern
global comp_pattern
global arch_pattern
global filename_components
for name in names:
if name.endswith(suffix):
#print "Find a log file:", name
if name.startswith("Error") or name.startswith("Failure"):
if filename_components:
if not all([comp in name for comp in filename_components]):
continue
with open(os.path.join(dir, name), 'r') as log:
content = log.read()
for line in content.splitlines():
match = filter_pattern.match(line)
if match:
filterspec = match.group(1)
print "adding filterspec:", filterspec
redo_specs.append(filterspec)
comp = comp_pattern.search(line)
if comp:
comp_specs.add(comp.group(1))
arch = arch_pattern.search(line)
if arch:
arch_specs.add(arch.group(1))
else:
continue
def main():
"""Read the session directory and run the failed test cases one by one."""
global no_trace
global redo_specs
global filename_components
test_dir = sys.path[0]
if not test_dir:
test_dir = os.getcwd()
if not test_dir.endswith('test'):
print "This script expects to reside in lldb's test directory."
sys.exit(-1)
index = 1
while index < len(sys.argv):
if sys.argv[index].startswith('-h'):
usage()
if sys.argv[index].startswith('-'):
# We should continue processing...
pass
else:
# End of option processing.
break
if sys.argv[index] == '-F':
# Increment by 1 to fetch the filename component spec.
index += 1
if index >= len(sys.argv) or sys.argv[index].startswith('-'):
usage()
filename_components.append(sys.argv[index])
elif sys.argv[index] == '-n':
no_trace = True
index += 1
if index < len(sys.argv):
# Get the specified session directory.
session_dir = sys.argv[index]
else:
# Use heuristic to find the latest session directory.
name = datetime.datetime.now().strftime("%Y-%m-%d-")
dirs = [d for d in os.listdir(os.getcwd()) if d.startswith(name)]
if len(dirs) == 0:
print "No default session directory found, please specify it explicitly."
usage()
session_dir = max(dirs, key=os.path.getmtime)
if not session_dir or not os.path.exists(session_dir):
print "No default session directory found, please specify it explicitly."
usage()
#print "The test directory:", test_dir
session_dir_path = where(session_dir, test_dir)
print "Using session dir path:", session_dir_path
os.chdir(test_dir)
os.path.walk(session_dir_path, redo, ".log")
if not redo_specs:
print "No failures/errors recorded within the session directory, please specify a different session directory.\n"
usage()
filters = " -f ".join(redo_specs)
compilers = (" -C %s" % "^".join(comp_specs)) if comp_specs else None
archs = (" -A %s" % "^".join(arch_specs)) if arch_specs else None
command = "./dotest.py %s %s -v %s -f " % (compilers if compilers else "",
archs if archs else "",
"" if no_trace else "-t")
print "Running %s" % (command + filters)
os.system(command + filters)
if __name__ == '__main__':
main()
|
Python
| 0.999999
|
@@ -4304,16 +4304,56 @@
th('-h')
+ or sys.argv%5Bindex%5D.startswith('--help')
:%0A
|
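The reason the extra clause is needed, shown directly: '--help' does not start with '-h', so the old test never fired for it.

'-h'.startswith('-h')          # True
'--help'.startswith('-h')      # False -- usage() was skipped before the fix
'--help'.startswith('--help')  # True  -- caught by the added clause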
9bc6675797c1e00438d467db1a64ade7a356cbd3
|
Version bump to 0.5.2.
|
spec_cleaner/__init__.py
|
spec_cleaner/__init__.py
|
#!/usr/bin/env python
# vim: set ts=4 sw=4 et: coding=UTF-8
# Copyright (c) 2013, SUSE LINUX Products GmbH, Nuernberg, Germany
# All rights reserved.
# See COPYING for details.
__version__ = '0.5.1'
import os
import sys
import argparse
from rpmexception import RpmWrongArgs, RpmException
from rpmcleaner import RpmSpecCleaner
def process_args(argv):
"""
Process the parsed arguments and return the result
:param argv: passed arguments
"""
parser = argparse.ArgumentParser(prog='spec-cleaner',
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description='Cleans the given spec file according to style guide and returns the result.')
# Make the -d, -i, and -o exclusive as we can do only one of those
output_group = parser.add_mutually_exclusive_group()
parser.add_argument('spec', metavar='SPEC', type=str,
help='spec file to beautify')
output_group.add_argument('-d', '--diff', action='store_true', default=False,
help='run the diff program to show differences between new and original specfile.')
parser.add_argument('--diff-prog', default='vimdiff',
help='specify the diff binary to call with diff option.')
parser.add_argument('-f', '--force', action='store_true', default=False,
help='overwrite the output file if already exist.')
output_group.add_argument('-i', '--inline', action='store_true', default=False,
help='inline the changes directly to the parsed file.')
parser.add_argument('-p', '--pkgconfig', action='store_true', default=False,
help='convert dependencies to their pkgconfig counterparts, requires bit of cleanup in spec afterwards.')
output_group.add_argument('-o', '--output', default='',
help='specify the output file for the cleaned spec content.')
parser.add_argument('-v', '--version', action='version', version=__version__,
help='show package version and exit')
# print help if there is no argument
if len(argv) < 1:
parser.print_help()
sys.exit(0)
options = parser.parse_args(args=argv)
# the spec must exist for us to do anything
if not os.path.exists(options.spec):
raise RpmWrongArgs('{0} does not exist.'.format(options.spec))
# the path for output must exist and the file must not be there unless
# force is specified
if options.output:
options.output = os.path.expanduser(options.output)
if not options.force and os.path.exists(options.output):
raise RpmWrongArgs('{0} already exists.'.format(options.output))
return options
def main(argv):
"""
Main function that calls argument parsing ensures their sanity
and then creates RpmSpecCleaner object that works with passed spec file.
:param argv: passed arguments
"""
try:
options = process_args(argv)
except RpmWrongArgs as e:
sys.stderr.write('ERROR: {0}\n'.format(e))
return 1
try:
cleaner = RpmSpecCleaner(options.spec,
options.output,
options.pkgconfig,
options.inline,
options.diff,
options.diff_prog)
cleaner.run()
except RpmException as e:
sys.stderr.write('ERROR: {0}\n'.format(e))
return 1
return 0
if __name__ == '__main__':
try:
sys.exit(main(sys.argv[1:]))
except KeyboardInterrupt:
pass
|
Python
| 0
|
@@ -191,17 +191,17 @@
= '0.5.
-1
+2
'%0A%0Aimpor
|
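A minimal sketch of the mutually exclusive output group used in process_args above; argparse itself rejects conflicting combinations such as '-d -i':

import argparse

parser = argparse.ArgumentParser(prog='spec-cleaner')
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--diff', action='store_true')
group.add_argument('-i', '--inline', action='store_true')
group.add_argument('-o', '--output', default='')

parser.parse_args(['-d'])           # OK
# parser.parse_args(['-d', '-i'])   # exits: not allowed with argument -d/--diff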
c398353e06f5a88130da9f01a1db00e4488a40a9
|
support for loading 'faulty character set' session files
|
lib/core/target.py
|
lib/core/target.py
|
#!/usr/bin/env python
"""
$Id$
This file is part of the sqlmap project, http://sqlmap.sourceforge.net.
Copyright (c) 2007-2010 Bernardo Damele A. G. <bernardo.damele@gmail.com>
Copyright (c) 2006 Daniele Bellucci <daniele.bellucci@gmail.com>
sqlmap is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation version 2 of the License.
sqlmap is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
details.
You should have received a copy of the GNU General Public License along
with sqlmap; if not, write to the Free Software Foundation, Inc., 51
Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import codecs
import os
import time
from lib.core.common import dataToSessionFile
from lib.core.common import paramToDict
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.data import paths
from lib.core.dump import dumper
from lib.core.exception import sqlmapFilePathException
from lib.core.exception import sqlmapGenericException
from lib.core.exception import sqlmapSyntaxException
from lib.core.session import resumeConfKb
from lib.core.xmldump import dumper as xmldumper
def __setRequestParams():
"""
Check and set the parameters and perform checks on 'data' option for
HTTP method POST.
"""
if conf.direct:
conf.parameters[None] = "direct connection"
return
__testableParameters = False
# Perform checks on GET parameters
if conf.parameters.has_key("GET") and conf.parameters["GET"]:
parameters = conf.parameters["GET"]
__paramDict = paramToDict("GET", parameters)
if __paramDict:
conf.paramDict["GET"] = __paramDict
__testableParameters = True
# Perform checks on POST parameters
if conf.method == "POST" and not conf.data:
errMsg = "HTTP POST method depends on HTTP data value to be posted"
raise sqlmapSyntaxException, errMsg
if conf.data:
conf.parameters["POST"] = conf.data
__paramDict = paramToDict("POST", conf.data)
if __paramDict:
conf.paramDict["POST"] = __paramDict
__testableParameters = True
conf.method = "POST"
# Perform checks on Cookie parameters
if conf.cookie:
conf.parameters["Cookie"] = conf.cookie
__paramDict = paramToDict("Cookie", conf.cookie)
if __paramDict:
conf.paramDict["Cookie"] = __paramDict
__testableParameters = True
# Perform checks on User-Agent header value
if conf.httpHeaders:
for httpHeader, headerValue in conf.httpHeaders:
if httpHeader == "User-Agent":
# No need for url encoding/decoding the user agent
conf.parameters["User-Agent"] = headerValue
condition = not conf.testParameter
condition |= "User-Agent" in conf.testParameter
condition |= "user-agent" in conf.testParameter
condition |= "useragent" in conf.testParameter
condition |= "ua" in conf.testParameter
if condition:
conf.paramDict["User-Agent"] = { "User-Agent": headerValue }
__testableParameters = True
if not conf.parameters:
errMsg = "you did not provide any GET, POST and Cookie "
errMsg += "parameter, neither an User-Agent header"
raise sqlmapGenericException, errMsg
elif not __testableParameters:
errMsg = "all testable parameters you provided are not present "
errMsg += "within the GET, POST and Cookie parameters"
raise sqlmapGenericException, errMsg
def __setOutputResume():
"""
Check and set the output text file and the resume functionality.
"""
if not conf.sessionFile:
conf.sessionFile = "%s%ssession" % (conf.outputPath, os.sep)
logger.info("using '%s' as session file" % conf.sessionFile)
if os.path.exists(conf.sessionFile):
if not conf.flushSession:
readSessionFP = codecs.open(conf.sessionFile, "r", conf.dataEncoding)
__url_cache = set()
__expression_cache = {}
for line in readSessionFP.readlines(): # xreadlines doesn't return unicode strings when codec.open() is used
if line.count("][") == 4:
line = line.split("][")
if len(line) != 5:
continue
url, _, _, expression, value = line
if not value:
continue
if url[0] == "[":
url = url[1:]
value = value.rstrip('\r\n') # Strips both chars independently
if url not in ( conf.url, conf.hostname ):
continue
if url not in __url_cache:
kb.resumedQueries[url] = {}
kb.resumedQueries[url][expression] = value
__url_cache.add(url)
__expression_cache[url] = set(expression)
resumeConfKb(expression, url, value)
if expression not in __expression_cache[url]:
kb.resumedQueries[url][expression] = value
__expression_cache[url].add(value)
elif len(value) >= len(kb.resumedQueries[url][expression]):
kb.resumedQueries[url][expression] = value
readSessionFP.close()
else:
try:
os.remove(conf.sessionFile)
logger.info("flushing session file")
except OSError, msg:
errMsg = "unable to flush the session file (%s)" % msg
raise sqlmapFilePathException, errMsg
try:
conf.sessionFP = codecs.open(conf.sessionFile, "a", conf.dataEncoding)
dataToSessionFile("\n[%s]\n" % time.strftime("%X %x"))
except IOError:
errMsg = "unable to write on the session file specified"
raise sqlmapFilePathException, errMsg
def __createFilesDir():
"""
Create the file directory.
"""
if not conf.rFile:
return
conf.filePath = paths.SQLMAP_FILES_PATH % conf.hostname
if not os.path.isdir(conf.filePath):
os.makedirs(conf.filePath, 0755)
def __createDumpDir():
"""
Create the dump directory.
"""
if not conf.dumpTable and not conf.dumpAll and not conf.search:
return
conf.dumpPath = paths.SQLMAP_DUMP_PATH % conf.hostname
if not os.path.isdir(conf.dumpPath):
os.makedirs(conf.dumpPath, 0755)
def __configureDumper():
if conf.xmlFile:
conf.dumper = xmldumper
else:
conf.dumper = dumper
conf.dumper.setOutputFile()
def __createTargetDirs():
"""
Create the output directory.
"""
conf.outputPath = "%s%s%s" % (paths.SQLMAP_OUTPUT_PATH, os.sep, conf.hostname)
if not os.path.isdir(paths.SQLMAP_OUTPUT_PATH):
os.makedirs(paths.SQLMAP_OUTPUT_PATH, 0755)
if not os.path.isdir(conf.outputPath):
os.makedirs(conf.outputPath, 0755)
__createDumpDir()
__createFilesDir()
__configureDumper()
def initTargetEnv():
"""
Initialize target environment.
"""
if conf.multipleTargets:
if conf.cj:
conf.cj.clear()
conf.paramDict = {}
conf.parameters = {}
conf.sessionFile = None
kb.dbms = None
kb.dbmsDetected = False
kb.dbmsVersion = [ "Unknown" ]
kb.injParameter = None
kb.injPlace = None
kb.injType = None
kb.parenthesis = None
kb.unionComment = ""
kb.unionCount = None
kb.unionPosition = None
def setupTargetEnv():
__createTargetDirs()
__setRequestParams()
__setOutputResume()
|
Python
| 0
|
@@ -4322,32 +4322,43 @@
onf.dataEncoding
+, 'replace'
)%0A __
|
a2169c737c21636cadb50bba3d5c6a25121a3efb
|
Fix test failures
|
spectral_cube/io/fits.py
|
spectral_cube/io/fits.py
|
from astropy.io import fits
from astropy.wcs import WCS
from astropy.extern import six
from astropy.utils import OrderedDict
from astropy.io.fits.hdu.hdulist import fitsopen as fits_open
from astropy.io.fits.util import first as first_hdu
import numpy as np
from .. import SpectralCube, StokesSpectralCube, LazyMask
from .. import cube_utils
# FITS registry code - once Astropy includes a proper extensible I/O base
# class, we can use that instead. The following code takes care of
# interpreting string input (filename), HDU, and HDUList.
def is_fits(input, **kwargs):
"""
Determine whether input is in FITS format
"""
if isinstance(input, six.string_types):
if input.lower().endswith(('.fits', '.fits.gz',
'.fit', '.fit.gz')):
return True
elif isinstance(input, (fits.HDUList, fits.PrimaryHDU, fits.ImageHDU)):
return True
else:
return False
def read_data_fits(input, hdu=None):
"""
Read an array and header from an FITS file.
Parameters
----------
input : str or compatible `astropy.io.fits` HDU object
If a string, the filename to read the table from. The
following `astropy.io.fits` HDU objects can be used as input:
- :class:`~astropy.io.fits.hdu.table.PrimaryHDU`
- :class:`~astropy.io.fits.hdu.table.ImageHDU`
- :class:`~astropy.io.fits.hdu.hdulist.HDUList`
hdu : int or str, optional
The HDU to read the table from.
"""
if isinstance(input, fits.HDUList):
# Parse all array objects
arrays = OrderedDict()
for ihdu, hdu_item in enumerate(input):
if isinstance(hdu_item, (fits.PrimaryHDU, fits.ImageHDU)):
arrays[ihdu] = hdu_item
if len(arrays) > 1:
if hdu is None:
hdu = first_dhu(arrays)
warnings.warn("hdu= was not specified but multiple arrays"
" are present, reading in first available"
" array (hdu={0})".format(hdu))
# hdu might not be an integer, so we first need to convert it
# to the correct HDU index
hdu = input.index_of(hdu)
if hdu in arrays:
array_hdu = arrays[hdu]
else:
raise ValueError("No array found in hdu={0}".format(hdu))
elif len(arrays) == 1:
array_hdu = arrays[first_hdu(arrays)]
else:
raise ValueError("No table found")
elif isinstance(input, (fits.PrimaryHDU, fits.ImageHDU)):
array_hdu = input
else:
hdulist = fits_open(input)
try:
return read_data_fits(hdulist, hdu=hdu)
finally:
hdulist.close()
return array_hdu.data, array_hdu.header
def load_fits_cube(input, hdu=0):
"""
Read in a cube from a FITS file using astropy.
Parameters
----------
input: str or HDU
The FITS cube file name or HDU
hdu: int
The extension number containing the data to be read
"""
data, header = read_data_fits(input, hdu=hdu)
meta = {}
wcs = WCS(header)
if wcs.wcs.naxis == 3:
data, wcs = cube_utils._orient(data, wcs)
mask = LazyMask(np.isfinite, data=data, wcs=wcs)
cube = SpectralCube(data, wcs, mask, meta=meta)
elif wcs.wcs.naxis == 4:
data, wcs = cube_utils._split_stokes(data, wcs)
mask = {}
for component in data:
data[component], wcs_slice = cube_utils._orient(data[component], wcs)
mask[component] = LazyMask(np.isfinite, data=data[component], wcs=wcs_slice)
cube = StokesSpectralCube(data, wcs_slice, mask, meta=meta)
else:
raise Exception("Data should be 3- or 4-dimensional")
return cube
def write_fits_cube(filename, cube, overwrite=False):
"""
Write a FITS cube with a WCS to a filename
"""
if isinstance(cube, SpectralCube):
outhdu = fits.PrimaryHDU(data=cube._data, header=cube._wcs.to_header())
outhdu.writeto(filename, clobber=overwrite)
else:
raise NotImplementedError()
|
Python
| 0.001261
|
@@ -1,20 +1,37 @@
+import warnings%0A%0A
from astropy.io impo
@@ -201,62 +201,8 @@
pen%0A
-from astropy.io.fits.util import first as first_hdu%0A%0A%0A
%0Aimp
@@ -302,16 +302,70 @@
_utils%0A%0A
+def first(iterable):%0A return next(iter(iterable))%0A%0A
%0A# FITS
@@ -1877,12 +1877,8 @@
irst
-_dhu
(arr
@@ -2475,20 +2475,16 @@
ys%5Bfirst
-_hdu
(arrays)
|
cc12728d7160a10f0c182c0cccfde0fd15cadb75
|
Add a reset function stub
|
spicedham/basewrapper.py
|
spicedham/basewrapper.py
|
class BaseWrapper(object):
"""
A base class for backend plugins.
"""
def get_key(self, tag, key, default=None):
"""
Gets the value held by the tag, key composite key. If it doesn't exist,
return default.
"""
raise NotImplementedError()
def get_key_list(self, tag, keys, default=None):
"""
Given a list of key, tag tuples get all values.
If key, tag doesn't exist, return default.
Subclasses can override this to make more efficient queries for bulk
requests.
"""
return [self.get_key(tag, key, default) for tag, key in key_tag_pairs]
def set_key_list(self, tag, key_value_tuples):
"""
Given a list of tuples of tag, key, value set them all.
Subclasses can override this to make more efficient queries for bulk
requests.
"""
return [self.set_key(tag, key, value) for tag, key, value in tag_key_value_tuples]
def set_key(self, tag, key, value):
"""
Set the value held by the tag, key composite key.
"""
raise NotImplementedError()
|
Python
| 0
|
@@ -76,16 +76,181 @@
%22%22%22%0A%0A
+ def reset(self, really):%0A %22%22%22%0A Resets the training data to a blank slate.%0A %22%22%22%0A if really:%0A raise NotImplementedError()%0A%0A%0A
def
@@ -844,18 +844,17 @@
elf, tag
-,
+_
key_valu
|
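A hypothetical in-memory backend sketch showing how a subclass would satisfy the new reset() stub; the dict-backed storage is an assumption, not part of spicedham:

class DictWrapper(BaseWrapper):
    def __init__(self):
        self._store = {}

    def reset(self, really):
        # the 'really' flag guards against accidental wipes
        if really:
            self._store.clear()

    def get_key(self, tag, key, default=None):
        return self._store.get((tag, key), default)

    def set_key(self, tag, key, value):
        self._store[(tag, key)] = value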
f6722960a33a51d0682c4c21a6e46d03f53b80f0
|
simplify max_score calc more
|
app/models/golf_round.py
|
app/models/golf_round.py
|
from app import db
class GolfRound(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
tee_id = db.Column(db.Integer, db.ForeignKey('tee.id'))
date = db.Column(db.DateTime)
notes = db.Column(db.String(128))
total_score = db.Column(db.Integer)
total_putts = db.Column(db.Integer)
total_gir = db.Column(db.Integer)
handicap_index = db.Column(db.Float)
scores = db.relationship('HoleScore', backref='round', lazy='dynamic',
cascade="save-update, delete")
def get_score_for_hole(self, hole):
return self.scores.filter_by(hole=hole).first()
def calc_totals(self):
self.total_score, self.total_putts, self.total_gir = 0, 0, 0
for s in self.scores:
self.total_score += s.score
self.total_putts += s.putts
self.total_gir += s.gir
def calc_handicap(self):
def calc_diff(self):
if self == self.user.get_rounds()[0]:
return self.total_score
old_handicap = self.user.get_previous_round(self).handicap_index
course_handicap = round(old_handicap * self.tee.slope / 113, 0)
if course_handicap < 10:
# TODO: max is double bogey. this needs to be fixed
max_score = 7
else:
rounded_handicap = course_handicap + 10 - course_handicap % 10
max_score = int(rounded_handicap / 10 + 5)
adj_score = sum([min(max_score, s.score) for s in self.scores])
rating = self.tee.rating * len(self.scores.all()) / 18
slope = self.tee.slope * len(self.scores.all()) / 18
return (adj_score - rating) * 113 / slope
rounds = self.user.get_rounds()
round_idx = rounds.index(self)
rounds = rounds[max(0, round_idx - 19):round_idx + 1]
if len(rounds) < 5:
# not enough rounds yet
self.handicap_index = 50.0
return
diffs_used_table = {
5: 1, 6: 1, 7: 2, 8: 2, 9: 3, 10: 3, 11: 4, 12: 4,
13: 5, 14: 5, 15: 6, 16: 6, 17: 7, 18: 8, 19: 9, 20: 10
}
num_of_diffs_used = diffs_used_table[len(rounds)]
diffs = sorted([calc_diff(r) for r in rounds])[:num_of_diffs_used]
handicap_str = str(sum(diffs) / len(diffs) * .96)
self.handicap_index = float(handicap_str[:handicap_str.find('.') + 2])
def __repr__(self):
return '<Round %r>' % (self.date)
|
Python
| 0.000018
|
@@ -956,835 +956,8 @@
f):%0A
- def calc_diff(self):%0A if self == self.user.get_rounds()%5B0%5D:%0A return self.total_score%0A old_handicap = self.user.get_previous_round(self).handicap_index%0A course_handicap = round(old_handicap * self.tee.slope / 113, 0)%0A if course_handicap %3C 10:%0A # TODO: max is double bogey. this needs to be fixed%0A max_score = 7%0A else:%0A rounded_handicap = course_handicap + 10 - course_handicap %25 10%0A max_score = int(rounded_handicap / 10 + 5)%0A%0A adj_score = sum(%5Bmin(max_score, s.score) for s in self.scores%5D)%0A rating = self.tee.rating * len(self.scores.all()) / 18%0A slope = self.tee.slope * len(self.scores.all()) / 18%0A return (adj_score - rating) * 113 / slope%0A%0A
@@ -1660,16 +1660,707 @@
+ 2%5D)%0A%0A
+ def calc_diff(self):%0A if self == self.user.get_rounds()%5B0%5D:%0A return self.total_score%0A old_handicap = self.user.get_previous_round(self).handicap_index%0A course_handicap = round(old_handicap * self.tee.slope / 113, 0)%0A if course_handicap %3C 10:%0A # TODO: max is double bogey. this needs to be fixed%0A max_score = 7%0A else:%0A max_score = int(course_handicap / 10 + 6)%0A%0A adj_score = sum(%5Bmin(max_score, s.score) for s in self.scores%5D)%0A rating = self.tee.rating * len(self.scores.all()) / 18%0A slope = self.tee.slope * len(self.scores.all()) / 18%0A return (adj_score - rating) * 113 / slope%0A%0A
def
|
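A quick check of the simplification in this diff: rounding the course handicap up to the next multiple of ten and taking rounded/10 + 5 equals int(handicap/10 + 6) for the whole-number handicaps that round() produces.

for course_handicap in range(10, 41):
    rounded = course_handicap + 10 - course_handicap % 10
    old_max = int(rounded / 10 + 5)
    new_max = int(course_handicap / 10 + 6)
    assert old_max == new_max, course_handicap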
b5f85e30ccabe198c939329e2bb847d34f0e1235
|
Fix list.sort() argument passing
|
sqlalchemy_json/track.py
|
sqlalchemy_json/track.py
|
"""This module contains the tracked object classes.
TrackedObject forms the basis for both the TrackedDict and the TrackedList.
A function for automatic conversion of dicts and lists to their tracked
counterparts is also included.
"""
import itertools
import logging
from six import iteritems
from sqlalchemy.ext.mutable import Mutable
logger = logging.getLogger(__name__)
class TrackedObject(object):
"""A base class for delegated change-tracking."""
_type_mapping = {}
def __init__(self, *args, **kwds):
self.parent = None
super(TrackedObject, self).__init__(*args, **kwds)
def changed(self, message=None, *args):
"""Marks the object as changed.
If a `parent` attribute is set, the `changed()` method on the parent
will be called, propagating the change notification up the chain.
The message (if provided) will be debug logged.
"""
if message is not None:
logger.debug('%s: %s', self._repr(), message % args)
logger.debug('%s: changed', self._repr())
if self.parent is not None:
self.parent.changed()
elif isinstance(self, Mutable):
super(TrackedObject, self).changed()
@classmethod
def register(cls, origin_type):
"""Decorator for mutation tracker registration.
The provided `origin_type` is mapped to the decorated class such that
future calls to `convert()` will convert the object of `origin_type`
to an instance of the decorated class.
"""
def decorator(tracked_type):
"""Adds the decorated class to the `_type_mapping` dictionary."""
cls._type_mapping[origin_type] = tracked_type
return tracked_type
return decorator
@classmethod
def convert(cls, obj, parent):
"""Converts objects to registered tracked types
This checks the type of the given object against the registered tracked
types. When a match is found, the given object will be converted to the
tracked type, its parent set to the provided parent, and returned.
If its type does not occur in the registered types mapping, the object
is returned unchanged.
"""
replacement_type = cls._type_mapping.get(type(obj))
if replacement_type is not None:
new = replacement_type(obj)
new.parent = parent
return new
return obj
def convert_iterable(self, iterable):
"""Generator to `convert` every member of the given iterable."""
return (self.convert(item, self) for item in iterable)
def convert_items(self, items):
"""Generator like `convert_iterable`, but for 2-tuple iterators."""
return ((key, self.convert(value, self)) for key, value in items)
def convert_mapping(self, mapping):
"""Convenience method to track either a dict or a 2-tuple iterator."""
if isinstance(mapping, dict):
return self.convert_items(iteritems(mapping))
return self.convert_items(mapping)
def _repr(self):
"""Simple object representation."""
return '<%(namespace)s.%(type)s object at 0x%(address)0xd>' % {
'namespace': __name__,
'type': type(self).__name__,
'address': id(self)}
@TrackedObject.register(dict)
class TrackedDict(TrackedObject, dict):
"""A TrackedObject implementation of the basic dictionary."""
def __init__(self, source=(), **kwds):
super(TrackedDict, self).__init__(itertools.chain(
self.convert_mapping(source),
self.convert_mapping(kwds)))
def __setitem__(self, key, value):
self.changed('__setitem__: %r=%r', key, value)
super(TrackedDict, self).__setitem__(key, self.convert(value, self))
def __delitem__(self, key):
self.changed('__delitem__: %r', key)
super(TrackedDict, self).__delitem__(key)
def clear(self):
self.changed('clear')
super(TrackedDict, self).clear()
def pop(self, *key_and_default):
self.changed('pop: %r', key_and_default)
return super(TrackedDict, self).pop(*key_and_default)
def popitem(self):
self.changed('popitem')
return super(TrackedDict, self).popitem()
def update(self, source=(), **kwds):
self.changed('update(%r, %r)', source, kwds)
super(TrackedDict, self).update(itertools.chain(
self.convert_mapping(source),
self.convert_mapping(kwds)))
def setdefault(self, key, default=None):
if key in self:
return self[key]
# this calls __setitem__, which converts the value and calls changed()
self[key] = default
# the value at self[key] may be a new TrackedObject, so return
# self[key] instead of default
return self[key]
@TrackedObject.register(list)
class TrackedList(TrackedObject, list):
"""A TrackedObject implementation of the basic list."""
def __init__(self, iterable=()):
super(TrackedList, self).__init__(self.convert_iterable(iterable))
def __setitem__(self, key, value):
self.changed('__setitem__: %r=%r', key, value)
super(TrackedList, self).__setitem__(key, self.convert(value, self))
def __delitem__(self, key):
self.changed('__delitem__: %r', key)
super(TrackedList, self).__delitem__(key)
def append(self, item):
self.changed('append: %r', item)
super(TrackedList, self).append(self.convert(item, self))
def extend(self, iterable):
self.changed('extend: %r', iterable)
super(TrackedList, self).extend(self.convert_iterable(iterable))
def remove(self, value):
self.changed('remove: %r', value)
return super(TrackedList, self).remove(value)
def pop(self, index):
self.changed('pop: %d', index)
return super(TrackedList, self).pop(index)
def sort(self, cmp=None, key=None, reverse=False):
self.changed('sort')
super(TrackedList, self).sort(cmp=cmp, key=key, reverse=reverse)
|
Python
| 0.000413
|
@@ -5974,16 +5974,12 @@
lf,
-cmp=None
+/, *
, ke
@@ -6073,17 +6073,8 @@
ort(
-cmp=cmp,
key=
|
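The shape of the replacement signature, as a standalone sketch: cmp= no longer exists in Python 3, and '/' (positional-only, Python 3.8+) plus '*' mirror list.sort's keyword-only key/reverse:

class TrackedList(list):
    def sort(self, /, *, key=None, reverse=False):
        # delegate without the removed cmp argument
        super().sort(key=key, reverse=reverse)

items = TrackedList([3, 1, 2])
items.sort(reverse=True)   # [3, 2, 1]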
ac1e5b9a2008b8ce125368cc3a3ed3f9efcc4c66
|
Fix GitHub build URL
|
ci/lambdas/codebuild-callback-handler/lambda_function.py
|
ci/lambdas/codebuild-callback-handler/lambda_function.py
|
# Invoked by: SNS Subscription
# Returns: Error or status message
#
# Triggered after a CodeBuild run finishes and is responsible for updating
# the GitHub status, and sending some notifications.
import boto3
import traceback
import os
import zlib
import zipfile
import json
import re
import uuid
import urllib.request
from botocore.client import Config
s3 = boto3.client('s3', config=Config(signature_version='s3v4'))
sns = boto3.client('sns')
USER_AGENT = 'PRX/Infrastructure (codebuild-callback-handler)'
def update_github_status(data):
print('...Updating GitHub status...')
github_token = os.environ['GITHUB_ACCESS_TOKEN']
headers = {}
headers['User-Agent'] = USER_AGENT
headers['Accept'] = 'application/vnd.github.v3+json'
headers['Authorization'] = "token {0}".format(github_token)
api = 'api.github.com'
repo = data['prxRepo']
sha = data['prxCommit']
arn = data['buildArn']
region = arn.split(':')[3]
buildId = arn.split('/')[1]
buildUrl = "https://${0}.console.aws.amazon.com/codebuild/home#/builds/${1}/view/new".format(region, buildId)
state = 'success' if data['success'] else 'failure'
description = 'Build complete' if data['success'] else data['reason']
json_body = json.dumps({
'state': state,
'target_url': buildUrl,
'description': description,
'context': 'continuous-integration/prxci',
}).encode('utf8')
api_url = "https://{0}/repos/{1}/statuses/{2}".format(api, repo, sha)
print(f"...Requesting {api_url}...")
req = urllib.request.Request(api_url, data=json_body, headers=headers)
urllib.request.urlopen(req)
def update_staging_config_status(data):
if 'prxEcrTag' in data:
print('...Updating Staging template config...')
# Get current staging template config
source_bucket = os.environ['INFRASTRUCTURE_CONFIG_BUCKET']
source_key = os.environ['INFRASTRUCTURE_CONFIG_STAGING_KEY']
archive_path = "/tmp/{0}".format(source_key)
print(f"...Getting staging config: {source_bucket}/{source_key}...")
s3.download_file(source_bucket, source_key, archive_path)
with zipfile.ZipFile(archive_path, 'r') as archive:
staging_config = json.load(archive.open('staging.json'))
# Update config with new ECR Tag for the appropriate app
sha = data['prxCommit']
ecr_tag = sha[:7]
repo = data['prxRepo']
short_name = repo.replace('PRX/', '').replace('.prx.org', '')
key_name = "{0}EcrImageTag".format(short_name.capitalize())
print(f"...Setting {key_name} to {ecr_tag}...")
staging_config['Parameters'][key_name] = ecr_tag
# Zip the new config up
new_archive_path = "/tmp/{0}".format(uuid.uuid4())
body = json.dumps(staging_config)
# TODO Should be able to do this all in memory
archive = zipfile.ZipFile(new_archive_path, mode='w')
archive.writestr('staging.json', body, compress_type=zipfile.ZIP_DEFLATED)
archive.close()
# Send back to S3
print(f"...Uploading to S3 {source_bucket}/{source_key}...")
s3.upload_file(new_archive_path, source_bucket, source_key)
def post_notification_status(data):
print('...Posting build status notification...')
topic_arn = os.environ['CI_STATUS_TOPIC_ARN']
message = json.dumps({ 'callback': data })
sns.publish(TopicArn=topic_arn, Message=message)
def lambda_handler(event, context):
callback_object = json.loads(event['Records'][0]['Sns']['Message'])
update_github_status(callback_object)
update_staging_config_status(callback_object)
post_notification_status(callback_object)
|
Python
| 0.021504
|
@@ -1009,17 +1009,16 @@
https://
-$
%7B0%7D.cons
@@ -1063,9 +1063,8 @@
lds/
-$
%7B1%7D/
|
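The bug in one line: '$' is not part of str.format placeholder syntax, so it survived into the generated URL.

region = 'us-east-1'
"https://${0}.console.aws.amazon.com".format(region)  # 'https://$us-east-1...'
"https://{0}.console.aws.amazon.com".format(region)   # 'https://us-east-1...'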
64df3095505b5651626536e08810edfb282b888b
|
Fix introduced bug in creating the list of bases
|
kdotp_symmetry/_symmetric_hamiltonian.py
|
kdotp_symmetry/_symmetric_hamiltonian.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Dominik Gresch <greschd@gmx.ch>
from collections import namedtuple
import sympy as sp
from sympy.physics.quantum import TensorProduct
import numpy as np
import scipy.linalg as la
from fsc.export import export
from ._expr_utils import expr_to_vector, monomial_basis, matrix_to_expr_operator
from ._repr_utils import hermitian_to_vector, hermitian_basis, repr_to_matrix_operator
from ._linalg import intersection_basis
from ._to_matrix import to_matrix
SymmetryOperation = namedtuple('SymmetryOperation', ['kmatrix', 'repr'])
SymmetryOperation.__doc__ = 'Describes a symmetry operation.'
SymmetryOperation.kmatrix.__doc__ = r'The :math:`\mathbf{k}`-space matrix corresponding to the symmetry operation.'
SymmetryOperation.repr.__doc__ = 'The :class:`.Representation` instance corresponding to the symmetry operation.'
Representation = namedtuple('Representation', ['matrix', 'complex_conjugate'])
Representation.__doc__ = 'Describes an (anti-)unitary representation of a symmetry operation.'
Representation.matrix.__doc__ = 'The unitary matrix corresponding to the representation.'
Representation.complex_conjugate.__doc__ = r'Flag to specify whether the representation is just a unitary matrix :math:`D(g)=U` (``False``) or contains a complex conjugation :math:`D(g)=U\hat{K}` (``True``).'
__all__ = ['Representation', 'SymmetryOperation']
@export
def symmetric_hamiltonian(*symmetry_operations, expr_basis, repr_basis='auto'):
"""
Calculates the basis of the symmetric Hamiltonian for a given set of symmetry operations.
:param symmetry_operations: The symmetry operations that the Hamiltonian should respect.
:type symmetry_operations: SymmetryOperation
:param expr_basis: The basis for the :math:`\mathbf{k}`-functions that are considered.
:type expr_basis: :py:class:`list` of :py:mod:`sympy` expressions
:param repr_basis: The basis for the hermitian matrices, with the same size as the representations. By default, the :py:func:`.hermitian_basis` of the appropriate size is used.
:type repr_basis: :py:class:`list` of :py:mod:`sympy` matrices
:returns: Basis for the symmetric Hamiltonian, as a :py:class:`list` of :py:mod:`sympy` matrix expressions.
"""
expr_dim = len(expr_basis)
# for sympy or numpy matrices
try:
repr_matrix_size = symmetry_operations[0].repr.matrix.shape[0]
# for plain lists -- this doesn't work for sympy matrices because
# their 'len' is the total number of elements
except AttributeError:
repr_matrix_size = len(symmetry_operations[0].repr.matrix)
if repr_basis == 'auto':
repr_basis = hermitian_basis(repr_matrix_size)
repr_dim = len(repr_basis)
full_dim = expr_dim * repr_dim
full_basis = [
sp.Matrix(x) for x in
np.outer(expr_basis, repr_basis).reshape(full_dim, repr_matrix_size, repr_matrix_size).tolist()
]
invariant_bases = []
for op in symmetry_operations:
# create the matrix form of the two operators
expr_mat = to_matrix(
operator=matrix_to_expr_operator(op.kmatrix),
basis=expr_basis,
to_vector_fct=expr_to_vector
)
repr_mat = to_matrix(
operator=repr_to_matrix_operator(*op.repr),
basis=repr_basis,
to_vector_fct=hermitian_to_vector
)
# outer product
full_mat = TensorProduct(expr_mat, repr_mat)
# get Eig(F \ocross G, 1) basis
mat = full_mat - sp.eye(full_dim)
curr_basis = mat.nullspace(simplify=sp.nsimplify)
assert len(curr_basis) == _numeric_nullspace_dim(mat)
invariant_bases.append(curr_basis)
basis_vectors = intersection_basis(*invariant_bases)
basis_vectors_expanded = []
for vec in basis_vectors:
basis_vectors_expanded.append(
sum((v * b for v, b in zip(vec, full_basis)), sp.zeros(repr_matrix_size))
)
return basis_vectors_expanded
def _numeric_nullspace_dim(mat):
mat_numeric = np.array(mat.evalf().tolist(), dtype=complex)
eigenvals = la.eigvals(mat_numeric)
return np.sum(np.isclose(eigenvals, np.zeros_like(eigenvals)))
|
Python
| 0
|
@@ -3568,16 +3568,25 @@
basis =
+np.array(
mat.null
@@ -3613,16 +3613,26 @@
implify)
+).tolist()
%0A
|
4334610f0e25647e81d7769b664ac58e3c754093
|
Remove some unnecessary packages from windows installer
|
buildmsi.py
|
buildmsi.py
|
#!/usr/bin/env python2.7
""" Build Windows MSI distributions.
Requirements:
- A 'windeps' folder with all of the *.exe installers listed in
BINARY_PACKAGES, available at http://www.lfd.uci.edu/~gohlke/pythonlibs/
in both 32 and 64 bit.
- 'pywin32' installers for 32 and 64 bit in 'windeps' folder, available
at http://sourceforge.net/projects/pywin32/files/pywin32/
- spreads and all of its dependencies installed in the present Python
environment (*not* with pip's '-e' flag!)
- 'pynsist' package must be installed in the present Python environment,
currently (2014/04/11) from GitHub master, not from PyPi.
- 'nsis' executable must be on $PATH ('apt-get install nsis' on Debian
systems)
Run:
$ python buildmsi.py
When complete, MSIs can be found under 'build/msi{32,64}/spreads_{version}.exe'
"""
import os
import shutil
import sys
import tempfile
import zipfile
from collections import namedtuple
import pkg_resources
from nsist import InstallerBuilder
from spreads.vendor.pathlib import Path
import spreads
class SourceDep(namedtuple('SourceDep', ['project_name', 'module_name'])):
def __new__(cls, project_name, module_name=None):
if module_name is None:
module_name = project_name
return super(SourceDep, cls).__new__(cls, project_name, module_name)
BINARY_PACKAGES = {
"MarkupSafe": "MarkupSafe-0.19.{arch}-py2.7.exe",
"psutil": "psutil-2.1.0.{arch}-py2.7.exe",
"PyYAML": "PyYAML-3.11.{arch}-py2.7.exe",
"tornado": "tornado-3.2.{arch}-py2.7.exe",
"setuptools": "setuptools-3.4.1.{arch}-py2.7.exe"
}
SOURCE_PACKAGES = [
SourceDep("spreads"),
SourceDep(None, "spreadsplug"),
SourceDep("Flask", "flask"),
SourceDep("Jinja2", "jinja2"),
SourceDep("Werkzeug", "werkzeug"),
SourceDep("backports.ssl-match-hostname", "backports"),
SourceDep("blinker"),
SourceDep("colorama"),
SourceDep("futures", "concurrent"),
SourceDep("itsdangerous"),
SourceDep("pyusb", "usb"),
SourceDep("requests"),
SourceDep("waitress"),
SourceDep("zipstream"),
SourceDep("roman"),
SourceDep("Wand", "wand"),
SourceDep("isbnlib"),
]
EXTRA_FILES = [
"ImageMagick-6.5.6-8-Q8-windows-dll.exe",
"pyexiv2-0.3.2{arch}.exe",
"pywin32-2.7.6{arch}.exe",
"scantailor-enhanced-20140214-32bit-install.exe",
"tesseract-ocr-setup-3.02.02.exe",
"chdkptp",
"pdfbeads.exe",
"jbig2.exe",
]
def extract_native_pkg(fname, pkg_dir):
zf = zipfile.ZipFile(unicode(Path('win_deps')/'python'/fname))
tmpdir = Path(tempfile.mkdtemp())
zf.extractall(unicode(tmpdir))
fpaths = []
if (tmpdir/'PLATLIB').exists():
fpaths += [p for p in (tmpdir/'PLATLIB').iterdir()]
if (tmpdir/'PURELIB').exists():
fpaths += [p for p in (tmpdir/'PURELIB').iterdir()]
for path in fpaths:
if path.is_dir():
shutil.copytree(unicode(path), unicode(pkg_dir/path.name))
else:
shutil.copy2(unicode(path), unicode(pkg_dir/path.name))
shutil.rmtree(unicode(tmpdir))
def copy_info(pkg, pkg_dir):
try:
dist = pkg_resources.get_distribution(pkg)
except pkg_resources.DistributionNotFound:
raise IOError("No distribution could be found for {0}!".format(pkg))
if dist.location == os.getcwd():
egg_name = dist.project_name
else:
egg_name = dist.egg_name()
egg_path = Path(dist.location)/(egg_name + ".egg-info")
dist_path = Path(dist.location)/(dist.project_name + "-" + dist.version
+ ".dist-info")
if egg_path.exists():
src_path = egg_path
elif dist_path.exists():
src_path = dist_path
else:
raise IOError("No egg-info or dist-info could be found for {0}!"
.format(pkg))
if src_path.is_dir():
shutil.copytree(unicode(src_path), unicode(pkg_dir/src_path.name))
else:
shutil.copy2(unicode(src_path), unicode(pkg_dir/src_path.name))
def build_msi(bitness=32):
egg_path = Path('spreads.egg-info')
if egg_path.exists():
shutil.rmtree(unicode(egg_path))
build_path = Path('build')
if not build_path.exists():
build_path.mkdir()
pkg_dir = build_path/'pynsist_pkgs'
if pkg_dir.exists():
shutil.rmtree(unicode(pkg_dir))
pkg_dir.mkdir()
for pkg in BINARY_PACKAGES.itervalues():
arch = 'win32' if bitness == 32 else 'win-amd64'
extract_native_pkg(pkg.format(arch=arch), pkg_dir)
for pkg in (x.project_name for x in SOURCE_PACKAGES
if x.project_name is not None):
copy_info(pkg, pkg_dir)
icon = os.path.abspath("spreads.ico")
extra_files = [(unicode((Path('win_deps')/'extra'/
x.format(arch='.amd64' if bitness == 64 else ''))
.absolute()), None) for x in EXTRA_FILES]
nsi_template = os.path.abspath("template.nsi")
# NOTE: We need to remove the working directory from sys.path to force
# pynsist to copy all of our modules, including 'spreads' and 'spreadsplug'
# from the site-packages. Additionally, we need to change into the
# build directory.
if os.getcwd() in sys.path:
sys.path.remove(os.getcwd())
os.chdir(unicode(build_path))
builder = InstallerBuilder(
appname="spreads",
version=spreads.__version__,
packages=[x.module_name for x in SOURCE_PACKAGES],
extra_files=extra_files,
py_version="2.7.6",
py_bitness=bitness,
build_dir='msi{0}'.format(bitness),
installer_name=None,
nsi_template=nsi_template,
icon=icon,
shortcuts={
'Configure spreads': {
'entry_point': 'spreads.main:run_config_windows',
'icon': icon,
'console': False},
'Spreads Web Service': {
'entry_point': 'spreads.main:run_service_windows',
'icon': icon,
'console': False}
}
)
builder.run()
os.chdir('..')
|
Python
| 0.000001
|
@@ -2001,39 +2001,8 @@
%22),%0A
- SourceDep(%22pyusb%22, %22usb%22),%0A
@@ -2384,23 +2384,8 @@
e%22,%0A
- %22chdkptp%22,%0A
|
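Why extract_native_pkg can treat a bdist_wininst .exe as a ZIP: the installer carries a ZIP archive at its tail, and zipfile locates archives from the end of the file. The file name below is hypothetical.

import zipfile

zf = zipfile.ZipFile('PyYAML-3.11.win32-py2.7.exe')
print([n for n in zf.namelist()
       if n.startswith(('PLATLIB', 'PURELIB'))][:5])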
22f79d326a8c0cd6763a0a7262df51d9ef213f18
|
Fix bug where required parameters were not correctly identified in Python 2.7.
|
src/azure/cli/commands/_auto_command.py
|
src/azure/cli/commands/_auto_command.py
|
from __future__ import print_function
import inspect
import sys
from msrest.paging import Paged
from msrest.exceptions import ClientException
from azure.cli.parser import IncorrectUsageError
from ..commands import COMMON_PARAMETERS
EXCLUDED_PARAMS = frozenset(['self', 'raw', 'custom_headers', 'operation_config'])
class AutoCommandDefinition(object): #pylint: disable=too-few-public-methods
def __init__(self, operation, return_type, command_alias=None):
self.operation = operation
self.return_type = return_type
self.opname = command_alias if command_alias else operation.__name__.replace('_', '-')
def _decorate_option(command_table, func, name, **kwargs):
return command_table.option(name, kwargs=kwargs['kwargs'])(func)
def _get_member(obj, path):
"""Recursively walk down the dot-separated path
to get child item.
Ex. a.b.c would get the property 'c' of property 'b' of the
object a
"""
path = path or ''
for segment in path.split('.'):
try:
obj = getattr(obj, segment)
except AttributeError:
pass
return obj
def _make_func(client_factory, member_path, return_type_or_func, unbound_func):
def call_client(args):
client = client_factory(args)
ops_instance = _get_member(client, member_path)
# TODO: Remove this conversion code once internal key references are updated (#116797761)
converted_params = {}
for key in args.keys():
converted_key = key.replace('-', '_')
converted_params[converted_key] = args[key]
try:
result = unbound_func(ops_instance, **converted_params)
if not return_type_or_func:
return {}
if callable(return_type_or_func):
return return_type_or_func(result)
if isinstance(return_type_or_func, str):
return list(result) if isinstance(result, Paged) else result
except TypeError as exception:
# TODO: Evaluate required/missing parameters and provide specific
# usage for missing params...
raise IncorrectUsageError(exception)
except ClientException as client_exception:
# TODO: Better error handling for cloud exceptions...
message = getattr(client_exception, 'message', client_exception)
print(message, file=sys.stderr)
return call_client
def _option_description(operation, arg):
"""Pull out parameter help from doccomments of the command
"""
# TODO: We are currently doing this for every option/argument.
# We should do it (at most) once for a given command...
return ' '.join(l.split(':')[-1] for l in inspect.getdoc(operation).splitlines()
if l.startswith(':param') and arg + ':' in l)
#pylint: disable=too-many-arguments
def build_operation(command_name,
member_path,
client_type,
operations,
command_table,
common_parameters=None,
extra_parameters=None):
merged_common_parameters = COMMON_PARAMETERS.copy()
merged_common_parameters.update(common_parameters or {})
extra_parameters = extra_parameters or {}
for op in operations:
func = _make_func(client_type, member_path, op.return_type, op.operation)
args = []
try:
# only supported in python3 - falling back to argspec if not available
sig = inspect.signature(op.operation)
args = sig.parameters
except AttributeError:
sig = inspect.getargspec(op.operation) #pylint: disable=deprecated-method
args = sig.args
options = []
for arg in [a for a in args if not a in EXCLUDED_PARAMS]:
try:
# this works in python3
default = args[arg].default
required = default == inspect.Parameter.empty # pylint: disable=no-member
except TypeError:
arg_defaults = dict(zip(sig.args[-len(sig.defaults):], sig.defaults))
default = arg_defaults[arg] if arg in arg_defaults else None
required = False if default else True
action = 'store_' + str(not default).lower() if isinstance(default, bool) else None
common_param = merged_common_parameters.get(arg, {
'name': '--' + arg.replace('_', '-'),
'required': required,
'default': default,
'help': _option_description(op.operation, arg),
'action': action
}).copy() # We need to make a copy to allow consumers to mutate the value
# retrieved from the common parameters without polluting future
# use...
common_param['dest'] = common_param.get('dest', arg)
options.append(common_param)
# append any 'extra' args needed (for example to obtain a client) that aren't required
# by the SDK.
for arg in extra_parameters.values():
options.append(arg.copy())
command_table[func] = {
'name': ' '.join([command_name, op.opname]),
'handler': func,
'arguments': options
}
|
Python
| 0
|
@@ -4241,34 +4241,31 @@
d =
-False if default else True
+arg not in arg_defaults
%0A%0A
|
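The Python 2.7 fallback bug, reproduced on a toy function: the old truthiness test marked any parameter with a falsy default (0, None, False) as required; membership in the defaults dict is the correct check.

import inspect

def op(self, name, raw=False, timeout=0, tags=None):
    pass

spec = inspect.getfullargspec(op)   # inspect.getargspec on Python 2.7
arg_defaults = dict(zip(spec.args[-len(spec.defaults):], spec.defaults))
for arg in spec.args[1:]:
    default = arg_defaults.get(arg)
    buggy = False if default else True   # True for raw/timeout/tags as well
    fixed = arg not in arg_defaults      # True only for 'name'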
8659ffef5dab72bb81764ecc286f4324e3c5b03b
|
Use Issue class for dh_groups analysis
|
analysis.py
|
analysis.py
|
import datetime
import enum
import math
import os
import subprocess
import tempfile
from algorithms import *
from sshmessage import *
def score(issues):
max_score = {}
for issue in issues:
if issue.what in max_score:
max_score[issue.what] = max(max_score[issue.what], issue.severity.value)
else:
max_score[issue.what] = issue.severity.value
return sum(max_score.values())
class ModuliFile(object):
def __init__(self, moduli_file):
self.__file = moduli_file
def read(self):
line = self.__file.readline()
groups = []
while line:
parts = line.split(" ")
groups.append(DHGEXGroup(generator=int(parts[5], 16), prime=int(parts[6], 16)))
line = self.__file.readline()
return groups
def write(self, group):
try:
iter(group)
for g in group:
self.write(g)
except TypeError as ex:
print(
datetime.datetime.now().strftime("%Y%m%d%H%M%S"),
"0", # type
"0", # tests
"0", # trials
str(math.floor(math.log(group.prime, 2))),
hex(group.generator)[2:],
hex(group.prime)[2:],
file=self.__file
)
def analyze_kex_init(kex_init):
issues = []
issues += analyze_kex_algorithms(kex_init)
issues += analyze_host_key_algorithms(kex_init)
downgrade_resistant = is_downgrade_resistant(issues)
issues += analyze_authenticated_encryption(
kex_init.encryption_algorithms_c2s,
kex_init.mac_algorithms_c2s,
downgrade_resistant
)
if not is_symmetric(kex_init):
issues += analyze_authenticated_encryption(
kex_init.encryption_algorithms_s2c,
kex_init.mac_algorithms_s2c,
downgrade_resistant
)
return issues
def is_downgrade_resistant(issues):
for issue in issues:
if issue.severity >= Severity.warning and issue.what in [ "weak key exchange hash", "small DH group" ]:
return False
return True
def is_symmetric(kex_init):
if kex_init.encryption_algorithms_c2s != kex_init.encryption_algorithms_s2c:
return False
if kex_init.mac_algorithms_c2s != kex_init.mac_algorithms_s2c:
return False
return True
def analyze_authenticated_encryption(encryption_algorithms, mac_algorithms, best_case):
choices = []
worst = []
for cipher_algo in encryption_algorithms:
if cipher_algo not in known_ciphers:
choices.append(authenticated_encryption_issues(None, None, Issue(Severity.info, "unknown cipher", encr_algo)))
continue
cipher = known_ciphers[cipher_algo]
if cipher.mode == CipherMode.AEAD:
choices.append(authenticated_encryption_issues(cipher, None))
continue
for mac_algo in mac_algorithms:
if mac_algo not in known_macs:
choices.append(authenticated_encryption_issues(cipher, None, Issue(Severity.info, "unknown MAC", mac_algo)))
continue
mac = known_macs[mac_algo]
choices.append(authenticated_encryption_issues(cipher, mac))
for choice in choices:
if best_case: return choice
if score(worst) < score(choice):
worst = choice
return worst
def authenticated_encryption_issues(cipher, mac, *unknowns):
issues = []
if cipher:
issues += cipher.issues
if mac:
issues += mac.issues
if cipher and mac and cipher.mode == CipherMode.CBC and mac.mode == MACMode.EAM:
issues.append(Issue(Severity.warning, "CBC-and-MAC"))
issues += unknowns
return issues
def analyze_kex_algorithms(kex_init):
issues = []
for algo in kex_init.kex_algorithms:
issues += known_kex_algorithms.get(
algo,
[ Issue(Severity.info, "unknown key exchange algorithm", algo) ]
)
return issues
def analyze_host_key_algorithms(kex_init):
return []
def analyze_dh_groups(dh_groups):
issues = []
for group in dh_groups:
size = math.ceil(math.log(group.prime, 2))
if size <= 2**10:
issues.append(Severity.error, "small DH group", str(size) + " bits", group)
elif size <= 2**10 + 2**9:
issues.append(Severity.warning, "small DH group", str(size) + " bits", group)
( input_fd, input_name ) = tempfile.mkstemp()
with open(input_fd, "w") as input_file:
moduli = ModuliFile(input_file)
moduli.write(dh_groups)
( output_fd, output_name ) = tempfile.mkstemp()
safe_groups = []
with open(output_fd, "r") as output_file:
subprocess.check_output([ "ssh-keygen", "-T", output_name, "-f", input_name ])
moduli = ModuliFile(output_file)
safe_groups = moduli.read()
os.unlink(input_name)
os.unlink(output_name)
if not dh_groups.issubset(safe_groups):
for unsafe_group in dh_groups.difference(safe_groups):
issues.append(Issue(Severity.critical, "unsafe DH group", unsafe_group))
return issues
|
Python
| 0
|
@@ -4319,32 +4319,38 @@
issues.append(
+Issue(
Severity.error,
@@ -4394,16 +4394,17 @@
, group)
+)
%0A
@@ -4457,16 +4457,22 @@
.append(
+Issue(
Severity
@@ -4522,24 +4522,25 @@
its%22, group)
+)
%0A%0A ( inpu
|
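Why the missing Issue(...) wrapper was a hard failure rather than a style issue: list.append takes exactly one argument, so the unwrapped calls raised TypeError as soon as a small group was seen.

issues = []
try:
    issues.append('error', 'small DH group', '1024 bits')
except TypeError as exc:
    print(exc)   # list.append() takes exactly one argument (3 given)

issues.append(('error', 'small DH group', '1024 bits'))   # one object: fine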
425b1ac925ecc2aeae8dff358b1054c9824fa4f0
|
Reorganize code in analyzer.py
|
analyzer.py
|
analyzer.py
|
# push-analyzer, A script for analyzing git pushes
# Copyright (c) 2014 firecoders
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import utils
from settings import args
results = utils.Signal ()
def analyze_ref_change ( sha_pre, sha_post, ref_name ):
changes = []
changes_overview = []
removals = utils.get_sha_range ( sha_post + b'..' + sha_pre )
additions = utils.get_sha_range ( sha_pre + b'..' + sha_post )
moves = []
for added_sha in additions:
added_diff = utils.get_diff ( added_sha )
for removed_sha in removals:
if added_diff == utils.get_diff ( removed_sha ):
moves.append ( { 'from' : removed_sha.decode (), 'to' : added_sha.decode () } )
for sha in removals:
moved = any ( move [ 'from' ] == sha.decode () for move in moves )
if not moved:
changes.append ( { 'type' : 'remove', 'sha' : sha.decode () } )
for sha in additions:
move = next ( ( m for m in moves if m [ 'to' ] == sha.decode () ), None )
if move:
changes.append ( { 'type' : 'move', 'from' : move [ 'from' ], 'to' : move [ 'to' ] } )
else:
changes.append ( { 'type' : 'add', 'sha' : sha.decode () } )
if utils.get_diff ( sha_pre, sha_post ).decode () == '':
changes_overview.append ( 'same overall diff' )
for change in changes:
if change [ 'type' ] not in changes_overview:
changes_overview.append ( change [ 'type' ] )
if len ( removals ) > 0:
type_field = 'forced update'
else:
type_field = 'update'
results ( {
'type' : type_field, 'changes' : changes_overview,
'name' : ref_name.decode (),
'from' : sha_pre.decode (), 'to' : sha_post.decode ()
} )
for change in changes:
results ( change )
def analyze_push ( refs_pre, refs_post ):
for key in refs_pre.keys ():
if key not in refs_post:
results ( { 'type' : 'remove branch', 'name' : key.decode () } )
for key in refs_post.keys ():
if key not in refs_pre:
results ( { 'type' : 'create branch', 'name' : key.decode () } )
sha_post = refs_post [ key ]
sha_pre = utils.get_best_ancestor ( refs_pre.values (), sha_post )
if sha_post != sha_pre:
analyze_ref_change ( sha_pre, sha_post, key )
else: # key in refs_pre
sha_pre = refs_pre [ key ]
sha_post = refs_post [ key ]
if sha_pre != sha_post:
analyze_ref_change ( sha_pre, sha_post, key )
|
Python
| 0.000001
|
@@ -1261,52 +1261,8 @@
):%0A
- changes = %5B%5D%0A changes_overview = %5B%5D%0A%0A
@@ -1675,32 +1675,50 @@
.decode () %7D )%0A%0A
+ changes = %5B%5D%0A%0A
for sha in r
@@ -2220,120 +2220,29 @@
-if utils.get_diff ( sha_pre, sha_post ).decode () == '':%0A changes_overview.append ( 'same overall diff' )
+changes_overview = %5B%5D
%0A%0A
@@ -2375,24 +2375,142 @@
'type' %5D )%0A%0A
+ if utils.get_diff ( sha_pre, sha_post ).decode () == '':%0A changes_overview.append ( 'same overall diff' )%0A%0A
if len (
|
0205db84f05c0914abe4b6e14d824de2777f60cd
|
Monitor unreleased version count.
|
allmychanges/management/commands/send_stats.py
|
allmychanges/management/commands/send_stats.py
|
import os
import datetime
from pprint import pprint
from django.core.management.base import BaseCommand
from twiggy_goodies.django import LogMixin
from django.conf import settings
from allmychanges.utils import graphite_send
from allmychanges.models import Package, Changelog, Version, User
from django.utils import timezone
from django.db.models import Count
def get_stats_from_file():
"""Gets stats dumped to file '.stats'.
It has simple format like 'name value' on each line
Usually .stats file used to store such data as number of
unittests passed/failed, code metrics and parser quality
metrics.
"""
filename = os.path.join(settings.PROJECT_ROOT, '.stats')
if os.path.exists(filename):
with open(filename) as f:
lines = f.readlines()
stats = [line.split(None, 1) for line in lines]
stats = {name: float(value) for name, value in stats}
return stats
return {}
def get_stats():
stats = get_stats_from_file()
from rq.scripts.rqinfo import (setup_default_arguments,
parse_args,
setup_redis, Queue)
args = parse_args()
setup_default_arguments(args, {})
setup_redis(args)
for queue in Queue.all():
stats['queue.{0}.jobs'.format(queue.name)] = queue.count
package_counts = list(User.objects.annotate(Count('packages')))
zero_packages = [user
for user in package_counts
if user.packages__count == 0]
others = [user.packages__count
for user in package_counts
if user.packages__count > 0]
stats['db.peruser-package-count.zero'] = len(zero_packages)
stats['db.peruser-package-count.min'] = min(others)
stats['db.peruser-package-count.max'] = max(others)
stats['db.peruser-package-count.avg'] = sum(others) / len(others)
stats['db.packages'] = Package.objects.count()
stats['db.changelogs'] = Changelog.objects.count()
stats['db.users'] = User.objects.count()
stats['db.versions.v1-vcs'] = Version.objects.filter(code_version='v1', changelog__filename=None).count()
stats['db.versions.v1'] = Version.objects.filter(code_version='v1').exclude(changelog__filename=None).count()
stats['db.versions.v2'] = Version.objects.filter(code_version='v2').count()
now = timezone.now()
minute_ago = now - datetime.timedelta(0, 60)
stats['crawler.discovered.v1.count'] = Version.objects.filter(
code_version='v1',
discovered_at__gte=minute_ago).count()
stats['crawler.discovered.v2.count'] = Version.objects.filter(
code_version='v2',
discovered_at__gte=minute_ago).count()
return stats
class Command(LogMixin, BaseCommand):
help = u"""Send stats to graphite Graphite."""
def handle(self, *args, **options):
stats = get_stats()
if args and args[0] == 'dry':
pprint(stats)
else:
graphite_send(**stats)
|
Python
| 0
|
@@ -2367,24 +2367,279 @@
v2').count()
+%0A %0A stats%5B'db.versions.v1-unreleased'%5D = Version.objects.filter(code_version='v1', unreleased=True).exclude(changelog__filename=None).count()%0A stats%5B'db.versions.v2-unreleased'%5D = Version.objects.filter(code_version='v2', unreleased=True).count()
%0A%0A now =
|
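A minimal sketch of the gauge pattern this commit extends: count the records that match a flag and file the number under a dotted metric key. The metric names come from the diff above; the plain-dict data model is illustrative and stands in for the Django queryset.

def unreleased_counts(versions):
    # versions: iterable of dicts with 'code_version' and 'unreleased' keys
    stats = {}
    for code_version in ('v1', 'v2'):
        key = 'db.versions.{0}-unreleased'.format(code_version)
        stats[key] = sum(1 for v in versions
                         if v['code_version'] == code_version and v['unreleased'])
    return stats

print(unreleased_counts([{'code_version': 'v1', 'unreleased': True},
                         {'code_version': 'v2', 'unreleased': False}]))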
8ab04ee3710a248cdc84c85a76797a298212f4f3
|
Use click.echo instead of print (#117)
|
alertaclient/commands/cmd_query.py
|
alertaclient/commands/cmd_query.py
|
import json
import click
from tabulate import tabulate
from alertaclient.models.alert import Alert
from alertaclient.utils import build_query, DateTime
COLOR_MAP = {
'critical': {'fg': 'red'},
'major': {'fg': 'magenta'},
'minor': {'fg': 'yellow'},
'warning': {'fg': 'blue'},
'normal': {'fg': 'green'},
'indeterminate': {'fg': 'cyan'},
}
@click.command('query', short_help='Search for alerts')
@click.option('--ids', '-i', metavar='UUID', multiple=True, help='List of alert IDs (can use short 8-char id)')
@click.option('--filter', '-f', 'filters', metavar='FILTER', multiple=True, help='KEY=VALUE e.g. severity=warning resource=web')
@click.option('--tabular', 'display', flag_value='tabular', default=True, help='Tabular output')
@click.option('--compact', 'display', flag_value='compact', help='Compact output')
@click.option('--details', 'display', flag_value='details', help='Compact output with details')
@click.pass_obj
def cli(obj, ids, filters, display, from_date=None):
"""Query for alerts based on search filter criteria."""
client = obj['client']
timezone = obj['timezone']
if ids:
query = [('id', x) for x in ids]
else:
query = build_query(filters)
if from_date:
query.append(('from-date', from_date))
r = client.http.get('/alerts', query)
if obj['output'] == 'json':
print(json.dumps(r['alerts']))
else:
alerts = [Alert.parse(a) for a in r['alerts']]
last_time = r['lastTime']
auto_refresh = r['autoRefresh']
if display == 'tabular':
headers = {'id': 'ID', 'lastReceiveTime': 'LAST RECEIVED', 'severity': 'SEVERITY', 'duplicateCount': 'DUPL',
'customer': 'CUSTOMER', 'environment': 'ENVIRONMENT', 'service': 'SERVICE', 'resource': 'RESOURCE',
'group': 'GROUP', 'event': 'EVENT', 'value': 'VALUE'}
click.echo(tabulate([a.tabular('summary', timezone) for a in alerts], headers=headers, tablefmt=obj['output']))
elif display in ['compact', 'details']:
for alert in reversed(alerts):
color = COLOR_MAP.get(alert.severity, {'fg': 'white'})
click.secho('{0}|{1}|{2}|{3:5d}|{4}|{5:<5s}|{6:<10s}|{7:<18s}|{8:12s}|{9:16s}|{10:12s}'.format(
alert.id[0:8],
DateTime.localtime(alert.last_receive_time, timezone),
alert.severity,
alert.duplicate_count,
alert.customer or "-",
alert.environment,
','.join(alert.service),
alert.resource,
alert.group,
alert.event,
alert.value or "n/a"), fg=color['fg'])
click.secho(' |{}'.format(alert.text), fg=color['fg'])
if display == 'details':
click.secho(' severity | {} -> {}'.format(alert.previous_severity, alert.severity), fg=color['fg'])
click.secho(' trend | {}'.format(alert.trend_indication), fg=color['fg'])
click.secho(' status | {}'.format(alert.status), fg=color['fg'])
click.secho(' resource | {}'.format(alert.resource), fg=color['fg'])
click.secho(' group | {}'.format(alert.group), fg=color['fg'])
click.secho(' event | {}'.format(alert.event), fg=color['fg'])
click.secho(' value | {}'.format(alert.value), fg=color['fg'])
click.secho(' tags | {}'.format(' '.join(alert.tags)), fg=color['fg'])
for key, value in alert.attributes.items():
click.secho(' {} | {}'.format(key.ljust(10), value), fg=color['fg'])
latency = alert.receive_time - alert.create_time
click.secho(' time created | {}'.format(DateTime.localtime(alert.create_time, timezone)), fg=color['fg'])
click.secho(' time received | {}'.format(DateTime.localtime(alert.receive_time, timezone)), fg=color['fg'])
click.secho(' last received | {}'.format(DateTime.localtime(alert.last_receive_time, timezone)), fg=color['fg'])
click.secho(' latency | {}ms'.format((latency.microseconds / 1000)), fg=color['fg'])
click.secho(' timeout | {}s'.format(alert.timeout), fg=color['fg'])
click.secho(' alert id | {}'.format(alert.id), fg=color['fg'])
click.secho(' last recv id | {}'.format(alert.last_receive_id), fg=color['fg'])
click.secho(' customer | {}'.format(alert.customer), fg=color['fg'])
click.secho(' environment | {}'.format(alert.environment), fg=color['fg'])
click.secho(' service | {}'.format(','.join(alert.service)), fg=color['fg'])
click.secho(' resource | {}'.format(alert.resource), fg=color['fg'])
click.secho(' type | {}'.format(alert.event_type), fg=color['fg'])
click.secho(' repeat | {}'.format(alert.repeat), fg=color['fg'])
click.secho(' origin | {}'.format(alert.origin), fg=color['fg'])
click.secho(' correlate | {}'.format(','.join(alert.correlate)), fg=color['fg'])
return auto_refresh, last_time
|
Python
| 0
|
@@ -1372,13 +1372,18 @@
-print
+click.echo
(jso
@@ -1401,16 +1401,62 @@
alerts'%5D
+, sort_keys=True, indent=4, ensure_ascii=False
))%0A e
|
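A hedged sketch of the substitution above, runnable wherever click is installed: click.echo behaves like print but handles bytes, unicode, and redirected streams consistently, and the diff also pretty-prints the JSON. The alert record below is fabricated for illustration.

import json
import click

alerts = [{'id': '1a2b3c4d', 'severity': 'warning'}]
click.echo(json.dumps(alerts, sort_keys=True, indent=4, ensure_ascii=False))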
c2f2a294b3b513e83a2ea5bca09a13c255c51c88
|
fix case when job has no lastBuild property
|
analyzer.py
|
analyzer.py
|
#!/usr/bin/python
import pprint
import re
import argparse
from datetime import datetime
from statsd import StatsClient
from utils import failureReasons, JenkinsClient
def is_build_failed(job):
if job['lastBuild']:
if job['lastBuild']['result'] == 'FAILURE':
return True
return False
def was_built_in_last_24h(job):
if job['lastBuild']:
build_date_time = datetime.utcfromtimestamp(job['lastBuild']['timestamp'] / 1e3) # to proper timestamp
time_diff_in_hours = (datetime.now() - build_date_time).total_seconds() / 60 / 60 # seconds to hours
if time_diff_in_hours < 24:
return True
return False
def filter_jobs(all_jobs):
failed_jobs = []
for job in all_jobs:
if is_build_failed(job) and was_built_in_last_24h(job):
failed_jobs.append(job)
return failed_jobs
def find_failure_reason(console_output):
for reason in failureReasons.possible_reasons:
for regex in reason['regex']:
match = re.search(regex, console_output)
if match:
return reason['name']
return failureReasons.unknown_reason['name']
def update_results(results, reason, job):
for entry in results:
if entry['name'] == reason:
entry['count'] += 1
entry['job'].append(
{
'job name': job['name'],
'build url': '{job_url}{build_number}/console'.format(job_url=job['url'],
build_number=job['lastBuild']['number'])
}
)
break
return results
def analyze_jobs(filtered_jobs, jenkins_server):
results = failureReasons.possible_reasons
results.append(failureReasons.unknown_reason)
for entry in results:
entry['count'] = 0
entry['job'] = []
counter = 0
for job in filtered_jobs:
counter += 1
print "Analyzing job {id} / {all}".format(id=counter, all=len(filtered_jobs))
console_output = jenkins_server.get_job_console_output(job)
failure_reason = find_failure_reason(console_output)
results = update_results(results, failure_reason, job)
return results
def print_results(results):
print '\n Full results:\n'
pp = pprint.PrettyPrinter()
pp.pprint(results)
print '\n\n\n Quick summary:\n'
for entry in results:
print '{reason} : {count}'.format(reason=entry['name'], count=entry['count'])
def report_to_graphite(host, port, prefix, results):
statsd = StatsClient(host=host, port=port, prefix=prefix, maxudpsize=512)
for entry in results:
statsd.gauge(entry['graphite key'], entry['count'])
def create_arg_parser():
parser = argparse.ArgumentParser(description='Analyze jenkins failures and report them to graphite server')
parser.add_argument('jenkins_host')
parser.add_argument('jenkins_user')
parser.add_argument('jenkins_pass')
parser.add_argument('statsd_host')
parser.add_argument('statsd_port')
parser.add_argument('graphite_key')
return parser
def main():
parser = create_arg_parser()
args = parser.parse_args()
jenkins_server = JenkinsClient.JenkinsClient(args.jenkins_host, args.jenkins_user, args.jenkins_pass)
all_jobs = jenkins_server.get_all_jobs()
filtered_jobs = filter_jobs(all_jobs)
results = analyze_jobs(filtered_jobs, jenkins_server)
report_to_graphite(args.statsd_host, args.statsd_port, args.graphite_key, results)
print_results(results)
if __name__ == '__main__':
main()
|
Python
| 0.000008
|
@@ -188,32 +188,100 @@
ed(job):%0A if
+'lastBuild' in job and job%5B'lastBuild'%5D is not None and 'result' in
job%5B'lastBuild'%5D
@@ -420,28 +420,24 @@
ob):%0A if
-job%5B
'lastBuild'%5D
@@ -427,33 +427,39 @@
if 'lastBuild'
-%5D
+ in job
:%0A build_
|
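A self-contained sketch of the guard this diff introduces: a Jenkins job payload may omit 'lastBuild' entirely, carry None there, or carry a build without 'result', and each case must fall through to False.

def is_build_failed(job):
    # Each membership/None test short-circuits before the next lookup runs
    if 'lastBuild' in job and job['lastBuild'] is not None and 'result' in job['lastBuild']:
        return job['lastBuild']['result'] == 'FAILURE'
    return False

assert is_build_failed({}) is False
assert is_build_failed({'lastBuild': None}) is False
assert is_build_failed({'lastBuild': {'result': 'FAILURE'}}) is True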
88cc2242ccd91d7574dab0f687c3a0c755a9a4aa
|
convert stock prices from strings to floats before saving/returning
|
analyzer.py
|
analyzer.py
|
import json, urllib2
from neuralNetwork import NN
def getHistoricalData(stockSymbol):
historicalPrices = []
# login to API
urllib2.urlopen("http://api.kibot.com/?action=login&user=guest&password=guest")
# get 10 days of data from API (business days only, could be < 10)
url = "http://api.kibot.com/?action=history&symbol=" + stockSymbol + "&interval=daily&period=10&unadjusted=1®ularsession=1"
apiData = urllib2.urlopen(url).read().split("\n")
# get price for each day returned from API
for line in apiData:
if(len(line) > 0):
tempLine = line.split(',')
historicalPrices.append(tempLine[1])
return historicalPrices
def analyzeSymbol(stockSymbol):
historicalPrices = getHistoricalData(stockSymbol)
network = NN(ni = 2, nh = 2, no = 1)
# train neural network with historical prices
# return prediction
return True
getHistoricalData("GOOG")
|
Python
| 0.002193
|
@@ -647,16 +647,22 @@
.append(
+float(
tempLine
@@ -665,16 +665,17 @@
Line%5B1%5D)
+)
%0A%0A re
@@ -917,16 +917,22 @@
n True%0A%0A
+print
getHisto
|
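The fix above in a runnable sketch: each price field arrives as a string inside a comma-separated line, so it is cast with float() before being collected. The sample lines are fabricated.

def parse_prices(api_lines):
    prices = []
    for line in api_lines:
        if len(line) > 0:
            fields = line.split(',')
            prices.append(float(fields[1]))  # cast before storing
    return prices

print(parse_prices(['04/01/2014,1105.21', '04/02/2014,1107.49', '']))  # [1105.21, 1107.49]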
d629cf3dd99f02fe6f8627386b623ee2ef6da092
|
update exception message when the index ayas count is not exact
|
src/alfanous/Indexing.py
|
src/alfanous/Indexing.py
|
# coding: utf-8
## Copyright (C) 2009-2012 Assem Chelli <assem.ch [at] gmail.com>
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU Affero General Public License as published
## by the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU Affero General Public License for more details.
## You should have received a copy of the GNU Affero General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
@author: Assem Chelli
@contact: assem.ch [at] gmail.com
@license: AGPL
'''
from alfanous.Exceptions import Ta7rif
from alfanous.Support.whoosh.filedb.filestore import FileStorage
from alfanous.Support.whoosh import index
class BasicDocIndex:
"""all props of Document Index"""
OK = False
def __init__( self, ixpath ):
self._ixpath = ixpath
self._ix, self.OK = self.load()
self.verify()
def load( self ):
"""
Load the Index from the path ixpath
return self.OK = True if success
"""
ix, ok = None, False
if index.exists_in( self._ixpath ):
storage = FileStorage( self._ixpath )
ix = storage.open_index()
ok = True
return ix, ok
def verify( self ):
"""
verify the data of index after loading
"""
pass
def __str__( self ):
return "<alfanous.Indexing.BasicDocIndex '" \
+ self._ixpath + "'" \
+ str( self._ix.doc_count() ) + ">"
def get_index( self ):
"""return index"""
return self._ix
def get_schema( self ):
""" return schema """
return self._ix.schema
def get_reader( self ):
""" return reader """
return self._ix.reader()
def get_searcher( self ):
""" return searcher """
return self._ix.searcher
def __len__( self ):
return self._ix.doc_count()
def add_document( self, doc ):
""" add a new document
@param doc: the document
@type doc: dict
"""
        writer = self._ix.writer()
writer.add_document( **doc )
writer.commit()
def add_documents( self, doclist ):
""" add a new documents
@param doclist: the documents
@type doclist: list(dict)
"""
writer = self._ix.writer()
for doc in doclist:
writer.add_document( **doc )
writer.commit()
def update_documents( self, doclist ):
""" update documents
@param doclist: the documents
@type doclist: list(dict)
"""
writer = self._ix.writer()
for doc in doclist:
writer.update_document( **doc )
writer.commit()
def delete_by_query( self, query ):
""" delete a set of documents retrieved by a query """
writer = self._ix.writer()
writer.delete_by_query( query )
writer.commit()
def __call__( self ):
return self.OK
class QseDocIndex( BasicDocIndex ):
"""all props of Document Index"""
def __str__( self ):
return "<alfanous.Indexing.QseDocIndex '" \
+ self._ixpath + "'" \
+ str( self._ix.doc_count() ) + ">"
def verify( self ):
"""raise a ta7rif exception if it is wrong"""
nb = -1
if self.OK:
nb = len( self )
if nb != 6236 :
raise Ta7rif( "number of ayas wrong", value = nb, original = 6236, msg = "you must update your index" )
return nb
class ExtDocIndex( BasicDocIndex ):
""" all properties of extended doc index """
def __str__( self ):
return "<alfanous.Indexing.ExtendedDocIndex '" \
+ self._ixpath + "'" \
+ str( self._ix.doc_count() ) + ">"
if __name__ == "__main__":
E = ExtDocIndex( ixpath = "../indexes/extend/" )
D = QseDocIndex( ixpath = "../indexes/main/" )
|
Python
| 0.000006
|
@@ -3784,28 +3784,31 @@
f( %22
-number of ayas wrong
+Ayas count is not exact
%22, v
@@ -3868,16 +3868,18 @@
ur index
+es
%22 )%0A
|
ccdc17645440cf191f9cca27f32b2211fad4ccd0
|
Load coordinates info into the main table
|
luigi/tasks/release/load_coordinates.py
|
luigi/tasks/release/load_coordinates.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import luigi
from tasks.config import output
from tasks.utils.pgloader import PGLoader
from .utils.generic import file_pattern
CONTROL_FILE = """
LOAD CSV
FROM ALL FILENAMES MATCHING ~<{pattern}>
IN DIRECTORY '{directory}'
HAVING FIELDS (
accession,
primary_accession,
local_start,
local_end,
strand
)
INTO {db_url}
TARGET COLUMNS (
accession,
primary_accession,
local_start,
local_end,
strand
)
WITH truncate,
batch rows = 500,
batch size = 32MB,
prefetch rows = 500,
workers = 2, concurrency = 1,
skip header = 0,
fields escaped by double-quote,
fields terminated by ','
SET
work_mem to '256 MB',
maintenance_work_mem to '256 GB',
search_path = '{search_path}'
BEFORE LOAD DO
$$
ALTER TABLE rnacen.load_rnc_coordinates SET (
autovacuum_enabled = false,
toast.autovacuum_enabled = false
);
$$
AFTER LOAD DO
$$
ALTER TABLE rnacen.load_rnc_coordinates SET (
autovacuum_enabled = true,
toast.autovacuum_enabled = true
);
$$
;
"""
class LoadCoordinates(PGLoader): # pylint: disable=R0904
"""
This will load coordinates. The database parameter defaults to all
coordinates, if a value is given then it is assumed to be the name of the
database to load. All files that begin with that name will be loaded.
"""
database = luigi.Parameter(default='all')
def control_file(self):
config = output()
directory = os.path.join(config.base, 'genomic_locations')
return CONTROL_FILE.format(
pattern=file_pattern(self.database),
db_url=self.db_url(table='load_rnc_coordinates'),
search_path=self.db_search_path(),
directory=directory,
)
|
Python
| 0
|
@@ -1641,16 +1641,363 @@
e%0A);%0A$$%0A
+,%0A$$%0AINSERT INTO rnacen.rnc_coordinates AS t1 (%0A accession, primary_accession, local_start, local_end, strand, id%0A)%0ASELECT%0A accession, primary_accession, local_start, local_end, strand, NEXTVAL('rnc_coordinates_pk_seq')%0AFROM rnacen.load_rnc_coordinates as t2%0A ON CONFLICT (accession, primary_accession, local_start, local_end)%0A DO NOTHING;%0A$$%0A
;%0A%22%22%22%0A%0A%0A
|
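The substance of the diff above is the new AFTER LOAD block; as a sketch, the statement it injects is the constant below. The ON CONFLICT ... DO NOTHING clause is what makes re-running the load a no-op for rows already present. How it would be executed (for example through a psycopg2 cursor) is an assumption, not part of this record.

# SQL copied from the hunk above: move rows from the staging table into the
# main table, assigning ids from the sequence, skipping duplicates.
COPY_FROM_STAGING = """
INSERT INTO rnacen.rnc_coordinates AS t1 (
    accession, primary_accession, local_start, local_end, strand, id
)
SELECT
    accession, primary_accession, local_start, local_end, strand,
    NEXTVAL('rnc_coordinates_pk_seq')
FROM rnacen.load_rnc_coordinates AS t2
ON CONFLICT (accession, primary_accession, local_start, local_end)
    DO NOTHING;
"""

# cursor.execute(COPY_FROM_STAGING)  # assuming a psycopg2 cursor is available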
d927932c47deb7ebae519183a4a7e3e440a3b69e
|
Fix KeyError in IQVIA (#49968)
|
homeassistant/components/iqvia/__init__.py
|
homeassistant/components/iqvia/__init__.py
|
"""Support for IQVIA."""
import asyncio
from datetime import timedelta
from functools import partial
from pyiqvia import Client
from pyiqvia.errors import IQVIAError
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
CONF_ZIP_CODE,
DATA_COORDINATOR,
DOMAIN,
LOGGER,
TYPE_ALLERGY_FORECAST,
TYPE_ALLERGY_INDEX,
TYPE_ALLERGY_OUTLOOK,
TYPE_ASTHMA_FORECAST,
TYPE_ASTHMA_INDEX,
TYPE_DISEASE_FORECAST,
TYPE_DISEASE_INDEX,
)
DEFAULT_ATTRIBUTION = "Data provided by IQVIA™"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=30)
PLATFORMS = ["sensor"]
async def async_setup_entry(hass, entry):
"""Set up IQVIA as config entry."""
coordinators = {}
if not entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
hass.config_entries.async_update_entry(
entry, **{"unique_id": entry.data[CONF_ZIP_CODE]}
)
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(entry.data[CONF_ZIP_CODE], session=websession)
async def async_get_data_from_api(api_coro):
"""Get data from a particular API coroutine."""
try:
return await api_coro()
except IQVIAError as err:
raise UpdateFailed from err
init_data_update_tasks = []
for sensor_type, api_coro in [
(TYPE_ALLERGY_FORECAST, client.allergens.extended),
(TYPE_ALLERGY_INDEX, client.allergens.current),
(TYPE_ALLERGY_OUTLOOK, client.allergens.outlook),
(TYPE_ASTHMA_FORECAST, client.asthma.extended),
(TYPE_ASTHMA_INDEX, client.asthma.current),
(TYPE_DISEASE_FORECAST, client.disease.extended),
(TYPE_DISEASE_INDEX, client.disease.current),
]:
coordinator = coordinators[sensor_type] = DataUpdateCoordinator(
hass,
LOGGER,
name=f"{entry.data[CONF_ZIP_CODE]} {sensor_type}",
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=partial(async_get_data_from_api, api_coro),
)
init_data_update_tasks.append(coordinator.async_config_entry_first_refresh())
await asyncio.gather(*init_data_update_tasks)
hass.data[DOMAIN].setdefault(DATA_COORDINATOR, {})[entry.entry_id] = coordinators
hass.config_entries.async_setup_platforms(entry, PLATFORMS)
return True
async def async_unload_entry(hass, entry):
"""Unload an OpenUV config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
class IQVIAEntity(CoordinatorEntity, SensorEntity):
"""Define a base IQVIA entity."""
def __init__(self, coordinator, entry, sensor_type, name, icon):
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._entry = entry
self._icon = icon
self._name = name
self._state = None
self._type = sensor_type
@property
def extra_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self._entry.data[CONF_ZIP_CODE]}_{self._type}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return "index"
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
if not self.coordinator.last_update_success:
return
self.update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
if self._type == TYPE_ALLERGY_FORECAST:
self.async_on_remove(
self.hass.data[DOMAIN][DATA_COORDINATOR][self._entry.entry_id][
TYPE_ALLERGY_OUTLOOK
].async_add_listener(self._handle_coordinator_update)
)
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
raise NotImplementedError
|
Python
| 0
|
@@ -946,24 +946,61 @@
g entry.%22%22%22%0A
+ hass.data.setdefault(DOMAIN, %7B%7D)%0A
coordina
|
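A minimal sketch of the KeyError fix above: dict.setdefault guarantees hass.data[DOMAIN] exists before nested keys are written beneath it, which is exactly the lookup that failed before. A plain dict stands in for hass.data here.

DOMAIN = 'iqvia'
DATA_COORDINATOR = 'coordinator'

hass_data = {}                    # stand-in for hass.data
hass_data.setdefault(DOMAIN, {})  # no-op when the key already exists
hass_data[DOMAIN].setdefault(DATA_COORDINATOR, {})['entry-1'] = 'coordinators'
print(hass_data)  # {'iqvia': {'coordinator': {'entry-1': 'coordinators'}}}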
e9bbb93177045b9f3f8a78d575fde07ecdf2c307
|
Fix raw data url
|
api/urls.py
|
api/urls.py
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from rest_framework.routers import DefaultRouter
from api import views
router = DefaultRouter()
# share routes
router.register(r'extras', views.ExtraDataViewSet, base_name=views.ExtraDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'entities', views.EntityViewSet, base_name=views.EntityViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'venues', views.VenueViewSet, base_name=views.VenueViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'organizations', views.OrganizationViewSet, base_name=views.OrganizationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'publishers', views.PublisherViewSet, base_name=views.PublisherViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'institutions', views.InstitutionViewSet, base_name=views.InstitutionViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'identifiers', views.IdentifierViewSet, base_name=views.IdentifierViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'people', views.PersonViewSet, base_name=views.PersonViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'affiliations', views.AffiliationViewSet, base_name=views.AffiliationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'contributors', views.ContributorViewSet, base_name=views.ContributorViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'funders', views.FunderViewSet, base_name=views.FunderViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'awards', views.AwardViewSet, base_name=views.AwardViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'tags', views.TagViewSet, base_name=views.TagViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'links', views.LinkViewSet, base_name=views.LinkViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'creativeworks', views.CreativeWorkViewSet, base_name=views.CreativeWorkViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'preprints', views.PreprintViewSet, base_name=views.PreprintViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'publications', views.PublicationViewSet, base_name=views.PublicationViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'projects', views.ProjectViewSet, base_name=views.ProjectViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'manuscripts', views.ManuscriptViewSet, base_name=views.ManuscriptViewSet.serializer_class.Meta.model._meta.model_name)
# workflow routes
router.register(r'normalizeddata', views.NormalizedDataViewSet, base_name=views.NormalizedDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'changesets', views.ChangeSetViewSet, base_name=views.ChangeSetViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'changes', views.ChangeViewSet, base_name=views.ChangeViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'raw_data', views.RawDataViewSet, base_name=views.RawDataViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'users', views.ShareUserViewSet, base_name=views.ShareUserViewSet.serializer_class.Meta.model._meta.model_name)
router.register(r'providers', views.ProviderViewSet, base_name=views.ProviderViewSet.serializer_class.Meta.model._meta.model_name)
urlpatterns = [
url(r'user_info/?', views.ShareUserView.as_view(), name='userinfo'),
url(r'search/(?P<url_bits>.*)', csrf_exempt(views.ElasticSearchView.as_view()), name='search'),
] + router.urls
|
Python
| 0.999523
|
@@ -3137,17 +3137,16 @@
er(r'raw
-_
data', v
|
5722cac8dcacafa0a4b8d9f23f7f0a38a0632283
|
Add support to specify an identity file for ssh connections
|
proxmoxer/backends/openssh.py
|
proxmoxer/backends/openssh.py
|
__author__ = 'Oleg Butovich'
__copyright__ = '(c) Oleg Butovich 2013-2015'
__licence__ = 'MIT'
from proxmoxer.backends.base_ssh import ProxmoxBaseSSHSession, BaseBackend
try:
import openssh_wrapper
except ImportError:
import sys
sys.stderr.write("Chosen backend requires 'openssh_wrapper' module\n")
sys.exit(1)
class ProxmoxOpenSSHSession(ProxmoxBaseSSHSession):
def __init__(self, host,
username,
configfile=None,
port=22,
timeout=5,
forward_ssh_agent=False,
sudo=False):
self.host = host
self.username = username
self.configfile = configfile
self.port = port
self.timeout = timeout
self.forward_ssh_agent = forward_ssh_agent
self.sudo = sudo
self.ssh_client = openssh_wrapper.SSHConnection(self.host,
login=self.username,
port=self.port,
timeout=self.timeout)
def _exec(self, cmd):
if self.sudo:
cmd = "sudo " + cmd
ret = self.ssh_client.run(cmd, forward_ssh_agent=self.forward_ssh_agent)
return ret.stdout, ret.stderr
def upload_file_obj(self, file_obj, remote_path):
self.ssh_client.scp((file_obj,), target=remote_path)
class Backend(BaseBackend):
def __init__(self, host, user, configfile=None, port=22, timeout=5, forward_ssh_agent=False, sudo=False):
self.session = ProxmoxOpenSSHSession(host, user,
configfile=configfile,
port=port,
timeout=timeout,
forward_ssh_agent=forward_ssh_agent,
sudo=sudo)
|
Python
| 0
|
@@ -583,35 +583,72 @@
sudo=False
+,%0A identity_file=None
):%0A
-
self.hos
@@ -858,16 +858,59 @@
= sudo%0A
+        self.identity_file = identity_file%0A
@@ -1193,16 +1193,106 @@
.timeout
+,%0A                 identity_file=self.identity_file
)%0A%0A d
@@ -1738,16 +1738,36 @@
do=False
+, identity_file=None
):%0A
@@ -2086,16 +2086,16 @@
_agent,%0A
-
@@ -2140,10 +2140,84 @@
udo=sudo
+,%0A identity_file=identity_file
)%0A
|
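The shape of the change above, reduced to a runnable sketch: an optional identity_file keyword defaults to None and is forwarded from the backend to the session unchanged. The stub classes replace the real SSH machinery.

class Session(object):
    def __init__(self, host, identity_file=None):
        self.host = host
        self.identity_file = identity_file  # forwarded verbatim

class Backend(object):
    def __init__(self, host, identity_file=None):
        self.session = Session(host, identity_file=identity_file)

backend = Backend('pve.example.com', identity_file='~/.ssh/id_rsa')
print(backend.session.identity_file)  # ~/.ssh/id_rsa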
7bc245ba0079980bf55bfd6dcd630c9ae38310fc
|
Stop services instead of restarting them from stripdown
|
VMEncryption/main/oscrypto/encryptstates/UnmountOldrootState.py
|
VMEncryption/main/oscrypto/encryptstates/UnmountOldrootState.py
|
#!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
import os
import re
import sys
from time import sleep
from OSEncryptionState import *
class UnmountOldrootState(OSEncryptionState):
def __init__(self, context):
super(UnmountOldrootState, self).__init__('UnmountOldrootState', context)
def should_enter(self):
self.context.logger.log("Verifying if machine should enter unmount_oldroot state")
if not super(UnmountOldrootState, self).should_enter():
return False
self.context.logger.log("Performing enter checks for unmount_oldroot state")
self.command_executor.ExecuteInBash('[ -e "/oldroot" ]', True)
return True
def enter(self):
if not self.should_enter():
return
self.context.logger.log("Entering unmount_oldroot state")
self.command_executor.ExecuteInBash('mkdir -p /var/empty/sshd', True)
self.command_executor.ExecuteInBash('systemctl restart sshd.service', True)
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="systemctl list-units",
raise_exception_on_failure=True,
communicator=proc_comm)
for line in proc_comm.stdout.split('\n'):
if not "running" in line:
continue
if "waagent.service" in line:
continue
match = re.search(r'\s(\S*?\.service)', line)
if match:
service = match.groups()[0]
self.context.logger.log("Restarting {0}".format(service))
self.command_executor.Execute('systemctl restart {0}'.format(service))
self.command_executor.Execute('swapoff -a', True)
if os.path.exists("/oldroot/mnt/resource"):
self.command_executor.Execute('umount /oldroot/mnt/resource')
proc_comm = ProcessCommunicator()
self.command_executor.Execute(command_to_execute="fuser -vm /oldroot",
raise_exception_on_failure=True,
communicator=proc_comm)
self.context.logger.log("Processes using oldroot:\n{0}".format(proc_comm.stdout))
procs_to_kill = filter(lambda p: p.isdigit(), proc_comm.stdout.split())
procs_to_kill = reversed(sorted(procs_to_kill))
for victim in procs_to_kill:
if int(victim) == os.getpid():
self.context.logger.log("Skipping suicide")
continue
self.command_executor.Execute('kill -9 {0}'.format(victim))
self.command_executor.Execute('telinit u', True)
sleep(3)
self.command_executor.Execute('umount /oldroot', True)
sleep(3)
while True:
self.context.logger.log("Restarting systemd-udevd")
self.command_executor.Execute('killall -s KILL systemd-udevd')
self.command_executor.Execute('/usr/lib/systemd/systemd-udevd --daemon')
sleep(10)
if self.command_executor.ExecuteInBash('[ -b /dev/sda2 ]', False) == 0:
break
self.command_executor.Execute('xfs_repair /dev/sda2', True)
def should_exit(self):
self.context.logger.log("Verifying if machine should exit unmount_oldroot state")
if os.path.exists('/oldroot/bin'):
self.context.logger.log("/oldroot was not unmounted")
return False
return super(UnmountOldrootState, self).should_exit()
|
Python
| 0
|
@@ -2035,16 +2035,42 @@
service%22
+ in line or %22sshd.service%22
in line
@@ -2225,82 +2225,8 @@
%5B0%5D%0A
- self.context.logger.log(%22Restarting %7B0%7D%22.format(service))%0A
@@ -2278,23 +2278,20 @@
stemctl
-restart
+stop
%7B0%7D'.fo
|
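A self-contained sketch of the unit scan this diff touches: running services are pulled out of "systemctl list-units" output with a regex, sshd and the agent are now skipped, and each survivor is stopped rather than restarted. The listing is fabricated sample output.

import re

listing = (
    '  sshd.service     loaded active running OpenSSH server daemon\n'
    '  crond.service    loaded active running Command Scheduler\n'
    '  waagent.service  loaded active running Azure Linux Agent\n'
)
for line in listing.split('\n'):
    if 'running' not in line:
        continue
    if 'waagent.service' in line or 'sshd.service' in line:
        continue
    match = re.search(r'\s(\S*?\.service)', line)
    if match:
        print('systemctl stop {0}'.format(match.groups()[0]))  # crond.service only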
ae2f12bf1dcd7ec1b1d7b5843b56fdc8df32a50e
|
gunicorn debug=False
|
config-gunicorn.py
|
config-gunicorn.py
|
#!/usr/bin/env python
import os
def numCPUs():
if not hasattr(os, 'sysconf'):
raise RuntimeError('No sysconf detected.')
return os.sysconf('SC_NPROCESSORS_ONLN')
bind = '127.0.0.1:8000'
workers = 4
worker_class = 'gevent'
debug = True
daemon = True
pidfile = '/tmp/gunicorn.pid'
logfile = '/tmp/gunicorn.log'
|
Python
| 0.999639
|
@@ -243,19 +243,20 @@
debug =
-Tru
+Fals
e%0Adaemon
|
d717cc604d26f3470af3c2f686d04ce7eb1ef1f3
|
Generate proper contents for displaying a user profile page.
|
web_apps/game_support/src/local/view.py
|
web_apps/game_support/src/local/view.py
|
#!/usr/bin/env python
# ---------------------------------------------------------------------------------------------
"""
local/view.py
Copyright (c) 2015 Kevin Cureton
"""
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------------------------
import json
import os
import string
import pyramid.renderers
import pyramid.view
import local.stash
# ---------------------------------------------------------------------------------------------
# Globals
# ---------------------------------------------------------------------------------------------
# ---------------------------------------------------------------------------------------------
# View methods
# ---------------------------------------------------------------------------------------------
@pyramid.view.view_config(route_name='index', request_method='GET')
def indexPage(request):
""" The index (starting) page for the application.
"""
response = pyramid.renderers.render_to_response("local:site/templates/index.jinja2",
dict(),
request=request)
return response
@pyramid.view.view_config(route_name='view.create_user', request_method='GET')
def createNewUserPage(request):
""" The page for creating a new user.
"""
response = pyramid.renderers.render_to_response("local:site/templates/create_user.jinja2",
dict(),
request=request)
return response
@pyramid.view.view_config(route_name='view.create_user', request_method='GET')
def displayUserPage(request):
""" Display a detail page about a user.
"""
results = dict()
response = pyramid.renderers.render_to_response("local:site/templates/user_detail.jinja2",
results,
request=request)
return response
@pyramid.view.view_config(route_name='view.search_users', request_method='GET')
def searchForUsers(request):
""" The page for searching for users.
"""
print("DEBUG: Searching for users...")
results = dict()
results['error'] = ""
results['users'] = list()
if 'nickname' not in request.GET or request.GET['nickname'] == "":
results['error'] = "Please specify a nick name for the search."
else:
users_stash = local.stash.UsersStash()
found_users = users_stash.searchUsers(request.GET['nickname'])
tmp_users = list()
for nick_name in found_users.keys():
user_uid = found_users[nick_name]
tmp = dict()
tmp['url'] = "/users/%s" % user_uid
tmp['nickname'] = nick_name
tmp_users.append(tmp)
results['users'] = tmp_users
response = pyramid.renderers.render_to_response("local:site/templates/search_users.jinja2",
results,
request=request)
return response
@pyramid.view.view_config(route_name='view.display_battles', request_method='GET')
def displayBattlesPage(request):
""" The page for displaying battles.
"""
# TODO: Get the url parameters for start and end times.
response = pyramid.renderers.render_to_response("local:site/templates/display_battles.jinja2",
dict(),
request=request)
return response
# ---------------------------------------------------------------------------------------------
# Module test harness
# ---------------------------------------------------------------------------------------------
if __name__ == "__main__":
print("This is the test harness for the module")
sys.exit(0)
|
Python
| 0.999062
|
@@ -1817,38 +1817,39 @@
oute_name='view.
-create
+display
_user', request_
@@ -1948,32 +1948,231 @@
%22%22%22%0A
-results = dict()
+user_uid = request.matchdict%5B'user_uid'%5D%0A%0A results = dict()%0A results%5B'error'%5D = %22%22%0A%0A users_stash = local.stash.UsersStash()%0A user_info = users_stash.getUserInfo(user_uid)%0A%0A results%5B'user'%5D = user_info
%0A%0A re
|
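A hedged sketch of the rewritten view: the user id comes from the route's matchdict, a stash lookup fills the template context, and the error slot stays empty on success. The in-memory dict below is a stand-in for local.stash.UsersStash.

class FakeRequest(object):
    matchdict = {'user_uid': 'u-42'}

USERS = {'u-42': {'nickname': 'kevin'}}  # stand-in for the users stash

def display_user_page(request):
    user_uid = request.matchdict['user_uid']
    return {'error': '', 'user': USERS.get(user_uid)}

print(display_user_page(FakeRequest()))  # {'error': '', 'user': {'nickname': 'kevin'}}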
ff6b9eddc27ee2b897ab20198d562ef1dfe257d5
|
support getting docker info
|
app/dash.py
|
app/dash.py
|
#!/usr/bin/env python3
# coding=utf-8
"""
@version:0.1
@author: ysicing
@file: blog/dash.py
@time: 2017/9/20 22:46
"""
from flask import Blueprint, render_template
dash = Blueprint('dash', __name__)
@dash.route('/dash/')
def dash_index():
return render_template('dash.html')
|
Python
| 0
|
@@ -163,122 +163,318 @@
late
-%0A%0Adash = Blueprint('dash', __name__)%0A%0A%0A@dash.route('/dash/')%0Adef dash_index():%0A return render_template('dash.html'
+,jsonify%0Afrom app.plugins.docker import DockerApi%0A%0Adash = Blueprint('dash', __name__)%0A%0Adocker = DockerApi(host=None, timeout=None)%0A%0A%0A@dash.route('/dash/')%0Adef dash_index():%0A return render_template('dash.html')%0A%0A%0A@dash.route('/dash/docker')%0Adef dash_docker_info():%0A return jsonify(docker.get_docker_version()
)
|
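A runnable sketch of the route this diff adds, with app.plugins.docker stubbed out since that module is not shown in the record: jsonify serializes the version dict into a JSON response.

from flask import Flask, jsonify

app = Flask(__name__)

class DockerApiStub(object):
    def get_docker_version(self):
        return {'Version': '17.06.0-ce'}  # fabricated payload

docker = DockerApiStub()

@app.route('/dash/docker')
def dash_docker_info():
    return jsonify(docker.get_docker_version())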
95945f98b3c4689dc1fb5066f5102154cc4a6a28
|
bump version
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name='vdist',
version='0.3.4',
description='Create OS packages from Python projects using Docker containers',
long_description='Create OS packages from Python projects using Docker containers',
author='L. Brouwer',
author_email='objectified@gmail.com',
license='MIT',
url='https://github.com/objectified/vdist',
packages=find_packages(),
install_requires=['jinja2==2.7.3', 'docker-py==0.7.2'],
package_data={'': ['internal_profiles.json', '*.sh']},
tests_require=['pytest'],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
keywords='python docker deployment packaging',
)
|
Python
| 0
|
@@ -84,9 +84,9 @@
0.3.
-4
+5
',%0A
|
2b28da0ee9c3a26e5d0bd7cebe56fece3585689a
|
Update file_opt_args
|
website/addons/osfstorage/decorators.py
|
website/addons/osfstorage/decorators.py
|
import httplib
import functools
from webargs import Arg
from webargs import core
from modularodm.exceptions import NoResultsFound
from modularodm.exceptions import ValidationValueError
from modularodm.storage.base import KeyExistsException
from framework.auth.decorators import must_be_signed
from website.models import User
from website.addons.osfstorage import model
from framework.exceptions import HTTPError
from website.addons.osfstorage import utils
from website.project.decorators import (
must_not_be_registration, must_have_addon,
)
USER_ARG = Arg(None, required=True, dest='user', use=User.from_cookie, validate=lambda x: x is not None)
class JSONParser(core.Parser):
def __init__(self, data):
self._data = data
def parse(self, args):
return super(JSONParser, self).parse(args, None, ('json',))
def parse_json(self, _, name, arg):
if self._data:
return core.get_value(self._data, name, arg.multiple)
else:
return core.Missing
def error_callback(self, err):
raise HTTPError(err.status_code, data={
'message_long': err.message
})
def path_validator(path):
return (
path.startswith('/') and
len(path.strip('/').split('/')) < 3
)
file_opt_args = {
'auth': Arg({
'id': Arg(None, required=True, dest='user', use=User.load, validate=lambda x: x is not None)
}),
'source': Arg(unicode, required=True, validate=path_validator),
'destination': Arg(unicode, required=True, validate=path_validator),
}
waterbutler_crud_args = {
'cookie': USER_ARG,
'path': Arg(str, required=True, validate=path_validator),
}
def handle_odm_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except NoResultsFound:
raise HTTPError(httplib.NOT_FOUND)
except KeyExistsException:
raise HTTPError(httplib.CONFLICT)
return wrapped
def waterbutler_opt_hook(func):
@must_be_signed
@handle_odm_errors
@must_not_be_registration
@must_have_addon('osfstorage', 'node')
@functools.wraps(func)
def wrapped(payload, *args, **kwargs):
kwargs.update(JSONParser(payload).parse(file_opt_args))
destination = kwargs['destination']
kwargs.update({
'source': model.OsfStorageFileNode.get(
kwargs['source'],
kwargs['node_addon']
),
'destination': model.OsfStorageFileNode.get_folder(
destination['parent'],
destination['node'].get_addon('osfstorage')
),
'name': destination['name'],
})
return func(*args, **kwargs)
wrapped.undecorated = func # For testing
return wrapped
@must_be_signed
@handle_odm_errors
@must_not_be_registration
@must_have_addon('osfstorage', 'node')
def waterbutler_crud_hook(func):
@functools.wraps(func)
def wrapped(payload, *args, **kwargs):
kwargs.update(JSONParser(payload)).parse({
'cookie': USER_ARG,
'path': Arg(
None,
required=True,
dest='file_node',
validate=lambda x: model.OsfStorageFileNode.get(x, kwargs.get('node_addon'))
),
})
return func(*args, **kwargs)
return wrapped
|
Python
| 0.000022
|
@@ -321,16 +321,48 @@
rt User%0A
+from website.models import Node%0A
from web
@@ -1327,32 +1327,12 @@
'
-auth': Arg(%7B%0A 'id
+user
': A
@@ -1350,37 +1350,24 @@
quired=True,
- dest='user',
use=User.lo
@@ -1407,157 +1407,346 @@
one)
+,
%0A
-%7D),%0A 'source': Arg(unicode, required=True, validate=path_validator),%0A 'destination': Arg(unicode, required=True, validate=path_validator),
+'source': Arg(unicode, required=True),%0A 'destination': Arg(%7B%0A 'name': Arg(unicode, required=True, validate=lambda x: '/' not in x),%0A 'parent': Arg(unicode, required=True, validate=lambda x: '/' not in x),%0A 'node': Arg(None, required=True, dest='node', use=Node.load, validate=lambda x: x is not None),%0A %7D)
%0A%7D%0A%0A
|
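The validation rules the new file_opt_args encode, restated as a plain-Python sketch so it stays self-contained (webargs' old Arg API is deliberately left out): destination becomes a nested object whose name and parent must not contain slashes and whose node must resolve.

def no_slash(value):
    return '/' not in value

def valid_destination(dest, known_nodes=('n1',)):
    # known_nodes stands in for Node.load succeeding
    return (all(k in dest for k in ('name', 'parent', 'node'))
            and no_slash(dest['name'])
            and no_slash(dest['parent'])
            and dest['node'] in known_nodes)

print(valid_destination({'name': 'a', 'parent': 'p', 'node': 'n1'}))    # True
print(valid_destination({'name': 'a/b', 'parent': 'p', 'node': 'n1'}))  # False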
7dcbb064e9bd87e30d322e695452ab140c30b5ed
|
Support for --version
|
src/contexts/__main__.py
|
src/contexts/__main__.py
|
import sys
from .plugin_discovery import load_plugins
from . import run_with_plugins, main
def cmd():
try:
import colorama
except ImportError:
pass
else:
colorama.init()
plugin_list = load_plugins()
exit_code = run_with_plugins(plugin_list)
sys.exit(exit_code)
if __name__ == "__main__":
cmd()
|
Python
| 0
|
@@ -97,16 +97,93 @@
cmd():%0A
+ if '--version' in sys.argv:%0A print_version()%0A sys.exit(0)%0A%0A
try:
@@ -384,16 +384,284 @@
code)%0A%0A%0A
+def print_version():%0A import pkg_resources%0A version = pkg_resources.require('contexts')%5B0%5D.version%0A py_version = '.'.join(str(i) for i in sys.version_info%5B0:3%5D)%0A%0A print(%22Contexts version %22 + version)%0A print(%22Running on Python version %22 + py_version)%0A%0A%0A%0A
if __nam
|
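The added flow in one sketch: intercept --version before plugin loading, read the installed version via pkg_resources (shipped with setuptools), and report the interpreter version alongside it. Actually hitting the --version branch assumes the contexts package is installed.

import sys

def print_version(package='contexts'):
    import pkg_resources
    version = pkg_resources.require(package)[0].version
    py_version = '.'.join(str(i) for i in sys.version_info[0:3])
    print(package + ' version ' + version)
    print('Running on Python version ' + py_version)

if '--version' in sys.argv:
    print_version()
    sys.exit(0)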
cdaa708e185b252ddebb542e89a9c4d5e6740f2c
|
Include old (>24h) messages in news feed
|
feedline.py
|
feedline.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Rapidly display fresh headlines from a TinyTinyRSS instance on the command line.
(c) 2017 Andreas Fischer <_@ndreas.de>
"""
import subprocess
import argparse
import getpass
import json
import os.path
import readchar
from ttrss import TinyTinyRSS
def get_conn():
"""
Get connection details either from a config file, the commandline, or via user input.
"""
conn = {}
if os.path.isfile('pyttrss.cfg'):
with open('pyttrss.cfg', 'r') as cfgfile:
conn = json.load(cfgfile)
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', '--user', help='Username')
parser.add_argument('-p', '--password', help='Password')
parser.add_argument('-U', '--url', help='Server URL')
args = parser.parse_args()
# Preference: Commandline > Configfile > User input
conn['user'] = args.user or conn['user'] or raw_input("Enter username: ")
conn['password'] = args.password or conn['password'] or getpass.getpass()
conn['url'] = args.url or conn['url'] or raw_input("Enter server URL: ")
return conn
if __name__ == "__main__":
with TinyTinyRSS(get_conn()) as ttrss:
print "Unread articles:", ttrss.getUnread()
read_art_ids = []
for article in ttrss.getHeadlines(feed_id=-3):
outstr = u"{:>20} | {}".format(article['feed_title'][:20], article['title'])
print outstr
#print article['feed_title'][:20], "\t", article['title']
char = readchar.readchar()
if char == "o":
subprocess.call(['xdg-open', article['link']])
elif char == "s":
continue
elif char == "q":
break
read_art_ids.append(article['id'])
ttrss.updateArticle(read_art_ids, 0, 2)
|
Python
| 0
|
@@ -1420,9 +1420,29 @@
id=-
-3
+4, view_mode=%22unread%22
):%0A
|
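A stdlib-only sketch of the robots.txt gate this script keeps in front of the fetch (the module is robotparser on Python 2 and urllib.robotparser on Python 3); the rules string is fabricated.

try:
    import robotparser                         # Python 2
except ImportError:
    import urllib.robotparser as robotparser   # Python 3

rp = robotparser.RobotFileParser()
rp.parse('User-agent: *\nDisallow: /private/\n'.split('\n'))
print(rp.can_fetch('*', 'http://example.com/public/page'))   # True
print(rp.can_fetch('*', 'http://example.com/private/page'))  # False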
72438aceb58c7fe566a9a09ffccc0f8622a8df31
|
Update file list to new Hector file layout
|
setup.py
|
setup.py
|
"""
pyhector
--------
Python wrapper for the `Hector simple climate model
<https://github.com/JGCRI/hector>`_.
**Install** using ::
pip install pyhector
Find **usage** instructions in the `repository
<https://github.com/openclimatedata/pyhector>`_.
"""
from setuptools import setup, Extension
from setuptools.command.test import test as TestCommand
import glob
import os
import sys
import versioneer
path = os.path.abspath(os.path.dirname(__file__))
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.exit(pytest.main(self.test_args))
class get_pybind_include(object):
"""Helper class to determine the pybind11 include path
The purpose of this class is to postpone importing pybind11
until it is actually installed, so that the ``get_include()``
method can be invoked. """
def __init__(self, user=False):
self.user = user
def __str__(self):
import pybind11
return pybind11.get_include(self.user)
cmdclass = versioneer.get_cmdclass()
cmdclass.update({"test": PyTest})
libpyhector = Extension(
"pyhector._binding",
language="c++",
include_dirs=[
"include",
"hector/inst/include",
get_pybind_include(),
get_pybind_include(user=True),
],
libraries=["m", "boost_system", "boost_filesystem"],
extra_compile_args=["-std=c++11"],
sources=list(glob.glob("src/*.cpp") + glob.glob("hector/src/*.cpp")),
depends=list(glob.glob("include/*.h") + glob.glob("hector/inst/include/*.hpp")),
)
with open(os.path.join(path, "README.rst"), "r") as f:
readme = f.read()
setup(
name="pyhector",
version=versioneer.get_version(),
cmdclass=cmdclass,
description="Python wrapper for the Hector simple climate model",
long_description=readme,
long_description_content_type="text/x-rst",
url="https://github.com/openclimatedata/pyhector",
author="Sven Willner, Robert Gieseke",
author_email="sven.willner@pik-potsdam.de, robert.gieseke@pik-potsdam.de",
license="GNU Affero General Public License v3",
platforms="any",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
],
keywords="climate model climate change",
package_data={"pyhector": ["rcp_default.ini", "emissions/*"]},
include_package_data=True,
packages=["pyhector"],
install_requires=["numpy", "pandas", "pybind11>=2.2"],
tests_require=["pytest>=4.0", "pytest-cov"],
ext_modules=[libpyhector],
zip_safe=False,
)
|
Python
| 0
|
@@ -1526,68 +1526,1066 @@
ces=
-list(glob.glob(%22src/*.cpp%22) + glob.glob(%22hector/src/*.cpp%22))
+%5B%0A %22hector/src/bc_component.cpp%22,%0A %22hector/src/carbon-cycle-model.cpp%22,%0A %22hector/src/carbon-cycle-solver.cpp%22,%0A %22hector/src/ch4_component.cpp%22,%0A %22hector/src/core.cpp%22,%0A %22hector/src/dependency_finder.cpp%22,%0A %22hector/src/forcing_component.cpp%22,%0A %22hector/src/h_interpolator.cpp%22,%0A %22hector/src/halocarbon_component.cpp%22,%0A %22hector/src/logger.cpp%22,%0A %22hector/src/n2o_component.cpp%22,%0A %22hector/src/o3_component.cpp%22,%0A %22hector/src/oc_component.cpp%22,%0A %22hector/src/ocean_component.cpp%22,%0A %22hector/src/ocean_csys.cpp%22,%0A %22hector/src/oceanbox.cpp%22,%0A %22hector/src/oh_component.cpp%22,%0A %22hector/src/onelineocean_component.cpp%22,%0A %22hector/src/simpleNbox.cpp%22,%0A %22hector/src/slr_component.cpp%22,%0A %22hector/src/so2_component.cpp%22,%0A %22hector/src/spline_forsythe.cpp%22,%0A %22hector/src/temperature_component.cpp%22,%0A %22hector/src/unitval.cpp%22,%0A %22src/Hector.cpp%22,%0A %22src/main.cpp%22,%0A %22src/Observable.cpp%22,%0A %5D
,%0A
|
d0b6e2b9b3a936ea16a7c48fd951bb4f297c1190
|
Update setup.py to point to correct site
|
setup.py
|
setup.py
|
try:
from setuptools import setup, find_packages
except:
from distutils.core import setup, find_packages
install_requires = ['py3compat >= 0.2']
setup(
name='daisychain',
version='0.1',
description='Configuration-based OO-dependency resolution workflow engine',
author='Jeff Edwards',
author_email='jeff@edwardsj.com',
url='https://github.com/python-daisy/daisychain',
license='MIT License',
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
scripts = ['bin/daisy-chain'],
install_requires=install_requires
)
|
Python
| 0
|
@@ -382,16 +382,21 @@
on-daisy
+chain
/daisych
|
30bf6ddb0dde4e6bc953924fd6b22a09d97805cf
|
Add Test runs for Python 3.7 and remove 3.4 (#5295)
|
nox.py
|
nox.py
|
# Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
@nox.session
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install this package in-place.
session.install('mock', 'pytest', 'pytest-cov', *LOCAL_DEPS)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.runtimeconfig',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
)
@nox.session
@nox.parametrize('py', ['2.7', '3.4', '3.5', '3.6'])
def unit(session, py):
"""Run the unit test suite."""
# Run unit tests against all supported versions of Python.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'unit-' + py
default(session)
@nox.session
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.interpreter = 'python3.6'
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
# Set the virtualenv dirname.
session.virtualenv_dirname = 'setup'
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.interpreter = 'python3.6'
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
|
Python
| 0
|
@@ -1546,15 +1546,8 @@
.7',
- '3.4',
'3.
@@ -1555,16 +1555,23 @@
', '3.6'
+, '3.7'
%5D)%0Adef u
|
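The whole change above lives in one parametrize list; as a sketch against the older string-based nox API this record uses (session.interpreter, session.virtualenv_dirname), the supported matrix reads:

import nox

@nox.session
@nox.parametrize('py', ['2.7', '3.5', '3.6', '3.7'])
def unit(session, py):
    session.interpreter = 'python{}'.format(py)
    session.virtualenv_dirname = 'unit-' + py
    # install dependencies and invoke the shared default(session) here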
a88a9ad6ed64c3bf4b5a9e40a41a68e9581654e7
|
Fix nox config. (#4599)
|
nox.py
|
nox.py
|
# Copyright 2017, Google LLC All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
@nox.session
@nox.parametrize('python_version', ['2.7', '3.4', '3.5', '3.6'])
def unit_tests(session, python_version):
"""Run the unit test suite."""
session.interpreter = 'python{}'.format(python_version)
session.virtualenv_dirname = 'unit-' + python_version
session.install('pytest')
session.install('-e', '.')
session.run('py.test', '--quiet', os.path.join('tests', 'unit'))
@nox.session
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.interpreter = 'python3.6'
session.install('docutils', 'pygments')
session.run('python', 'setup.py', 'check', '--restructuredtext',
'--strict')
@nox.session
@nox.parametrize('py', ['2.7', '3.6'])
def system(session, py):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + py
# Install all test dependencies, then install this package into the
# virtualenv's dist-packages.
session.install('mock', 'pytest', *LOCAL_DEPS)
session.install('../test_utils/')
session.install('.')
# Run py.test against the system tests.
session.run('py.test', '--quiet', 'tests/system.py')
|
Python
| 0
|
@@ -2024,21 +2024,8 @@
est'
-, *LOCAL_DEPS
)%0A
@@ -2181,14 +2181,12 @@
s/system
-.py
+/
')%0A
|
82cc05e882698bdf2248ae0ae1589bb6455d0ca5
|
update fetch
|
fetch/rb.py
|
fetch/rb.py
|
import robotparser
from utils import config
from splinter import Browser
def index(session):
url = config.rb_url()
_robot_can_fetch(session, url)
def fetch():
with Browser(**config.browser_kwargs()) as browser:
browser.visit(url)
styles = []
group_names = browser.find_by_xpath("//*[contains(@class, 'groupname')]")
for group_name in group_names:
elements = group_name.find_by_xpath('following-sibling::ul[1]/li/a')
for el in elements:
styles.append({'group': group_name.text, 'name': el.text, 'href': el['href']})
return styles
session.visit(5, url, fetch)
def _robot_can_fetch(session, url):
robots_text = session.get(5, config.rb_robots(), map_to=lambda r: r.text)
rp = robotparser.RobotFileParser()
rp.parse(robots_text)
if not rp.can_fetch('*', url):
raise ValueError('Robot is not allowed to fetch {}'.format(url))
|
Python
| 0.000001
|
@@ -673,17 +673,17 @@
n.visit(
-5
+3
, url, f
|
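For readers following along on Python 3, a minimal self-contained sketch of the robots.txt gate in _robot_can_fetch above (the module was renamed to urllib.robotparser; the robots body and URLs here are invented for illustration):

from urllib import robotparser

robots_txt = "User-agent: *\nDisallow: /private/\n"  # illustrative robots.txt
rp = robotparser.RobotFileParser()
rp.parse(robots_txt.splitlines())  # parse() expects an iterable of lines

print(rp.can_fetch('*', 'https://example.com/styles'))     # True
print(rp.can_fetch('*', 'https://example.com/private/x'))  # False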
5839d76a0e29a3fa6b07a460ff3f0d8cf9b889b7
|
Remove alpha release
|
setup.py
|
setup.py
|
import os
from setuptools import setup, find_packages
src_dir = os.path.dirname(__file__)
install_requires = [
"troposphere~=1.8.0",
"awacs~=0.6.0",
"stacker~=0.8.1",
]
tests_require = [
"nose~=1.0",
"mock~=2.0.0",
]
def read(filename):
full_path = os.path.join(src_dir, filename)
with open(full_path) as fd:
return fd.read()
if __name__ == "__main__":
setup(
name="stacker_blueprints",
version="0.7.1a1",
author="Michael Barrett",
author_email="loki77@gmail.com",
license="New BSD license",
url="https://github.com/remind101/stacker_blueprints",
description="Default blueprints for stacker",
long_description=read("README.rst"),
packages=find_packages(),
install_requires=install_requires,
tests_require=tests_require,
test_suite="nose.collector",
)
|
Python
| 0
|
@@ -460,11 +460,9 @@
0.7.
-1a1
+0
%22,%0A
|
38791c7bb480ea5c9efdb4bab3a9c785e5078153
|
bump to version 0.1alpha9
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
import os, sys
static_types = [
'*.js',
'*.html',
'*.css',
'*.ico',
'*.gif',
'*.jpg',
'*.png',
'*.txt*',
'*.py',
'*.template'
]
#if sys.platform != "win32":
# _install_requires.append("Twisted")
_install_requires = [
'csp>=0.1alpha8',
'rtjp>=0.1alpha2',
'eventlet',
'paste',
'static'
]
# python <= 2.5
if sys.version_info[1] <= 5:
_install_requires.append('simplejson')
setup(
name='hookbox',
version='0.1a4',
author='Michael Carter',
author_email='CarterMichael@gmail.com',
license='MIT License',
description='HookBox is a Comet server and message queue that tightly integrates with your existing web application via web hooks and a REST interface.',
long_description='',
packages= find_packages(),
package_data = {'': reduce(list.__add__, [ '.git' not in d and [ os.path.join(d[len('hookbox')+1:], e) for e in
static_types ] or [] for (d, s, f) in os.walk(os.path.join('hookbox', 'static'))
]) },
zip_safe = False,
install_requires = _install_requires,
entry_points = '''
[console_scripts]
hookbox = hookbox.start:main
''',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
],
)
|
Python
| 0
|
@@ -328,9 +328,9 @@
lpha
-8
+9
', %0A
@@ -541,9 +541,9 @@
0.1a
-4
+5
',%0A
|
7156cc172b3ba87e3247367c6bf51cc24ce9a902
|
Update PyPI usage
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
setup.py
Created by Stephan Hügel on 2015-06-21
"""
from __future__ import unicode_literals
import os
import re
import io
from setuptools import setup, find_packages, Distribution
def read(*names, **kwargs):
with io.open(
os.path.join(os.path.dirname(__file__), *names),
encoding=kwargs.get("encoding", "utf8")
) as fp:
return fp.read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(
r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file,
re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
class BinaryDistribution(Distribution):
def is_pure(self):
return False
version=find_version("convertbng/util.py")
setup(
name='convertbng',
version=version,
description='Fast lon, lat to BNG conversion',
author='Stephan Hügel',
author_email='urschrei@gmail.com',
license='MIT License',
url='https://github.com/urschrei/convertbng',
include_package_data=True,
distclass=BinaryDistribution,
download_url='https://github.com/urschrei/convertbng/tarball/v%s' % version,
keywords=['Geo', 'BNG'],
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=find_packages(),
long_description="""\
Fast lon, lat to BNG conversion
---------------------------------------------
Uses a Rust 1.0 binary to perform fast lon, lat to BNG conversion\n
This module exposes two methods:\n
util.convertbng() – pass a lon, lat. Returns a tuple of Eastings, Northings\n
util.convertbng_list() – pass lists (or Numpy arrays) of lons, lats.
Returns a list of Easting, Northing tuples\n\n
Call them like so:\n
from convertbng.util import convertbng, convertbng_list\n\n
res = convertbng(lon, lat)\n
res_list = convertbng_list([lons], [lats])\n\n
This version requires Python 2.7.x / 3.4.x"""
)
|
Python
| 0
|
@@ -1785,85 +1785,103 @@
%22%22%5C%0A
-Fast lon, lat to BNG conversion%0A---------------------------------------------
+===============================%0AFast lon, lat to BNG conversion%0A===============================
%0AUse
@@ -1891,17 +1891,17 @@
Rust 1.
-0
+x
binary
@@ -2179,29 +2179,49 @@
n%5Cn%0A
-Call them like so:%5Cn%0A
+Usage%0A=====%0A%0A.. code-block:: python%0A%0A
from
@@ -2271,21 +2271,22 @@
bng_list
-%5Cn%5Cn%0A
+%0A%0A
res = co
@@ -2303,19 +2303,178 @@
on, lat)
-%5Cn%0A
+%0A%0A lons = %5Blon1, lon2, lon3%5D%0A lats = %5Blat1, lat2, lat3%5D%0A # assumes import numpy as np%0A lons_np = np.array(lons)%0A lats_np = np.array(lats)%0A %0A
res_list
@@ -2496,27 +2496,71 @@
ist(
-%5B
lons
-%5D
,
-%5B
lats
-%5D)%5Cn%5Cn
+)%0A res_list_np = convertbng_list(lons_np, lats_np)
%0A%0ATh
|
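The find_version() helper above is the standard trick for single-sourcing a package version: pull __version__ out of the module text with a regex rather than importing it. A self-contained sketch with the file contents inlined instead of read from disk:

import re

module_source = '__version__ = "0.3.1"\n'  # stand-in for convertbng/util.py

match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", module_source, re.M)
if match:
    print(match.group(1))  # -> 0.3.1
else:
    raise RuntimeError("Unable to find version string.")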
ab077d26c51c3e6db6e82bd256b14563373587fe
|
Resolve requests version conflict
|
setup.py
|
setup.py
|
# Copyright (C) 2015-2016 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import find_packages, setup
botoRequirement = 'boto==2.38.0'
def runSetup():
"""
Calls setup(). This function exists so the setup() invocation preceded more internal
functionality. The `version` module is imported dynamically by importVersion() below.
"""
setup(
name='toil',
version=version.distVersion,
description='Pipeline management software for clusters.',
author='Benedict Paten',
author_email='benedict@soe.usc.edu',
url="https://github.com/BD2KGenomics/toil",
classifiers=["License :: OSI Approved :: Apache Software License"],
license="Apache License v2.0",
install_requires=[
'bd2k-python-lib>=1.14a1.dev35',
'dill==0.2.5',
'six>=1.10.0',
'future',
'requests',
'docker==2.5.1'],
extras_require={
'mesos': [
'psutil==3.0.1'],
'aws': [
botoRequirement,
'cgcloud-lib==' + version.cgcloudVersion,
'futures==3.0.5'],
'azure': [
'azure==1.0.3'],
'encryption': [
'pynacl==1.1.2'],
'google': [
'gcs_oauth2_boto_plugin==1.9',
botoRequirement],
'cwl': [
'cwltool==1.0.20170822192924',
'schema-salad >= 2.6, < 3',
'cwltest>=1.0.20170214185319']},
package_dir={'': 'src'},
packages=find_packages(where='src',
# Note that we intentionally include the top-level `test` package for
# functionality like the @experimental and @integrative decoratorss:
exclude=['*.test.*']),
# Unfortunately, the names of the entry points are hard-coded elsewhere in the code base so
# you can't just change them here. Luckily, most of them are pretty unique strings, and thus
# easy to search for.
entry_points={
'console_scripts': [
'toil = toil.utils.toilMain:main',
'_toil_worker = toil.worker:main',
'cwltoil = toil.cwl.cwltoil:main [cwl]',
'toil-cwl-runner = toil.cwl.cwltoil:main [cwl]',
'cwl-runner = toil.cwl.cwltoil:main [cwl]',
'_toil_mesos_executor = toil.batchSystems.mesos.executor:main [mesos]']})
def importVersion():
"""
Load and return the module object for src/toil/version.py, generating it from the template if
required.
"""
import imp
try:
# Attempt to load the template first. It only exists in a working copy cloned via git.
import version_template
except ImportError:
# If loading the template fails we must be in a unpacked source distribution and
# src/toil/version.py will already exist.
pass
else:
# Use the template to generate src/toil/version.py
import os
import errno
from tempfile import NamedTemporaryFile
new = version_template.expand_()
try:
with open('src/toil/version.py') as f:
old = f.read()
except IOError as e:
if e.errno == errno.ENOENT:
old = None
else:
raise
if old != new:
with NamedTemporaryFile(dir='src/toil', prefix='version.py.', delete=False) as f:
f.write(new)
os.rename(f.name, 'src/toil/version.py')
# Unfortunately, we can't use a straight import here because that would also load the stuff
# defined in src/toil/__init__.py which imports modules from external dependencies that may
# yet to be installed when setup.py is invoked.
return imp.load_source('toil.version', 'src/toil/version.py')
version = importVersion()
runSetup()
|
Python
| 0
|
@@ -1446,16 +1446,24 @@
requests
+==2.18.4
',%0A
|
b61a6e79703b6f807f1b179f23fa9dd7836ab957
|
Version bump to 2.4.0
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
long_description = open('README.rst').read()
setup(
name='PyChromecast',
version='2.3.0',
license='MIT',
url='https://github.com/balloob/pychromecast',
author='Paulus Schoutsen',
author_email='paulus@paulusschoutsen.nl',
description='Python module to talk to Google Chromecast.',
long_description=long_description,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=list(val.strip() for val in open('requirements.txt')),
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
Python
| 0
|
@@ -132,17 +132,17 @@
sion='2.
-3
+4
.0',%0A
|
2f766e439b9d91ab4d4682245a2360bc1e5c2bb5
|
Update version
|
setup.py
|
setup.py
|
import matplotlib
import os
MPLBE = os.environ.get('MPLBE')
if MPLBE:
matplotlib.use(MPLBE)
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
DESCRIPTION = "General Matplotlib Exporter"
LONG_DESCRIPTION = open('README.md').read()
NAME = "mplexporter"
AUTHOR = "Jake VanderPlas"
AUTHOR_EMAIL = "jakevdp@cs.washington.edu"
MAINTAINER = "Jake VanderPlas"
MAINTAINER_EMAIL = "jakevdp@cs.washington.edu"
DOWNLOAD_URL = 'https://github.com/mpld3/mplexporter'
URL = DOWNLOAD_URL
LICENSE = 'BSD 3-clause'
VERSION = '0.0.1'
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
url=URL,
download_url=DOWNLOAD_URL,
license=LICENSE,
packages=['mplexporter', 'mplexporter.renderers'],
)
|
Python
| 0
|
@@ -560,11 +560,11 @@
'0.
-0.1
+1.0
'%0A%0As
|
6797300eeeb014debc5472927c5b5711597881ea
|
bump to 0.2.1
|
setup.py
|
setup.py
|
"""
Copyright 2012 DISQUS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from setuptools import setup
VERSION = '0.2'
NAME = 'jones'
install_requires = [
'zc-zookeeper-static',
'kazoo>=0.2b1'
]
web_requires = install_requires + [
'flask',
'raven'
]
tests_require = web_requires + [
'nose',
'unittest2',
'mock',
]
if __name__ == '__main__':
setup(
name=NAME,
version=VERSION,
author='Matthew Hooker',
author_email='mwhooker@gmail.com',
url='https://github.com/disqus/jones',
description='Configuration frontend for Zookeeper.',
license='Apache License 2.0',
py_modules = ['jones.client'],
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require={
'test': tests_require,
'web': web_requires
},
test_suite='nose.collector',
include_package_data=True,
)
|
Python
| 0.000055
|
@@ -594,16 +594,18 @@
N = '0.2
+.1
'%0ANAME =
|
d611830525e93e1c1a364ed88695d62003490e07
|
Bump version number
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
setup(
name="trajprocess",
version='2.0.4',
packages=find_packages(),
requires=['numpy', 'mdtraj', 'nose'],
zip_safe=False,
include_package_data=True,
)
|
Python
| 0.000002
|
@@ -90,9 +90,9 @@
2.0.
-4
+5
',%0A
|
abfdbaee5f80c7c02436268016718a5362f9083d
|
make setup.py pypi conform
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
version = '0.1.5'
setup(
name='SerpScrap',
version=version,
description='A python module to scrape and extract data like links, titles, descriptions, ratings, from search engine result pages.',
long_description=open('README.md').read(),
author='Ronald Schmidt',
author_email='ronald.schmidt@zu-web.de',
url='https://github.com/ecoron/SerpScrap',
packages=find_packages(),
dependency_links=[
'git+git://github.com/ecoron/GoogleScraper#egg=GoogleScraper'
],
install_requires=[
'GoogleScraper',
'chardet==2.3.0',
'beautifulsoup4==4.4.1',
'html2text==2016.4.2',
],
)
|
Python
| 0.000001
|
@@ -171,16 +171,18 @@
iption='
+''
A python
@@ -258,16 +258,20 @@
ratings,
+%0A
from se
@@ -294,17 +294,40 @@
lt pages
-.
+ %0A and listed urls.''
',%0A l
@@ -485,24 +485,43 @@
SerpScrap',%0A
+ license='MIT',%0A
packages
@@ -776,17 +776,390 @@
6.4.2',%0A
-
%5D,%0A
+ classifiers=%5B%0A 'Development Status :: 1 - Planning',%0A 'Intended Audience :: Developers',%0A 'Topic :: Internet',%0A 'License :: OSI Approved :: MIT License',%0A 'Programming Language :: Python :: 3.3',%0A 'Programming Language :: Python :: 3.4',%0A 'Programming Language :: Python :: 3.5',%0A %5D,%0A keywords='serp url scraper',%0A
)%0A
|
5d36740917dbc08aca8202d4fb5915b4f17a9552
|
Add log level specification to cax
|
cax/main.py
|
cax/main.py
|
import argparse
import logging
import os.path
import time
from cax.config import mongo_password, set_json, get_task_list, get_config
from cax.tasks import checksum, clear, data_mover, process
from cax import __version__
def main():
parser = argparse.ArgumentParser(
description="Copying All kinds of XENON1T data.")
parser.add_argument('--once', action='store_true',
help="Run all tasks just one, then exits")
parser.add_argument('--config', action='store', dest='config_file',
help="Load a custom .json config file into cax")
args = parser.parse_args()
run_once = args.once
# Check passwords and API keysspecified
mongo_password()
# Setup logging
cax_version = 'cax_v%s - ' % __version__
logging.basicConfig(filename='cax.log',
level=logging.INFO,
format=cax_version + '%(asctime)s [%(levelname)s] %(message)s')
logging.info('Daemon is starting')
# define a Handler which writes INFO messages or higher to the sys.stderr
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
# Get specified cax.json configuration file for cax:
if args.config_file:
if not os.path.isfile(args.config_file):
logging.error("Config file %s not found", args.config_file)
else:
logging.info("Using custom config file: %s",
args.config_file)
set_json(args.config_file)
tasks = [process.ProcessBatchQueue(),
data_mover.SCPPush(),
data_mover.SCPPull(),
checksum.AddChecksum(),
checksum.CompareChecksums(),
clear.ClearDAQBuffer(),
clear.RetryStalledTransfer(),
clear.RetryBadChecksumTransfer(),
]
# Raises exception if unknown host
get_config()
user_tasks = get_task_list()
while True:
for task in tasks:
# Skip tasks that user did not specify
if user_tasks and task.__class__.__name__ not in user_tasks:
continue
logging.info("Executing %s." % task.__class__.__name__)
task.go()
# Decide to continue or not
if run_once:
break
else:
logging.info('Sleeping.')
time.sleep(60)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -496,16 +496,50 @@
'store',
+ type=str,%0A
dest='c
@@ -628,38 +628,319 @@
x%22)%0A
-%0A args = parser.parse_args(
+ parser.add_argument('--log', dest='log', type=str, default='info',%0A help=%22Logging level e.g. debug%22)%0A%0A args = parser.parse_args()%0A%0A log_level = getattr(logging, args.log.upper())%0A if not isinstance(log_level, int):%0A raise ValueError('Invalid log level: %25s' %25 args.log
)%0A%0A
@@ -1172,25 +1172,22 @@
evel=log
-ging.INFO
+_level
,%0A
@@ -1452,17 +1452,14 @@
(log
-ging.INFO
+_level
)%0A%0A
|
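The --log handling this diff adds follows a common argparse idiom: map the user-supplied level name onto the logging module's integer constants via getattr. A runnable sketch of just that piece, with a hard-coded argv standing in for the command line:

import argparse
import logging

parser = argparse.ArgumentParser()
parser.add_argument('--log', dest='log', type=str, default='info',
                    help="Logging level e.g. debug")
args = parser.parse_args(['--log', 'debug'])  # stand-in for real sys.argv

log_level = getattr(logging, args.log.upper())  # e.g. logging.DEBUG == 10
if not isinstance(log_level, int):
    raise ValueError('Invalid log level: %s' % args.log)

logging.basicConfig(level=log_level)
logging.debug('debug messages are now visible')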
8548f3dc25af0a9cee7d34bcfcec2dc4ca5d1a88
|
Edit spam regex
|
findspam.py
|
findspam.py
|
# -*- coding: utf-8 -*-
import re
import phonenumbers
class FindSpam:
rules = [
{'regex': u"(?i)\\b(baba(ji)?|nike|vashi?k[ae]r[ae]n|sumer|kolcak|porn|molvi|judi bola|ituBola.com|lost lover|11s|acai|skin care|swtor2credits|me2.do|black magic|bam2u|Neuro(3X|flexyn)|Nutra|Bowtrol|Forskolin|Blackline Elite|TestCore Pro|Xtreme Antler|fake (passports?|driver'?s? licen[cs]e|ID cards?)|bagprada)\\b|ಌ|(support|service|helpline)( phone)? number|1[ -]?866[ -]?978[ -]?6819", 'all': True,
'sites': [], 'reason': "Bad keyword in {}", 'title': True, 'username': True},
{'regex': u"(?i)\\b(weight (los[es]|reduction)|muscles? build(ing)?|muscles?( (grow(th)?|diets?))?|anti aging|SkinCentric|los[es] weight)\\b", 'all': True,
'sites': ["fitness.stackexchange.com"], 'reason': "Bad keyword in {}", 'title': True, 'username': True},
{'regex': u"(?i)^(?:(?=.*?\\b(?:online|hd)\\b)(?=.*?(?:free|full|unlimited)).*?movies?\\b|(?=.*?\\b(?:acai|kisn)\\b)(?=.*?care).*products?\\b|(?=.*?packer).*mover)", 'all': True,
'sites': [], 'reason': "Bad keywords in {}", 'title': True, 'username': True},
{'regex': u"\\d(?:_*\\d){9}|\\+?\\d_*\\d[\\s\\-]?(?:_*\\d){8,10}|\\d[ -]?\\d{3}[ -]?\\d{3}[ -]?\\d{4}", 'all': True,
'sites': ["patents.stackexchange.com"], 'reason': "Phone number detected", 'validation_method': 'checkphonenumbers', 'title': True, 'username': False},
{'regex': u"(?i)\\b(nigg(a|er)|asshole|crap|fag|fuck(ing?)?|shit|whore)s?\\b", 'all': True,
'sites': [], 'reason': "Offensive {} detected",'insensitive':True, 'title': True, 'username': False},
{'regex': u"^(?=.*[A-Z])[^a-z]*$", 'all': True, 'sites': [], 'reason': "All-caps title", 'title': True, 'username': False},
{'regex': u"^(?=.*[0-9])[^a-zA-Z]*$", 'all': True, 'sites': [], 'reason': "Numbers-only title", 'title': True, 'username': False},
{'regex': u"https?://[a-zA-Z0-9_.-]+\\.[a-zA-Z]{2,4}(/[a-zA-Z0-9_/?=.-])?", 'all': True,
'sites': ["stackoverflow.com", "superuser.com", "askubuntu.com"], 'reason': "URL in title", 'title': True, 'username': False}
]
@staticmethod
def testpost(title, user_name, site):
result = [];
for rule in FindSpam.rules:
if rule['all'] != (site in rule['sites']):
matched_title = re.compile(rule['regex'], re.UNICODE).findall(title)
matched_username = re.compile(rule['regex'], re.UNICODE).findall(user_name)
if matched_title and rule['title']:
try:
if getattr(FindSpam, "%s" % rule['validation_method'])(matched_title):
result.append(rule['reason'])
except KeyError: # There is no special logic for this rule
result.append(rule['reason'].replace("{}", "title"))
if matched_username and rule['username']:
try:
if getattr(FindSpam, "%s" % rule['validation_method'])(matched_username):
result.append(rule['reason'])
except KeyError: # There is no special logic for this rule
result.append(rule['reason'].replace("{}", "username"))
return result
@staticmethod
def checkphonenumbers(matched):
test_formats = [ "IN", "US", None ]
for phone_number in matched:
for testf in test_formats:
try:
z = phonenumbers.parse(phone_number, testf)
if phonenumbers.is_possible_number(z) and phonenumbers.is_valid_number(z):
print "Possible %s, Valid %s, Explain: %s" % (phonenumbers.is_possible_number(z), phonenumbers.is_valid_number(z), z)
return True
except phonenumbers.phonenumberutil.NumberParseException:
pass
return False
|
Python
| 0.000003
|
@@ -282,16 +282,27 @@
Bowtrol%7C
+Slim Genix%7C
Forskoli
|
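The checkphonenumbers() validator above leans on the third-party phonenumbers package, trying several region hints per candidate. A Python 3 sketch of that loop (the record itself is Python 2); the number below is a widely cited valid example number:

import phonenumbers

candidate = "+1 650-253-0000"
for region in ("IN", "US", None):
    try:
        parsed = phonenumbers.parse(candidate, region)
    except phonenumbers.phonenumberutil.NumberParseException:
        continue
    if (phonenumbers.is_possible_number(parsed)
            and phonenumbers.is_valid_number(parsed)):
        print("valid when parsed with region hint", region)
        break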
8649b296e05c432dd3841d8c5dc8d9aebd6d09db
|
update global test script
|
cea/test.py
|
cea/test.py
|
"""
Test all the main scripts in one go - drink coffee while you wait :)
"""
import properties
import demand
import emissions
import embodied
import graphs
properties.test_properties()
demand.test_demand()
emissions.test_lca_operation()
embodied.test_lca_embodied()
graphs.test_graph_demand()
|
Python
| 0
|
@@ -279,16 +279,44 @@
_graph_demand()%0A
+%0Aprint 'full test completed'
|
3112a99c4099842fc599212a606a9e946b84cdd2
|
Add error handling for not having enough files to choose from
|
odc.py
|
odc.py
|
import os
import glob
import random
import smart_open
import pandas as pd
import click
N_POSITIVE = 99684
N_NEGATIVE = 996941
N_TOTAL = N_POSITIVE + N_NEGATIVE
POSITIVE_RATIO = N_POSITIVE / N_TOTAL
def read_submission(file, n_advertise):
df = pd.read_csv(file)
columns = df.columns.values
assert len(columns) == 2, 'There must be only two columns'
assert columns[0] == 'household_id', 'The first column must be household_id'
assert columns[1] == 'advertise', 'The second column must be advertise'
n_found = len(df[df['advertise'] != 0])
assert n_found == n_advertise, 'There must be exactly {} non-zeros and found {}'.format(
n_advertise, n_found)
return df
def read_spends(file):
spend_lookup = {}
total_possible = 0
spenders = set()
with open(file) as f:
for l in f:
hhid, spend = l.strip().split(',')
hhid = int(hhid)
spend = float(spend)
spend_lookup[hhid] = spend
total_possible += spend
if spend > 0:
spenders.add(hhid)
return spend_lookup, total_possible, spenders
def filter_advertise(submission):
return submission[submission['advertise'] != 0]
def compute_revenue(submission, spend_lookup):
revenue = 0
for ix, row in submission.iterrows():
revenue += spend_lookup[row.household_id]
return revenue
def compute_n_responders(submission, spenders):
n_responders = 0
for ix, row in submission.iterrows():
if row.household_id in spenders:
n_responders += 1
return n_responders
@click.group()
def cli():
pass
@cli.command()
@click.option('--ratio', is_flag=True,
help='Score by computing top K using ratio instead of K=100,000')
@click.argument('spend_file')
@click.argument('submission_file')
def score(ratio, spend_file, submission_file):
spend_lookup, total_possible, spenders = read_spends(spend_file)
if ratio:
n_examples = len(spend_lookup)
n_advertise = int(n_examples * POSITIVE_RATIO)
print("Using {} total examples, expecting exactly {} advertisements".format(
n_examples, n_advertise, POSITIVE_RATIO))
else:
n_advertise = 100000
raw_submission = read_submission(submission_file, n_advertise)
filtered_submission = filter_advertise(raw_submission)
revenue = compute_revenue(filtered_submission, spend_lookup)
n_responders = compute_n_responders(filtered_submission, spenders)
print('Revenue:', revenue)
print('Fraction of Possible Revenue:', revenue / total_possible)
print('Number of Responders:', n_responders)
print('Fraction of Possible Responders', n_responders / len(spenders))
def is_positive_example(line):
fields = line.split(',')
return float(fields[1]) != 0
@cli.command()
@click.option('--seed')
@click.argument('n_samples', type=int)
@click.argument('input_path', type=str)
@click.argument('output_path', type=str)
def sample(seed, n_samples, input_path, output_path):
if os.path.isdir(input_path):
print('Directory detected, using all files in directory')
glob_path = os.path.join(input_path, '/*')
files = [smart_open.smart_open(p) for p in glob.glob(glob_path)]
else:
print('Single file detected')
files = [smart_open.smart_open(input_path)]
output = open(output_path, 'w')
if seed is not None:
random.seed(seed)
total_positive = int(n_samples * POSITIVE_RATIO)
total_negative = n_samples - total_positive
print('Finding a total of {} examples, {} positive and {} negative'.format(
n_samples, total_positive, total_negative))
n_positive = 0
n_negative = 0
while n_positive + n_negative < n_samples:
random_file = random.choice(files)
try:
line = next(random_file).decode('utf8')
except StopIteration:
continue
is_positive = is_positive_example(line)
if is_positive and n_positive < total_positive:
output.write(line)
n_positive += 1
continue
if not is_positive and n_negative < total_negative:
output.write(line)
n_negative += 1
continue
for f in files:
f.close()
output.close()
if __name__ == '__main__':
cli()
|
Python
| 0
|
@@ -3774,24 +3774,147 @@
n_samples:%0A
+ if len(files) == 0:%0A raise Exception('There are not enough files to get %7B%7D examples'.format(n_samples))%0A
rand
@@ -3916,20 +3916,21 @@
random_
-file
+index
= rando
@@ -3935,21 +3935,71 @@
dom.
-choice(files)
+randrange(len(files))%0A random_file = files%5Brandom_index%5D
%0A
@@ -4098,32 +4098,79 @@
-continue
+random_file.close()%0A files.pop(random_index)
%0A%0A is
|
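The fix in this diff closes and drops a file from the pool once it is exhausted, instead of retrying it forever. A stand-alone sketch of that pattern using in-memory streams in place of real files:

import io
import random

files = [io.StringIO("a\nb\n"), io.StringIO("c\n")]
samples = []
while files and len(samples) < 3:  # stop early if the pool empties
    index = random.randrange(len(files))
    try:
        samples.append(next(files[index]))
    except StopIteration:
        files[index].close()   # exhausted: close it and
        files.pop(index)       # remove it from the pool
print(samples)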
747ed560ac6a3854feb0a2c23885b1aa0be61b5f
|
fix some style issues
|
app/main.py
|
app/main.py
|
from google.appengine.api import files
from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from google.appengine.ext import blobstore
import csv
import re
import sys
import os
import webapp2
import jinja2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.join(os.path.dirname(__file__), 'templates')),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class Logfile(ndb.Model):
bk = ndb.BlobKeyProperty()
datadate = ndb.DateTimeProperty(auto_now=True)
@classmethod
def singleton(cls):
return cls.get_or_insert('SINGLE')
class MainPage(webapp2.RequestHandler):
def get(self):
bks = Logfile.singleton()
template_values = {'data_date': bks.datadate,
}
template = JINJA_ENVIRONMENT.get_template('index.html')
self.response.write(template.render(template_values))
class ReloadLogfile(webapp2.RequestHandler):
def get(self):
data = urlfetch.fetch('https://nethack.devnull.net/tournament/scores.xlogfile', deadline=60).content
savescores(data)
class UniqueDeaths(webapp2.RequestHandler):
def get(self, username):
self.response.headers['Content-Type'] = 'text/plain'
mydeaths = []
possibledeaths=[]
done=set()
with open('death_yes.txt') as deathyes:
for line in deathyes:
possibledeaths.append(re.compile(line.rstrip()+'$'))
scores = readscores()
reader = csv.reader(scores, delimiter=':')
for line in reader:
if username == line[15].split('=')[1]:
mydeaths.append(line[16].split('=')[1].decode('unicode-escape'))
for death in mydeaths:
death = death.replace('(with the Amulet)', '') # the tournament seems to do this; if so it's a bug...
for exp in possibledeaths:
if exp.match(death):
done.add(exp)
self.response.write(str(len(done))+'\n')
tmp = []
for d in possibledeaths:
if d not in done:
tmp.append(d.pattern)
for d in tmp:
self.response.write(d + '\n')
class UniqueRedir(webapp2.RequestHandler):
def post(self):
un = self.request.get('username')
if un is not None:
self.redirect("/unique/" + self.request.get('username'))
else:
self.redirect("/")
application = webapp2.WSGIApplication(
[
('/', MainPage),
(r'/unique/(.*)', UniqueDeaths),
(r'/unique', UniqueRedir),
(r'/reload', ReloadLogfile),
], debug=True)
def readscores():
"""read scores from datastore, return filelike suitable for CSV reader"""
return open('scores.xlogfile', 'rb') # FIXME: actually read from DS
def savescores(data):
"""write scores back to datastore, from string"""
# FIXME: actually save to DS here...
|
Python
| 0.000018
|
@@ -1,43 +1,4 @@
-from google.appengine.api import files%0A
from
@@ -75,51 +75,8 @@
ndb
-%0Afrom google.appengine.ext import blobstore
%0A%0Aim
@@ -98,19 +98,8 @@
re%0A
-import sys%0A
impo
@@ -329,16 +329,17 @@
=True)%0A%0A
+%0A
class Lo
@@ -834,16 +834,17 @@
lues))%0A%0A
+%0A
class Re
@@ -2397,16 +2397,17 @@
t(%22/%22)%0A%0A
+%0A
applicat
@@ -2772,16 +2772,17 @@
rom DS%0A%0A
+%0A
def save
@@ -2890,14 +2890,8 @@
here...%0A
-%0A %0A
|
90c07db3c507e1394cf0a72e73f9c7cc425b20a4
|
return False
|
one.py
|
one.py
|
def one(iterable):
"""
Return X if X is the only one value where bool(i) is True for
each every i in the iterable. In any other case return None.
>>> one((True, False, False))
True
>>> one((True, False, True))
False
>>> one((0, 0, 'a'))
'a'
>>> one((0, False, None))
False
>>> bool(one((True, True)))
False
>>> bool(one((False, True)))
True
"""
iterable = iter(iterable)
for item in iterable:
if item:
break
else:
return False
if any(iterable):
return False
return item
if __name__ == "__main__":
import doctest
doctest.testmod()
|
Python
| 0.999999
|
@@ -23,134 +23,214 @@
%22%22%22
-%0A
Return
-X if X is
+the object in
the
-only one value where bool(i) is
+given iterable that evaluates to
True
- for
+.%0A
%0A
-each every i in the iterable. In any other case
+If the given iterable has more than one object that evaluates to True,%0A or if there is no object that fulfills such condition,
return
None
@@ -225,19 +225,20 @@
return
-Non
+Fals
e.%0A%0A
@@ -396,29 +396,24 @@
lse%0A %3E%3E%3E
-bool(
one((True, T
@@ -413,25 +413,24 @@
True, True))
-)
%0A False%0A
@@ -446,27 +446,21 @@
ol(one((
-False, True
+'', 1
)))%0A
@@ -722,8 +722,9 @@
estmod()
+%0A
|
6726af1a15c3b64ea9cbb68e18a7983477713842
|
Update 0.91
|
src/cerberus_ac/admin.py
|
src/cerberus_ac/admin.py
|
# -*- coding: utf-8 -*-
"""Admin module."""
# from django.contrib import admin
# from django.contrib.admin.sites import AdminSite
#
# from cerberus_ac.views import EditUserPermissions
# from .models import *
#
#
# class SecurityAdmin(AdminSite):
# pass
#
# class DataAdmin(AdminSite):
# pass
#
# class AuditAdmin(AdminSite):
# pass
#
# security_admin_site = SecurityAdmin(name='SecurityAdmin')
# data_admin_site = DataAdmin(name='DataAdmin')
# audit_admin_site = AuditAdmin(name='AuditAdmin')
#
# # Security Admin Pages
# # Logs
# @security_admin_site.register(AccessHistory)
# class ObjectAccessHistoryAdmin(admin.ModelAdmin):
# pass
#
# @security_admin_site.register(PrivilegeHistory)
# class PrivChangesHistoryAdmin(admin.ModelAdmin):
# pass
#
# # User Permissions
# @security_admin_site.register(RolePrivilege)
# class PermissionsAdmin(admin.ModelAdmin):
# pass
from cerberus_ac.views import EditUserPermissions
from .models import *
class SecurityAdmin(AdminSite):
pass
class DataAdmin(AdminSite):
pass
class AuditAdmin(AdminSite):
pass
security_admin_site = SecurityAdmin(name='SecurityAdmin')
data_admin_site = DataAdmin(name='DataAdmin')
audit_admin_site = AuditAdmin(name='AuditAdmin')
# # Security Admin Pages
# # Logs
# @security_admin_site.register(AccessHistory)
# class ObjectAccessHistoryAdmin(admin.ModelAdmin):
# pass
#
# @security_admin_site.register(PrivilegeHistory)
# class PrivChangesHistoryAdmin(admin.ModelAdmin):
# pass
#
# # User Permissions
# @security_admin_site.register(RolePrivilege)
# class PermissionsAdmin(admin.ModelAdmin):
# pass
# Data Admin Pages
|
Python
| 0
|
@@ -43,96 +43,8 @@
%22%22%0A%0A
-# from django.contrib import admin%0A# from django.contrib.admin.sites import AdminSite%0A#%0A
# fr
@@ -91,16 +91,16 @@
issions%0A
+
# from .
@@ -157,32 +157,33 @@
e):%0A# pass%0A#
+
%0A# class DataAdm
@@ -362,16 +362,16 @@
Admin')%0A
+
# audit_
@@ -416,748 +416,8 @@
in')
-%0A#%0A# # Security Admin Pages%0A# # Logs%0A# @security_admin_site.register(AccessHistory)%0A# class ObjectAccessHistoryAdmin(admin.ModelAdmin):%0A# pass%0A#%0A# @security_admin_site.register(PrivilegeHistory)%0A# class PrivChangesHistoryAdmin(admin.ModelAdmin):%0A# pass%0A#%0A# # User Permissions%0A# @security_admin_site.register(RolePrivilege)%0A# class PermissionsAdmin(admin.ModelAdmin):%0A# pass%0A%0Afrom cerberus_ac.views import EditUserPermissions%0Afrom .models import *%0A%0A%0Aclass SecurityAdmin(AdminSite):%0A pass%0A%0Aclass DataAdmin(AdminSite):%0A pass%0A%0Aclass AuditAdmin(AdminSite):%0A pass%0A%0Asecurity_admin_site = SecurityAdmin(name='SecurityAdmin')%0Adata_admin_site = DataAdmin(name='DataAdmin')%0Aaudit_admin_site = AuditAdmin(name='AuditAdmin')
%0A%0A#
|
b16c86b418e1c706bf25347d276d25876637d89b
|
add failing test for customized type deep in type hierarchy
|
spyne/test/interface/test_interface.py
|
spyne/test/interface/test_interface.py
|
#!/usr/bin/env python
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import unittest
from spyne.application import Application
from spyne.decorator import rpc
from spyne.model import Array
from spyne.model import ComplexModel
from spyne.model import AnyXml, Uuid
from spyne.model import UnsignedLong
from spyne.model import UnsignedInteger16
from spyne.model import Integer
from spyne.model import DateTime
from spyne.model import Unicode
from spyne.protocol.http import HttpRpc
from spyne.protocol.soap import Soap11
from spyne.service import ServiceBase
class TestInterface(unittest.TestCase):
def test_imports(self):
import logging
logging.basicConfig(level=logging.DEBUG)
class KeyValuePair(ComplexModel):
__namespace__ = "1"
key = Unicode
value = Unicode
class Something(ComplexModel):
__namespace__ = "2"
d = DateTime
i = Integer
class SomethingElse(ComplexModel):
__namespace__ = "3"
a = AnyXml
b = UnsignedLong
s = Something
class BetterSomething(Something):
__namespace__ = "4"
k = UnsignedInteger16
class Service1(ServiceBase):
@rpc(SomethingElse, _returns=Array(KeyValuePair))
def some_call(ctx, sth):
pass
class Service2(ServiceBase):
@rpc(BetterSomething, _returns=Array(KeyValuePair))
def some_other_call(ctx, sth):
pass
application = Application([Service1, Service2],
in_protocol=HttpRpc(),
out_protocol=Soap11(),
name='Service', tns='target_namespace'
)
imports = application.interface.imports
tns = application.interface.get_tns()
smm = application.interface.service_method_map
print(imports)
assert imports[tns] == set(['1','3','4'])
assert imports['3'] == set(['2'])
assert imports['4'] == set(['2'])
assert smm['{%s}some_call' % tns]
assert smm['{%s}some_call' % tns][0].service_class == Service1
assert smm['{%s}some_call' % tns][0].function == Service1.some_call
assert smm['{%s}some_other_call' % tns]
assert smm['{%s}some_other_call' % tns][0].service_class == Service2
assert smm['{%s}some_other_call' % tns][0].function == Service2.some_other_call
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -3158,16 +3158,651 @@
r_call%0A%0A
+ def test_empty(self):%0A RequestStatus = Unicode(values=%5B'new', 'processed'%5D)%0A%0A class RequestUnsigned(ComplexModel):%0A pass%0A%0A class DataRequest(RequestUnsigned):%0A operator = Uuid%0A status = Array(RequestStatus)%0A%0A class HelloWorldService(ServiceBase):%0A @rpc(DataRequest)%0A def some_call(ctx, dgrntcl):%0A pass%0A%0A Application(%5BHelloWorldService%5D, 'spyne.examples.hello.soap',%0A in_protocol=Soap11(validator='lxml'),%0A out_protocol=Soap11())%0A%0A # test passes if instantiating Application doesn't fail%0A%0A%0A
if __nam
|
baab52477f637364f0a1a974b4ee13114c667bca
|
allow multiple encodings in headers (i.e. "From: =?iso-8859-2?Q?...?= <email@address.com>")
|
cia-mail.py
|
cia-mail.py
|
#!/usr/local/bin/python
#
# Copyright (C) Merlijn van Deen <valhallasw@gmail.com>, 2009
#
# Distributed under the terms of the MIT license.
#
import sys, time
from email.Parser import Parser
from email.Header import Header, decode_header
from xml.sax.saxutils import escape
from xmlrpclib import ServerProxy
e = Parser().parse(sys.stdin)
# Stupid email library. This parses all headers into nice unicode strings...
headers = dict([(header, Header(*decode_header(e[header])[0]).__unicode__()) for header in e.keys()])
author = headers['From']
author = author[:author.find('<')].strip() # remove email address
author = author.strip("\"\'")
subject = headers['Subject']
subject = subject.replace('\n', ' ')
message = """
<message>
<generator>
<name>CIA Python client for mail</name>
<version>0.2</version>
</generator>
<source>
<project>%(project)s</project>
</source>
<timestamp>%(timestamp)s</timestamp>
<body>
<commit>
<author>%(author)s</author>
<log>%(subject)s</log>
</commit>
</body>
</message>""" % {
'project' : escape(sys.argv[1]),
'timestamp': int(time.time()),
'author' : escape(author.encode('utf-8')),
'subject' : escape(subject.encode('utf-8'))
}
print message
print ServerProxy('http://cia.vc/RPC2').hub.deliver(message)
|
Python
| 0.000001
|
@@ -440,16 +440,89 @@
er,
-Header(*
+' '.join(%5Btext.decode(encoding if encoding else 'ascii') for (text, encoding) in
deco
@@ -545,25 +545,9 @@
er%5D)
-%5B0%5D).__unicode__(
+%5D
)) f
|
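The point of this commit is that decode_header() can return several (text, charset) chunks for a single header, so each chunk must be decoded before the parts are joined. A Python 3 sketch (where the module lives at email.header and chunks may arrive as bytes or str):

from email.header import decode_header

raw = "=?iso-8859-2?Q?Merlijn_van_Deen?= <email@address.com>"
parts = []
for text, encoding in decode_header(raw):
    if isinstance(text, bytes):
        text = text.decode(encoding if encoding else 'ascii')
    parts.append(text)
print(' '.join(parts))  # decoded display name plus address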
8453607c1fb1cb1835bc1323f4c59366015e93fe
|
Create a command-line vulgarizer
|
estimate.py
|
estimate.py
|
import sys
from fractions import Fraction
from math import log10
digits = sys.argv[1]
print("Estimating %s as a fraction..." % digits)
def vulgarize(rpt):
"""Calculate a vulgar fraction for a given continued fraction"""
f = Fraction(0)
if tuple(rpt) == (0,): return f # Avoid dividing by zero
for term in reversed(rpt):
f = 1 / (term + f)
return 1/f
def magnitude(x):
"""Give an indication of the magnitude of a number
Bigger numbers have bigger magnitudes, and you can see the direction
of the number in the result (so -4 is further from zero than +2 is).
"""
if x < 0: return -log10(-x)
if x == 0: return 0
return log10(x)
frac = []
orig = Fraction(digits)
residue = 1/orig
while residue:
t = 1/residue
frac.append(int(t))
residue = t - int(t)
vulg = vulgarize(frac)
error = magnitude(vulg - orig)
print(f"%{len(digits)*2}s %+6.2f %r" % (vulg, error, frac))
|
Python
| 0.000309
|
@@ -63,79 +63,8 @@
10%0A%0A
-digits = sys.argv%5B1%5D%0Aprint(%22Estimating %25s as a fraction...%22 %25 digits)%0A%0A
def
@@ -566,16 +566,232 @@
g10(x)%0A%0A
+digits = sys.argv%5B1%5D%0Aif %22,%22 in digits:%0A%09digits = %5Bint(d.strip()) for d in digits.split(%22,%22)%5D%0A%09frac = vulgarize(digits)%0A%09print(frac, digits, float(frac))%0A%09sys.exit(0)%0Aprint(%22Estimating %25s as a fraction...%22 %25 digits)%0A%0A
frac = %5B
|
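As a worked check of the machinery above: the continued-fraction terms [3, 7, 16] correspond to 355/113, the classic pi approximation, and feeding them back through vulgarize() reproduces it exactly:

from fractions import Fraction

def vulgarize(rpt):
    """Collapse continued-fraction terms back into a vulgar fraction."""
    f = Fraction(0)
    if tuple(rpt) == (0,):
        return f
    for term in reversed(rpt):
        f = 1 / (term + f)
    return 1 / f

print(vulgarize([3, 7, 16]))         # -> 355/113
print(float(vulgarize([3, 7, 16])))  # -> 3.1415929..., close to pi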
0e2ef0a70fa6627c0eb4a292e69d3ed1f8500f36
|
Add the ability to graph the results
|
estimate.py
|
estimate.py
|
import sys
from fractions import Fraction
from math import log10
def vulgarize(rpt):
"""Calculate a vulgar fraction for a given continued fraction"""
f = Fraction(0)
if tuple(rpt) == (0,): return f # Avoid dividing by zero
for term in reversed(rpt):
f = 1 / (term + f)
return 1/f
def magnitude(x):
"""Give an indication of the magnitude of a number
Bigger numbers have bigger magnitudes, and you can see the direction
of the number in the result (so -4 is further from zero than +2 is).
"""
if x < 0: return -log10(-x)
if x == 0: return 0
return log10(x)
digits = sys.argv[1]
if "," in digits:
digits = [int(d.strip()) for d in digits.split(",")]
frac = vulgarize(digits)
print(frac, digits, float(frac))
sys.exit(0)
print("Estimating %s as a fraction..." % digits)
frac = []
orig = Fraction(digits)
residue = 1/orig
while residue:
t = 1/residue
frac.append(int(t))
residue = t - int(t)
vulg = vulgarize(frac)
error = magnitude(vulg - orig)
print(f"%{len(digits)*2}s %+6.2f %r" % (vulg, error, frac))
|
Python
| 0.000004
|
@@ -833,16 +833,30 @@
1/orig%0A
+accuracy = %5B%5D%0A
while re
@@ -1033,12 +1033,1099 @@
ror, frac))%0A
+%09if vulg != orig:%0A%09%09# Estimate the accuracy by showing, in effect, how many%0A%09%09# correct digits there are before there's an error.%0A%09%09# (Accuracy becomes immeasurable for the last term.)%0A%09%09accuracy.append(-log10(abs(vulg - orig)))%0A%0Aif %22--graph%22 in sys.argv:%0A%09import matplotlib.pyplot as plt%0A%09# Convert accuracy into accuracy-gained-last-time%0A%09# From three terms %5Ba, b, c%5D, we look at the accuracy gained by%0A%09# adding term b, and then plot that alongside c.%0A%09from operator import sub%0A%09accuracy = %5B0%5D + list(map(sub, accuracy, %5B0%5D + accuracy%5B:-1%5D))%0A%09# Different y-scales - see https://matplotlib.org/gallery/api/two_scales.html%0A%09fig, ax1 = plt.subplots()%0A%09ax1.set_xlabel(%22N Terms%22)%0A%09ax1.set_ylabel(%22Term%22, color=%22tab:red%22)%0A%09ax1.set_yscale(%22log%22) # Since accuracy is already, in effect, logarithmic, do the same here.%0A%09ax1.plot(frac, color=%22tab:red%22)%0A%09ax1.tick_params(axis=%22y%22, labelcolor=%22tab:red%22)%0A%09ax2 = ax1.twinx()%0A%09ax2.set_ylabel(%22Accuracy gained%22, color=%22tab:blue%22)%0A%09ax2.plot(accuracy, color=%22tab:blue%22)%0A%09ax2.tick_params(axis=%22y%22, labelcolor=%22tab:blue%22)%0A%09fig.tight_layout()%0A%09plt.show()%0A
|
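The accuracy bookkeeping this diff adds uses a compact pairwise-subtraction idiom to turn cumulative accuracy into per-term gains. In isolation:

from operator import sub

accuracy = [2.0, 5.0, 9.0]  # cumulative correct digits after each term
gains = [0] + list(map(sub, accuracy, [0] + accuracy[:-1]))
print(gains)  # -> [0, 2.0, 3.0, 4.0]: what each new term contributed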
a54cb5529e5611b2d21c837d5422e31abd8d2819
|
Add :q alias for quit
|
placidity/commands/quit/quit.py
|
placidity/commands/quit/quit.py
|
class Quit:
aliases = ('quit', 'quit()', )
description = 'Quits the application'
def execute(self):
raise SystemExit
|
Python
| 0
|
@@ -37,16 +37,22 @@
quit()',
+ ':q',
)%0A d
|
e2d009c2e64340d101319824af1130bb92b4b021
|
Add debug logger method to utils
|
app/util.py
|
app/util.py
|
import os
def chown(username, path, destination):
"""Set owner and group of file to that of the parent directory."""
s = os.stat(path)
os.chown(os.path.join(path, destination), s.st_uid, s.st_gid)
def construct_path(path, format, *args):
"""Constructs a path using locally available variables."""
return os.path.join(path.format(**format), *args)
|
Python
| 0.000001
|
@@ -362,8 +362,244 @@
*args)%0A
+%0Adef logger(config):%0A %22%22%22 Returns a logger if development mode, else a no-op. %22%22%22%0A def log(message):%0A print('%5BDebug%5D: %7B%7D'.format(message))%0A%0A if config%5B'DEBUG'%5D:%0A return log%0A else:%0A return lambda x: None%0A
|
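The helper this commit adds is a small closure factory: in development mode it returns a printing function, otherwise a no-op with the same shape. Reproduced as a runnable sketch with an inline config dict:

def logger(config):
    """Return a logger if development mode, else a no-op."""
    def log(message):
        print('[Debug]: {}'.format(message))
    if config['DEBUG']:
        return log
    return lambda x: None

logger({'DEBUG': True})('cache warmed')   # prints "[Debug]: cache warmed"
logger({'DEBUG': False})('never shown')   # silently discarded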
cf6ddfdac8a56194ad1297921a390be541d773cc
|
Remove last digit of version number if it's 0.
|
app_info.py
|
app_info.py
|
# coding=UTF8
import datetime
name = "Devo"
release_date = datetime.date(2012, 12, 13)
version = (1, 0, 0)
version_string = ".".join(str(x) for x in version)
identifier = "com.iogopro.devo"
copyright = u"Copyright © 2010-2012 Luke McCarthy"
developer = "Developer: Luke McCarthy <luke@iogopro.co.uk>"
company_name = "Iogopro Software"
url = "http://iogopro.com/devo"
|
Python
| 0.000158
|
@@ -147,23 +147,61 @@
or x in
+(
version
+ if version%5B2%5D != 0 else version%5B:2%5D)
)%0A%0Aident
|
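The one-liner in this diff drops the trailing component of the version tuple when it is zero, so (1, 0, 0) renders as "1.0". In isolation:

version = (1, 0, 0)
version_string = ".".join(
    str(x) for x in (version if version[2] != 0 else version[:2]))
print(version_string)  # -> "1.0"; (1, 0, 3) would render as "1.0.3"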
206715b0d205adca9598b07b9fb57063e5b4a220
|
Bump VERSION_INT up
|
src/constants.py
|
src/constants.py
|
#
# Copyright 2013 TeamSWAP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wx, os, sys
VERSION = "0.7.0"
VERSION_INT = 12
URL_CHECK = "http://faultexception.com/swap/check_updates.php"
PARSER_SERVER_ADDR = ("swapserver.no-ip.biz", 57680)
NODE_SERVER_ADDR = "swapserver.no-ip.biz:57681"
IS_COMPILED = 'frozen' in dir(sys)
# Static Messages
MSG_FAILED_KEY_GENERATION_TEXT = "Failed to generate new key! Please report this to the developer."
MSG_FAILED_KEY_GENERATION_TITLE = "Error"
MSG_FAILED_JOIN_INVALID_KEY_TEXT = "No such key was found"
MSG_FAILED_JOIN_INVALID_KEY_TITLE = "Error"
MSG_FAILED_JOIN_UPDATE_REQUIRED_TEXT = "Your SWAP client is too old! Please close SWAP and run the SWAP shortcut (if installed), or run updater.exe"
MSG_FAILED_JOIN_UPDATE_REQUIRED_TITLE = "Error"
MSG_COMBAT_LOGGING_DISABLED_TEXT = "Combat logging is disabled. If SWTOR is running please enter enable Combat Logging in Preferences. If SWTOR is not running, click OK and we'll take care of it for you."
MSG_COMBAT_LOGGING_DISABLED_TITLE = "Whoops!"
# Menu ID
MENU_ID_EXIT = wx.NewId()
MENU_ID_PREFERENCES = wx.NewId()
MENU_ID_OVERLAY_DARK = wx.NewId()
MENU_ID_OVERLAY_SIZE_TO_GRID = wx.NewId()
MENU_ID_OVERLAY_SNAP = wx.NewId()
MENU_ID_OVERLAY_RESET = wx.NewId()
MENU_ID_OVERLAY_CLOSE = wx.NewId()
# Menu Title
MENU_TITLE_EXIT = "Exit"
MENU_TITLE_PREFERENCES = "Preferences"
MENU_TITLE_OVERLAY_DARK = "Dark Overlays"
MENU_TITLE_OVERLAY_SIZE_TO_GRID = "Size Overlays to Grid"
MENU_TITLE_OVERLAY_SNAP = "Snap Overlays to Edges"
MENU_TITLE_OVERLAY_RESET = "Reset Overlays"
MENU_TITLE_OVERLAY_CLOSE = "Close Overlays"
# Menu Tip
MENU_TIP_EXIT = "Exits the program."
MENU_TIP_PREFERENCES = "Change program settings"
MENU_TIP_OVERLAY_DARK = "Toggles dark theme for overlays"
MENU_TIP_OVERLAY_SIZE_TO_GRID = "Toggles sizing overlays to grid"
MENU_TIP_OVERLAY_SNAP = "Toggles snapping overlays to screen edges"
MENU_TIP_OVERLAY_RESET = "Reset all overlays' position and size"
MENU_TIP_OVERLAY_CLOSE = "Close all open overlays"
MENU_TIP_OVERLAY_SELECT = "Toggle selected overlay"
|
Python
| 0.000001
|
@@ -641,17 +641,17 @@
_INT = 1
-2
+5
%0D%0AURL_CH
|
1e7c95ee7d920a5d0f312608b323c7449ca4fe1c
|
Bump version.
|
floobits.py
|
floobits.py
|
#!/usr/bin/env python
# coding: utf-8
import os
from floo import emacs_handler
from floo.common import migrations
from floo.common import reactor
from floo.common import utils
from floo.common import shared as G
def cb(port):
print('Now listening on %s' % port)
def main():
G.__VERSION__ = '0.11'
G.__PLUGIN_VERSION__ = '1.5.5'
utils.reload_settings()
if not os.path.exists(G.FLOORC_JSON_PATH):
migrations.migrate_floorc()
utils.reload_settings()
migrations.rename_floobits_dir()
migrations.migrate_symlinks()
try:
utils.normalize_persistent_data()
except Exception:
pass
emacs = emacs_handler.EmacsHandler()
G.emacs = emacs
_, port = reactor.reactor.listen(emacs)
utils.set_timeout(cb, 100, port)
reactor.reactor.block()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -335,17 +335,17 @@
= '1.5.
-5
+6
'%0A ut
|
5950997c8925804338f224f1278c3018479dab09
|
scale pixels to 16 shades
|
ppp.py
|
ppp.py
|
#! /usr/bin/python
"""
ppp.py
peggy.pi.pic
Take picture with Raspberry Pi camera and then display
as 25 x 25 pixel image (16 shades) on Peggy2
"""
# http://picamera.readthedocs.org/en/release-1.9/recipes1.html#capturing-to-a-pil-image
import io
import time
import picamera
from PIL import Image
# Create the in-memory stream
stream = io.BytesIO()
with picamera.PiCamera() as camera:
camera.hflip = True
camera.vflip = True
camera.start_preview()
time.sleep(2)
camera.capture(stream, format='bmp')
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
image = Image.open(stream)
#crop square
image = image.crop((280,0,1000,720))
#convert to grey
image = image.convert('L')
# # test - show image
# image.show()
image.thumbnail((25,25))
# # save image to file as test
# imgout = open('/home/pi/temp.bmp', 'w')
# image.save(imgout)
# imgout.close()
#
pxls = list(image.getdata())
# convert pixels to 16 levels from 256
for i, p in enumerate(pxls):
pxls[i] = p//16
# look at pixel values in 25 x 25 array
i = 0
for p in pxls:
print p,
if i % 25 == 24:
print '\n'
i += 1
|
Python
| 0.000001
|
@@ -723,283 +723,350 @@
L')%0A
-%0A# # test - show image%0A# image.show()%0A%0Aimage.thumbnail((25,25))%0A%0A# # save image to file as test%0A# imgout = open('/home/pi/temp.bmp', 'w')%0A# image.save(imgout)%0A# imgout.close()%0A#%0A
+image.thumbnail((25,25))%0Apxls = list(image.getdata())%0A%0A# convert pixels to 16 levels from 256%0A# note: may want to check range of values and rescale%0A# in order to preserve as much info as possible%0Amax
pxl
-s
=
-list(image.getdata())%0A%0A# convert pixels to 16 levels from 256%0Afor i, p in enumerate(pxls):
+max(list)%0Aminpxl = min(list)%0Adeltapxl = maxpxl - minpxl%0A%0Afor i, p in enumerate(pxls):%0A scaledpxl = (pxls%5Bi%5D - minpxl) * 255 / deltapxl
%0A
@@ -1080,15 +1080,26 @@
%5D =
-p
+scaledpxl
//16%0A%0A
+%0A#
# lo
@@ -1138,14 +1138,18 @@
ray%0A
+#
i = 0%0A
+#
for
@@ -1159,20 +1159,22 @@
n pxls:%0A
+#
+
print p,
@@ -1174,16 +1174,18 @@
rint p,%0A
+#
if i
@@ -1201,16 +1201,17 @@
24:%0A
+#
prin
@@ -1202,24 +1202,25 @@
4:%0A#
+
print '%5Cn'%0A
@@ -1218,16 +1218,18 @@
nt '%5Cn'%0A
+#
i +=
@@ -1231,8 +1231,178 @@
i += 1%0A
+%0Aimage.putdata(pxl, scale = 16) #scale by 16 for regular display%0A# # save image to file as test%0Aimgout = open('/home/pi/temp.bmp', 'w')%0Aimage.save(imgout)%0Aimgout.close()%0A
|
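The rescaling idea in this diff is to stretch the observed pixel range over the full 0-255 scale before quantizing to 16 shades, so low-contrast captures keep as much detail as possible. A minimal sketch, with the min/max taken over the pixel list (which appears to be the diff's intent):

pxls = [10, 30, 50, 90]  # illustrative grey values
minpxl, maxpxl = min(pxls), max(pxls)
delta = maxpxl - minpxl

shades = [((p - minpxl) * 255 // delta) // 16 for p in pxls]
print(shades)  # -> [0, 3, 7, 15]: full use of the 16 available shades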
a9de7732dc442df94770d91f8bef9eac45aea7de
|
Disable bytecode writing for testing
|
test/test.py
|
test/test.py
|
#!/usr/bin/env python
'''CSS Property Sorter Script Unittest
Copyright (c) 2012 Yu-Jie Lin
Author: Yu-Jie Lin <livibetter@gmail.com>, http://yjl.im
License: MIT license (http://opensource.org/licenses/mit-license.php)
'''
from __future__ import print_function
import glob
import os.path
import unittest
import sys
sys.path.append('..')
from sortcss import make_parser, sort_properties as SP
def F(filename):
with open(filename) as f:
return f.read()
class TestFile(unittest.TestCase):
def __init__(self, basename, n, args_line, args, methodName='runTest'):
super(TestFile, self).__init__(methodName)
self.basename = basename
self.n = n
self.args_line = args_line
self.args = args
self.source = basename + '-0.css'
self.expect = '%s-%s.css' % (basename, n)
# For appending text after default error message
self.longMessage = True
def runTest(self):
err_msg = '\n' \
'* Source : %s\n' \
'* Expect : %s\n' \
'* Arg line: %s\n' \
'* Argparse: %s' \
% (self.source, self.expect, self.args_line, self.args)
self.assertMultiLineEqual(SP(F(self.source), self.args),
F(self.expect),
err_msg)
if __name__ == '__main__':
parser = make_parser()
suite = unittest.TestSuite()
for src in glob.glob('case/test-????-0.css'):
basename = src.replace('-0.css', '')
arg = basename + '.arg'
add_one = True
if os.path.exists(arg):
with open(arg) as f:
for line in f:
n, args_line = line.rstrip('\n').split(' ', 1)
if n == '1':
add_one = False
args = parser.parse_args(args_line.split())
testcase = TestFile(basename, n, args_line, args)
suite.addTest(testcase)
if add_one:
suite.addTest(TestFile(basename, '1', '', parser.parse_args('')))
unittest.TextTestRunner().run(suite)
|
Python
| 0.000001
|
@@ -335,16 +335,47 @@
d('..')%0A
+sys.dont_write_bytecode = True%0A
from sor
@@ -421,16 +421,48 @@
s as SP%0A
+sys.dont_write_bytecode = False%0A
%0A%0Adef F(
|