| commit<br>*string (40)* | subject<br>*string (1–3.25k)* | old_file<br>*string (4–311)* | new_file<br>*string (4–311)* | old_contents<br>*string (0–26.3k)* | lang<br>*3 classes* | proba<br>*float64 (0–1)* | diff<br>*string (0–7.82k)* |
|---|---|---|---|---|---|---|---|
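
The `diff` cells below appear to store unified diffs with percent-encoded whitespace (`%0A` for newlines, `%5B`/`%5D` for brackets, `%25` for a literal `%`). A minimal sketch for recovering a plain diff from a cell, assuming standard percent-encoding; the sample string is an illustrative fragment shaped like the rows below, not a verbatim cell:

```python
from urllib.parse import unquote


def decode_diff_cell(cell: str) -> str:
    """Decode a percent-encoded diff cell into a plain unified diff."""
    return unquote(cell)


# Illustrative (hypothetical) fragment:
print(decode_diff_cell("@@ -1,2 +1,2 @@%0A-import time%0A+from time import sleep"))
```
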
c93b2ba0ed45aeeb8d82c8e04f6a2f5197ba732b
|
refactor rabbitmq consumer
|
spider/rpc.py
|
spider/rpc.py
|
# -*- coding: utf-8 -*-
import logging
import sys
import time
import json
from multiprocessing import Process

import pika
from scrapy.utils.project import get_project_settings

from task import crawl, gen_lxmlspider, gen_blogspider

settings = get_project_settings()


def cron(ch, method, properties, body):
    logger = logging.getLogger(__name__)
    args = json.loads(body)
    p = Process(target=crawl,
                args=(args,))
    logger.info('cron task starting ...')
    p.daemon = True
    p.start()


def lxmlspider(ch, method, properties, body):
    args = json.loads(body)
    gen_lxmlspider(args)


def blogspider(ch, method, properties, body):
    args = json.loads(body)
    gen_blogspider(args)


def task(callback, key):
    url = '{}?heartbeat=3600'.format(settings['BROKER_URL'])
    connection = pika.BlockingConnection(pika.connection.URLParameters(url))
    channel = connection.channel()
    channel.exchange_declare(exchange='direct_logs',
                             type='direct')
    result = channel.queue_declare(exclusive=True)
    queue_name = result.method.queue
    channel.queue_bind(exchange='direct_logs',
                       queue=queue_name,
                       routing_key=key)
    channel.basic_consume(callback,
                          queue=queue_name,
                          no_ack=True)
    channel.start_consuming()


def init_logger(settings):
    root = logging.getLogger()
    root.setLevel(logging.DEBUG)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(settings['LOG_LEVEL'])
    handler.setFormatter(logging.Formatter(settings['LOG_FORMAT'],
                                           settings['LOG_DATEFORMAT']))
    root.addHandler(handler)


def main():
    init_logger(settings)
    logger = logging.getLogger(__name__)
    TASKS = [(cron, settings['CRAWL_KEY']),
             (lxmlspider, settings['LXMLSPIDER_KEY']),
             (blogspider, settings['BLOGSPIDER_KEY'])]
    consumers = [(Process(target=task,
                          args=_),
                  _) for _ in TASKS]
    time.sleep(60)
    for p, _ in consumers:
        p.start()
    logger.info('rpc task running ...')
    while True:
        for i, (p, args) in enumerate(consumers):
            logger.info('check task state ...')
            if not p.is_alive():
                logger.error((
                    'function {} got exception'
                ).format(TASKS[i][0].__name__))
                p.join()
                np = Process(target=task,
                             args=args)
                np.start()
                consumers[i] = (np, args)
        time.sleep(120)


if __name__ == '__main__':
    main()
|
Python
| 0.999999
|
@@ -49,19 +49,30 @@
sys%0A
+from time
import
-time
+sleep
%0Aimp
@@ -80,16 +80,63 @@
rt json%0A
+from functools import partial%0Aimport threading%0A
from mul
@@ -334,12 +334,31 @@
ef c
-r
on
-(
+sume(callback, jobs,
ch,
@@ -477,20 +477,23 @@
target=c
-rawl
+allback
,%0A
@@ -537,17 +537,14 @@
fo('
-cron task
+%7B%7D job
sta
@@ -549,24 +549,50 @@
tarting ...'
+.format(callback.__name__)
)%0A p.daem
@@ -619,208 +619,25 @@
t()%0A
-%0A%0Adef lxmlspider(ch, method, properties, body):%0A args = json.loads(body)%0A gen_lxmlspider(args)%0A%0A%0Adef blogspider(ch, method, properties, body):%0A args = json.loads(body)%0A gen_blogspider(args
+ jobs.append(p
)%0A%0A%0A
@@ -657,24 +657,38 @@
back, key):%0A
+ jobs = %5B%5D%0A
url = '%7B
@@ -1161,16 +1161,147 @@
ey=key)%0A
+%0A def _process_data_events(connection, channel, queue_name, callback):%0A callback_ = partial(consume, callback, jobs)%0A
chan
@@ -1318,34 +1318,39 @@
consume(callback
+_
,%0A
+
@@ -1369,32 +1369,36 @@
eue=queue_name,%0A
+
@@ -1428,31 +1428,440 @@
-channel.start_consuming
+ while True:%0A connection.process_data_events()%0A for j in jobs:%0A if not j.is_alive():%0A j.join()%0A sleep(120)%0A t = threading.Thread(target=_process_data_events,%0A args=(connection,%0A channel,%0A queue_name,%0A callback))%0A t.setDaemon(True)%0A t.start
()%0A%0A
@@ -2309,18 +2309,19 @@
S = %5B(cr
-on
+awl
, settin
@@ -2352,16 +2352,20 @@
(
+gen_
lxmlspid
@@ -2411,16 +2411,20 @@
(
+gen_
blogspid
@@ -2567,29 +2567,24 @@
TASKS%5D%0A
-time.
sleep(60)%0A
@@ -3136,13 +3136,8 @@
-time.
slee
|
d4a2632a0dcdd6731a5930f321135ec7f9864460
|
Use new API, which requires being explicit about tracking ODF model.
|
AFQ/tests/test_tractography.py
|
AFQ/tests/test_tractography.py
|
import os.path as op

import numpy as np
import numpy.testing as npt
import nibabel.tmpdirs as nbtmp

from AFQ.csd import fit_csd
from AFQ.dti import fit_dti
from AFQ.tractography import track
from AFQ.utils.testing import make_tracking_data

seeds = np.array([[-80., -120., -60.],
                  [-81, -121, -61],
                  [-81, -120, -60]])

tmpdir = nbtmp.InTemporaryDirectory()
fbval = op.join(tmpdir.name, 'dti.bval')
fbvec = op.join(tmpdir.name, 'dti.bvec')
fdata = op.join(tmpdir.name, 'dti.nii.gz')

make_tracking_data(fbval, fbvec, fdata)

min_length = 20
step_size = 0.5


def test_csd_tracking():
    for sh_order in [4, 8, 10]:
        fname = fit_csd(fdata, fbval, fbvec,
                        response=((0.0015, 0.0003, 0.0003), 100),
                        sh_order=8, lambda_=1, tau=0.1, mask=None,
                        out_dir=tmpdir.name)
        for directions in ["det", "prob"]:
            sl = track(fname, directions,
                       max_angle=30.,
                       sphere=None,
                       seed_mask=None,
                       n_seeds=seeds,
                       stop_mask=None,
                       step_size=step_size,
                       min_length=min_length)
            npt.assert_(len(sl[0]) >= step_size * min_length)


def test_dti_tracking():
    fdict = fit_dti(fdata, fbval, fbvec)
    for directions in ["det", "prob"]:
        sl = track(fdict['params'],
                   directions,
                   max_angle=30.,
                   sphere=None,
                   seed_mask=None,
                   n_seeds=1,
                   step_size=step_size,
                   min_length=min_length)
        npt.assert_(len(sl[0]) >= min_length * step_size)
|
Python
| 0
|
@@ -946,32 +946,72 @@
me, directions,%0A
+ odf_model=%22CSD%22,%0A
|
e1ed03b278c699f0a55319ad8baf69989e5a62d6
|
Check timestamps from session_client
|
lastuser_ui/views/dashboard.py
|
lastuser_ui/views/dashboard.py
|
# -*- coding: utf-8 -*-

from functools import wraps
from collections import defaultdict
from cStringIO import StringIO

import unicodecsv
from flask import g, current_app, abort, render_template

from lastuser_core.models import db, User, USER_STATUS
from .. import lastuser_ui


def requires_dashboard(f):
    """
    Decorator to require a login for the given view.
    """
    @wraps(f)
    def decorated_function(*args, **kwargs):
        if not g.user or g.user.userid not in current_app.config.get('DASHBOARD_USERS', []):
            abort(403)
        return f(*args, **kwargs)
    return decorated_function


@lastuser_ui.route('/dashboard')
@requires_dashboard
def dashboard():
    user_count = User.query.filter_by(status=USER_STATUS.ACTIVE).count()
    mau = db.session.query('mau').from_statement(db.text(
        '''SELECT COUNT(DISTINCT(user_session.user_id)) AS mau FROM user_session, "user" WHERE user_session.user_id = "user".id AND "user".status = :status AND user_session.accessed_at >= (NOW() AT TIME ZONE 'UTC') - INTERVAL '30 days' '''
    )).params(status=USER_STATUS.ACTIVE).first()[0]

    return render_template('dashboard.html',
                           user_count=user_count,
                           mau=mau
                           )


@lastuser_ui.route('/dashboard/data/users_by_month.csv')
@requires_dashboard
def dashboard_data_users_by_month():
    users_by_month = db.session.query('month', 'count').from_statement(db.text(
        '''SELECT date_trunc('month', "user".created_at) AS month, count(*) AS count FROM "user" WHERE status=:status GROUP BY month ORDER BY month'''
    )).params(status=USER_STATUS.ACTIVE)

    outfile = StringIO()
    out = unicodecsv.writer(outfile, 'excel')
    out.writerow(['month', 'count'])
    for month, count in users_by_month:
        out.writerow([month.strftime('%Y-%m-%d'), count])
    return outfile.getvalue(), 200, {'Content-Type': 'text/plain'}


@lastuser_ui.route('/dashboard/data/users_by_client.csv')
@requires_dashboard
def dashboard_data_users_by_client():
    client_users = defaultdict(lambda: {'counts': {'hour': 0, 'day': 0, 'week': 0, 'month': 0, 'quarter': 0, 'halfyear': 0, 'year': 0}})
    for label, interval in (
        ('hour', '1 hour'),
        ('day', '1 day'),
        ('week', '1 week'),
        ('month', '1 month'),
        ('quarter', '3 months'),
        ('halfyear', '6 months'),
        ('year', '1 year'),
    ):
        clients = db.session.query('client_id', 'count', 'title', 'website').from_statement(db.text(
            '''SELECT client_users.client_id, count(*) AS count, client.title AS title, client.website AS website FROM (SELECT user_session.user_id, session_client.client_id FROM user_session, session_client, "user" WHERE user_session.user_id = "user".id AND session_client.user_session_id = user_session.id AND "user".status = :status AND user_session.accessed_at >= (NOW() AT TIME ZONE 'UTC') - INTERVAL :interval GROUP BY session_client.client_id, user_session.user_id) AS client_users, client WHERE client.id = client_users.client_id GROUP by client_users.client_id, client.title, client.website ORDER BY count DESC'''
        )).params(status=USER_STATUS.ACTIVE, interval=interval).all()
        for row in clients:
            client_users[row.client_id]['title'] = row.title
            client_users[row.client_id]['website'] = row.website
            client_users[row.client_id]['id'] = row.client_id
            client_users[row.client_id]['counts'][label] = row.count - sum(client_users[row.client_id]['counts'].values())

    users_by_client = sorted(client_users.values(), key=lambda r: sum(r['counts'].values()), reverse=True)

    outfile = StringIO()
    out = unicodecsv.writer(outfile, 'excel')
    out.writerow(['title', 'hour', 'day', 'week', 'month', 'quarter', 'halfyear', 'year'])
    for row in users_by_client:
        out.writerow([
            row['title'],
            row['counts']['hour'],
            row['counts']['day'],
            row['counts']['week'],
            row['counts']['month'],
            row['counts']['quarter'],
            row['counts']['halfyear'],
            row['counts']['year']
        ])
    return outfile.getvalue(), 200, {'Content-Type': 'text/plain'}
|
Python
| 0.000001
|
@@ -2852,35 +2852,36 @@
tus AND
-user_
session
-.access
+_client.updat
ed_at %3E=
|
705cb5cf3eec171baf3a8b91b8cc77d9987a1414
|
Fix ImproperlyConfigured exception
|
precision/accounts/views.py
|
precision/accounts/views.py
|
from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views.generic.base import TemplateResponseMixin, View

from .forms import LoginForm


class SignInView(TemplateResponseMixin, View):

    def get(self, request):
        template_name = 'accounts/sign_in.html'
        form = LoginForm()
        context = {'section': 'sign_in', 'form': form}
        return self.render_to_response(context)

    def post(self, request):
        form = LoginForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            user = authenticate(email=cd['username'], password=cd['password'])
            if user is not None:
                if user.is_active:
                    login(request, user)
                    return HttpResponse('Authenticated successfully')
                else:
                    return HttpResponse('Disabled account')
            else:
                return HttpResponse('Invalid login')
        else:
            return redirect('accounts:sign_in')
|
Python
| 0.000003
|
@@ -277,41 +277,8 @@
w):%0A
-%0A def get(self, request):%0A
@@ -316,16 +316,45 @@
in.html'
+%0A%0A def get(self, request):
%0A
|
32f33a9b2f16edb6fe080f850e06b2384af06a5a
|
change string type
|
privacyidea/api/resolver.py
|
privacyidea/api/resolver.py
|
# -*- coding: utf-8 -*-
#
# http://www.privacyidea.org
# (c) cornelius kölbel, privacyidea.org
#
# 2016-04-10 Cornelius Kölbel <cornelius@privacyidea.org>
#            Make route the outermost decorator
# 2014-12-08 Cornelius Kölbel, <cornelius@privacyidea.org>
#            Complete rewrite during flask migration
#            Try to provide REST API
#
# privacyIDEA is a fork of LinOTP. Some code is adapted from
# the system-controller from LinOTP, which is
#   Copyright (C) 2010 - 2014 LSE Leading Security Experts GmbH
#   License: AGPLv3
#   contact: http://www.linotp.org
#            http://www.lsexperts.de
#            linotp@lsexperts.de
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
The code of this module is tested in tests/test_api_system.py
"""
from flask import (Blueprint,
                   request)
from .lib.utils import (getParam,
                        optional,
                        required,
                        send_result)
from ..lib.log import log_with
from ..lib.resolver import (get_resolver_list,
                            save_resolver,
                            delete_resolver, pretestresolver)
from flask import g
import logging
from ..api.lib.prepolicy import prepolicy, check_base_action
from ..lib.policy import ACTION

log = logging.getLogger(__name__)

resolver_blueprint = Blueprint('resolver_blueprint', __name__)


@resolver_blueprint.route('/', methods=['GET'])
@resolver_blueprint.route('/<resolver>', methods=['GET'])
@log_with(log)
def get_resolvers(resolver=None):
    """
    returns a json list of the specified resolvers.
    The passwords of resolvers (e.g. Bind PW of the LDAP resolver or password of the
    SQL resolver) will be returned as "__CENSORED__".
    You can run a POST request to update the data and privacyIDEA will ignore the "__CENSORED__"
    password or you can even run a testresolver.

    :param resolver: the name of the resolver
    :type resolver: basestring
    :param type: Only return resolvers of type (like passwdresolver..)
    :type type: basestring
    :param editable: Set to "1" if only editable resolvers should be returned.
    :type editable: basestring
    :return: a json result with the configuration of resolvers
    """
    typ = getParam(request.all_data, "type", optional)
    editable = getParam(request.all_data, "editable", optional)
    if editable == "1":
        editable = True
    elif editable == "0":
        editable = False

    res = get_resolver_list(filter_resolver_name=resolver,
                            filter_resolver_type=typ,
                            editable=editable,
                            censor=True)
    g.audit_object.log({"success": True,
                        "info": resolver})
    return send_result(res)


@resolver_blueprint.route('/<resolver>', methods=['POST'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def set_resolver(resolver=None):
    """
    This creates a new resolver or updates an existing one.
    A resolver is uniquely identified by its name.

    If you update a resolver, you do not need to provide all parameters.
    Parameters you do not provide are left untouched.
    When updating a resolver you must not change the type!
    You do not need to specify the type, but if you specify a wrong type,
    it will produce an error.

    :param resolver: the name of the resolver.
    :type resolver: basestring
    :param type: the type of the resolver. Valid types are passwdresolver,
        ldapresolver, sqlresolver, scimresolver
    :type type: string
    :return: a json result with the value being the database id (>0)

    Additional parameters depend on the resolver type.

    LDAP:
        * LDAPURI
        * LDAPBASE
        * BINDDN
        * BINDPW
        * TIMEOUT
        * SIZELIMIT
        * LOGINNAMEATTRIBUTE
        * LDAPSEARCHFILTER
        * LDAPFILTER
        * USERINFO
        * NOREFERRALS - True|False
        * EDITABLE - True|False

    SQL:
        * Database
        * Driver
        * Server
        * Port
        * User
        * Password
        * Table
        * Map

    Passwd
        * Filename
    """
    param = request.all_data
    if resolver:
        # The resolver parameter was passed as a part of the URL
        param.update({"resolver": resolver})
    res = save_resolver(param)
    g.audit_object.log({"success": res,
                        "info": resolver})
    return send_result(res)


@resolver_blueprint.route('/<resolver>', methods=['DELETE'])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.RESOLVERDELETE)
def delete_resolver_api(resolver=None):
    """
    This function deletes an existing resolver.
    A resolver can not be deleted, if it is contained in a realm

    :param resolver: the name of the resolver to delete.
    :return: json with success or fail
    """
    res = delete_resolver(resolver)
    g.audit_object.log({"success": res,
                        "info": resolver})
    return send_result(res)


@resolver_blueprint.route('/test', methods=["POST"])
@log_with(log)
@prepolicy(check_base_action, request, ACTION.RESOLVERWRITE)
def test_resolver():
    """
    Send the complete parameters of a resolver to the privacyIDEA server
    to test, if these settings will result in a successful connection.
    If you are testing existing resolvers, you can send the "__CENSORED__"
    password. privacyIDEA will use the already stored password from the
    database.

    :return: a json result with True, if the given values can create a
        working resolver and a description.
    """
    param = request.all_data
    rtype = getParam(param, "type", required)
    success, desc = pretestresolver(rtype, param)
    return send_result(success, details={"description": desc})
|
Python
| 0.000153
|
@@ -2530,34 +2530,27 @@
e resolver:
-basestring
+str
%0A :param
@@ -2625,26 +2625,19 @@
e type:
-basestring
+str
%0A :pa
@@ -2728,26 +2728,19 @@
itable:
-basestring
+str
%0A :re
@@ -3987,18 +3987,11 @@
er:
-basestring
+str
%0A
@@ -4125,19 +4125,16 @@
ype: str
-ing
%0A :re
|
2e4eb5a54a02698b69b02c550980b86b25cd13f9
|
Add `__repr__` to the `MALAffinity` class
|
malaffinity/malaffinity.py
|
malaffinity/malaffinity.py
|
"""
malaffinity class
"""
import copy
import bs4
import requests
import statistics
from . import calcs
# Lines are getting too long, best to import the exceptions
# manually, instead of just importing `exceptions`
from .exceptions import (
InvalidUsernameError, NoAffinityError,
MALRateLimitExceededError
)
class MALAffinity:
"""
The MALAffinity class
Stores a `base user`s' scores, to be compared with
other users' scores
"""
_URL = "https://myanimelist.net/malappinfo.php"
def __init__(self, base_user=None, round=False):
"""
Initialise an instance of `MALAffinity`
If `base_user` is `None`, the `init` function MUST be
called sometime after initialisation, with a `base_user`
provided, before affinity calculations take place
:param base_user: Base MAL username
:type base_user: str or None
:param round: Decimal places to round affinity values to
:type round: int or False
"""
# Will get overridden in `init` function.
self._base_user = None
self._base_scores = {}
self._round = round
if base_user:
self.init(base_user)
def _retrieve_scores(self, username):
"""
Retrieve a users' animelist scores
Only anime scored > 0 will be returned, and all
PTW entries are ignored, even if they are scored
:param str username: MAL username
:return: `id`, `scores` pairs
:rtype: list
"""
params = {
"u": username,
"status": "all",
"type": "anime"
}
resp = requests.request("GET", self._URL, params=params)
# Check if MAL's hitting you with a 429 and raise an exception if so.
if resp.status_code == requests.codes.too_many_requests:
raise MALRateLimitExceededError("MAL rate limit exceeded")
resp = bs4.BeautifulSoup(resp.content, "xml")
all_anime = resp.find_all("anime")
# Check if there's actually any anime being returned to us.
# If not, user probably doesn't exist.
# MAL should do a better job of highlighting this, but eh.
if not len(all_anime):
raise InvalidUsernameError("User `{}` does not exist"
.format(username))
# TODO: Generator?
scores = []
for anime in all_anime:
# See if anime is on their PTW and move on if so.
# This makes sure rated anime that the user hasn't
# seen does not get added to `scores`.
# Why do people even do this?
# PTW == status "6"
if anime.my_status.string == "6":
continue
id = anime.series_animedb_id.string
id = int(id)
score = anime.my_score.string
# Might need changing if MAL allows float scores.
score = int(score)
if score > 0:
scores.append({"id": id, "score": score})
# Check if there's actually anything in scores.
# If not, user probably doesn't have any rated anime.
if not len(scores):
raise NoAffinityError("User `{}` hasn't rated any anime"
.format(username))
return scores
# TODO: Rename this?
def init(self, base_user):
"""
Get the base users' list and create the `base scores`
dict that other people's scores will be compared to
Base scores will be saved to self._base_scores
You may want to check that this is populated after
running this function, before running anything else
:param str base_user: Base users' username
"""
self._base_user = base_user
base_list = self._retrieve_scores(base_user)
for anime in base_list:
id = anime["id"]
score = anime["score"]
self._base_scores[id] = [score]
return self
def calculate_affinity(self, username):
"""
Get the affinity between the base user and another user
Will either return the unrounded Pearson's correlation
coefficient * 100, or rounded value, depending on the
value of the `self._round` variable
:param str username: The username to compare the base users' scores to
:return: (float affinity, int shared)
:rtype: tuple
"""
# Check if there's actually a base user to compare scores with.
# `init` will assign the username to the `self._base_user` var and
# populate the `self._base_scores` dict when it retrieves the
# base user's scores, so we can test if those vars have been set.
if not self._base_user or not self._base_scores:
# Too lazy to make a custom exception for this.
raise Exception("No base user has been specified. Call the `init` "
"function to retrieve a base users' scores")
# Create a local, deep-copy of the scores
scores = copy.deepcopy(self._base_scores)
their_list = self._retrieve_scores(username)
for anime in their_list:
id = anime["id"]
score = anime["score"]
if id in scores:
scores[id].append(score)
# Force to list so no errors when deleting keys.
for key in list(scores.keys()):
if not len(scores[key]) == 2:
del scores[key]
# Handle cases where the shared scores are <= 10 so
# affinity can not be accurately calculated.
if len(scores) <= 10:
raise NoAffinityError("Shared rated anime count between "
"`{}` and `{}` is less than eleven"
.format(self._base_user, username))
# Sort multiple rows of scores into two arrays for calculations.
# E.G. [1,2], [3,4], [5,6] to [1,3,5], [2,4,6]
values = scores.values()
scores1, scores2 = list(zip(*values))
# Check if standard deviation of scores1 or scores2 is zero. If so,
# affinity can't be calculated as dividing by zero is impossible
if not statistics.stdev(scores1) or not statistics.stdev(scores2):
raise NoAffinityError("Standard deviation of `{}` "
"or `{}`'s scores is zero"
.format(self._base_user, username))
pearson = calcs.pearson(scores1, scores2)
pearson *= 100
if self._round is not False:
pearson = round(pearson, self._round)
return pearson, len(scores)
|
Python
| 0.000017
|
@@ -1197,24 +1197,279 @@
base_user)%0A%0A
+ def __repr__(self):%0A # TODO: Surely there has to be a better way of doing this...%0A # TODO: Make this look less ugly%0A return 'MALAffinity(base_user=%7B%7D, round=%7B%7D)' %5C%0A .format(repr(self._base_user), repr(self._round))%0A%0A
def _ret
|
5b7bb415b03e9bb3b432fd8e6dc40a9b5ecd4539
|
as_user or None
|
corehq/apps/formplayer_api/management/commands/prime_formplayer_restores.py
|
corehq/apps/formplayer_api/management/commands/prime_formplayer_restores.py
|
import csv
import sys
from concurrent import futures

from django.core.management.base import BaseCommand

from corehq.apps.formplayer_api.sync_db import sync_db
from corehq.apps.users.models import CouchUser
from corehq.apps.users.util import format_username


class Command(BaseCommand):
    help = "Call the Formplayer API for each user in passed in CSV to force a sync."

    def add_arguments(self, parser):
        parser.add_argument('path', help='Path to CSV file. Columns "domain, username, as_user"')
        parser.add_argument('-t', '--threads', type=int, default=10, help='Number of threads to use.')

    def handle(self, path, **options):
        pool_size = options['threads']
        with open(path, 'r') as file:
            reader = csv.reader(file)
            results = []
            with futures.ThreadPoolExecutor(max_workers=pool_size) as executor:
                row = next(reader)
                if row[0] != "domain":  # skip header
                    executor.submit(process_row, row)
                for row in reader:
                    executor.submit(process_row, row)
            futures.wait(results)


def process_row(row):
    domain, username, as_user = row
    user = CouchUser.get_by_username(username)
    if not user:
        sys.stderr.write(f"Row failure: unknown username: {','.join(row)}\n")
        return

    restore_as_user = None
    if as_user:
        as_username = format_username(as_user, domain) if '@' not in as_user else as_user
        restore_as_user = CouchUser.get_by_username(as_username)
        if not restore_as_user:
            sys.stderr.write(f"Row failure: unknown as_user: {','.join(row)}\n")
        if domain != restore_as_user.domain:
            sys.stderr.write(f"Row failure: domain mismatch with as_user: {','.join(row)}\n")

    try:
        sync_db(domain, user.username, as_user)
    except Exception as e:
        sys.stderr.write(f"Row failure: {e}: {','.join(row)}\n")
|
Python
| 0.999902
|
@@ -1362,35 +1362,8 @@
rn%0A%0A
- restore_as_user = None%0A
@@ -1838,16 +1838,24 @@
as_user
+ or None
)%0A ex
|
15f1afc9292f55850f7bade4b468fafc971c752a
|
Exclude past contest rounds. HACK
|
project/convention/views.py
|
project/convention/views.py
|
from __future__ import division

from haystack.views import basic_search

from django.shortcuts import (
    get_list_or_404,
    get_object_or_404,
    render,
    redirect,
)

from django.contrib import messages
from django.contrib.auth.decorators import login_required

# from django.core.exceptions import (
#     DoesNotExist,
# )

from .models import (
    Contestant,
    Performance,
    Note,
)

from .forms import (
    ContestantSearchForm,
    NoteForm,
    ProfileForm,
)

from noncense.forms import (
    NameForm,
)


def contestant(request, slug):
    """
    Returns details about a particular contestant.
    """
    contestant = get_object_or_404(Contestant, slug=slug)
    performances = contestant.performances.all()
    try:
        prev = contestant.next_performance.get_previous_by_stagetime().contestant
    except Performance.DoesNotExist:
        prev = None
    try:
        next = contestant.next_performance.get_next_by_stagetime().contestant
    except Performance.DoesNotExist:
        next = None
    if request.user.is_authenticated():
        note, created = Note.objects.get_or_create(
            contestant=contestant,
            profile=request.user.profile,
        )
    else:
        note = None
    if request.method == 'POST':
        form = NoteForm(request.POST, instance=note)
        if form.is_valid():
            form.save()
            messages.success(
                request,
                """Note saved.""",
            )
    else:
        form = NoteForm(instance=note)
    return render(
        request, 'contestant.html', {
            'contestant': contestant,
            'performances': performances,
            'prev': prev,
            'next': next,
            'form': form,
        },
    )


def performances(request):
    """
    Returns performances ordered by the program schedule.
    """
    performances = get_list_or_404(
        Performance.objects.select_related('contest', 'contestant').filter(place=None).order_by(
            'contest',
            'contest_round',
            'session',
            'appearance',
        )
    )
    return render(request, 'performances.html', {'performances': performances})


def contests(request):
    """
    Returns performances ordered by contest score.
    """
    performances = Performance.objects.exclude(place=None).order_by(
        'contest__contest_type',
        'place',
        '-contest_round',
    )
    return render(
        request,
        'contests.html',
        {'performances': performances}
    )


def search(request):
    """
    Extends the default Haystack search view.
    """
    response = basic_search(
        request,
        template='search.html',
        form_class=ContestantSearchForm,
        results_per_page=100,
    )
    return response


@login_required
def profile(request):
    if request.method == 'POST':
        p_form = ProfileForm(request.POST, prefix='p', instance=request.user.profile)
        u_form = NameForm(request.POST, prefix='u', instance=request.user)
        if p_form.is_valid() and u_form.is_valid():
            p_form.save()
            u_form.save()
            messages.success(request, 'Profile details updated.')
            return redirect('profile')
    else:
        p_form = ProfileForm(prefix='p', instance=request.user.profile)
        u_form = NameForm(prefix='u', instance=request.user)
    return render(
        request,
        'profile.html', {
            'p_form': p_form,
            'u_form': u_form,
        }
    )
|
Python
| 0.999955
|
@@ -1937,16 +1937,29 @@
related(
+%0A
'contest
@@ -1977,27 +1977,125 @@
ant'
-).filter(place=None
+%0A ).filter(%0A place=None%0A ).exclude(%0A contest_round=Performance.QUARTERS,%0A
).or
|
c0684358b217318327d71470ee86074b3556148a
|
Use double quotes consistently
|
bc125csv/__main__.py
|
bc125csv/__main__.py
|
from bc125csv.handler import main
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -47,17 +47,17 @@
==
-'
+%22
__main__
':%0A%09
@@ -56,9 +56,9 @@
in__
-'
+%22
:%0A%09m
@@ -62,8 +62,9 @@
%0A%09main()
+%0A
|
acd620c930a92511c2e2099a4fc82d41825fdf93
|
improve _VALID_URL regex(#16484)
|
youtube_dl/extractor/teamcoco.py
|
youtube_dl/extractor/teamcoco.py
|
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    parse_duration,
    parse_iso8601,
    qualities,
)


class TeamcocoIE(InfoExtractor):
    _VALID_URL = r'https?://teamcoco\.com/video/(?P<id>[^/?#]+)'
    _TESTS = [
        {
            'url': 'http://teamcoco.com/video/mary-kay-remote',
            'md5': '55d532f81992f5c92046ad02fec34d7d',
            'info_dict': {
                'id': '80187',
                'ext': 'mp4',
                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
                'duration': 495.0,
                'upload_date': '20140402',
                'timestamp': 1396407600,
            }
        }, {
            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
            'info_dict': {
                'id': '19705',
                'ext': 'mp4',
                'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
                'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                'duration': 288,
                'upload_date': '20111104',
                'timestamp': 1320405840,
            }
        }, {
            'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
            'info_dict': {
                'id': '88748',
                'ext': 'mp4',
                'title': 'Timothy Olyphant Raises A Toast To “Justified”',
                'description': 'md5:15501f23f020e793aeca761205e42c24',
                'upload_date': '20150415',
                'timestamp': 1429088400,
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        }, {
            'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
            'info_dict': {
                'id': '89341',
                'ext': 'mp4',
                'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
                'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            },
            'skip': 'This video is no longer available.',
        }
    ]

    def _graphql_call(self, query_template, object_type, object_id):
        find_object = 'find' + object_type
        return self._download_json(
            'http://teamcoco.com/graphql/', object_id, data=json.dumps({
                'query': query_template % (find_object, object_id)
            }))['data'][find_object]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        response = self._graphql_call('''{
  %s(slug: "video/%s") {
    ... on RecordSlug {
      record {
        id
        title
        teaser
        publishOn
        thumb {
          preview
        }
        tags {
          name
        }
        duration
      }
    }
    ... on NotFoundSlug {
      status
    }
  }
}''', 'Slug', display_id)
        if response.get('status'):
            raise ExtractorError('This video is no longer available.', expected=True)

        record = response['record']
        video_id = record['id']

        srcs = self._graphql_call('''{
  %s(id: "%s") {
    src
  }
}''', 'RecordVideoSource', video_id)['src']

        formats = []
        get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
        for format_id, src in srcs.items():
            if not isinstance(src, dict):
                continue
            src_url = src.get('src')
            if not src_url:
                continue
            ext = determine_ext(src_url, mimetype2ext(src.get('type')))
            if format_id == 'hls' or ext == 'm3u8':
                # compat_urllib_parse.urljoin does not work here
                if src_url.startswith('/'):
                    src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
                formats.extend(self._extract_m3u8_formats(
                    src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
            else:
                if src_url.startswith('/mp4:protected/'):
                    # TODO Correct extraction for these files
                    continue
                tbr = int_or_none(self._search_regex(
                    r'(\d+)k\.mp4', src_url, 'tbr', default=None))
                formats.append({
                    'url': src_url,
                    'ext': ext,
                    'tbr': tbr,
                    'format_id': format_id,
                    'quality': get_quality(format_id),
                })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'display_id': display_id,
            'formats': formats,
            'title': record['title'],
            'thumbnail': record.get('thumb', {}).get('preview'),
            'description': record.get('teaser'),
            'duration': parse_duration(record.get('duration')),
            'timestamp': parse_iso8601(record.get('publishOn')),
        }
|
Python
| 0.001603
|
@@ -339,16 +339,25 @@
/(?P%3Cid%3E
+(%5B%5E/%5D+/)*
%5B%5E/?#%5D+)
@@ -2655,24 +2655,160 @@
vailable.',%0A
+ %7D, %7B%0A 'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',%0A 'only_matching': True,%0A
%7D%0A
|
32537dafa3c13761b910ab8449ff80d60df6f02b
|
Bump version to 2.3.4-dev
|
indico/__init__.py
|
indico/__init__.py
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import warnings
from indico.util.mimetypes import register_custom_mimetypes
__version__ = '2.3.3'
register_custom_mimetypes()
# TODO: remove in 3.0
warnings.filterwarnings('ignore', message='Python 2 is no longer supported by the Python core team.',
                        module='authlib')
warnings.filterwarnings('ignore', message='Python 2 is no longer supported by the Python core team.',
                        module='cryptography')
|
Python
| 0
|
@@ -305,17 +305,21 @@
= '2.3.
-3
+4-dev
'%0A%0Aregis
|
41acddb1e2edcac54cd3ae5287a7c2977b02f305
|
Change `ugettext` to `ugettext_lazy`
|
cmsplugin_bootstrap_carousel/models_default.py
|
cmsplugin_bootstrap_carousel/models_default.py
|
# coding: utf-8
import os

from django.db import models
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils.translation import ugettext as _
from cms.models.pluginmodel import CMSPlugin
from PIL import Image
from cStringIO import StringIO

from . import config


class Carousel(CMSPlugin):
    domid = models.CharField(max_length=50, verbose_name=_('Name'))
    interval = models.IntegerField(default=5000)

    def copy_relations(self, oldinstance):
        for item in oldinstance.carouselitem_set.all():
            item.pk = None
            item.carousel = self
            item.save()

    def __unicode__(self):
        return self.domid


class CarouselItem(models.Model):
    carousel = models.ForeignKey(Carousel, verbose_name=_("Carousel"))
    caption_title = models.CharField(max_length=100, blank=True, null=True, verbose_name=_("Caption Title"))
    button_title = models.CharField(max_length=255, blank=True, verbose_name=_("Button Title"))
    button_url = models.URLField(blank=True, verbose_name=_("Button URL"))
    caption_content = models.TextField(blank=True, null=True, verbose_name=_("Caption Content"))
    image = models.ImageField(upload_to=config.CAROUSEL_UPLOADS_FOLDER, null=True, verbose_name=_("Image"))
    text_position = models.CharField(max_length=10, choices=config.CAROUSEL_TEXT_POSITIONS,
                                     default=config.CAROUSEL_TEXT_POSITION_LEFT, verbose_name=_("Text Position"))
    transition = models.CharField(max_length=30, choices=config.CAROUSEL_TRANSITION_CHOICES,
                                  default=config.CAROUSEL_TRANS_NO_TRANSITION, verbose_name=_("Transition"))
    start_position = models.CharField(max_length=20, choices=config.CAROUSEL_MOVEMENT_POSITION_CHOICES,
                                      default=config.CAROUSEL_MOVEMENT_POSITION_LEFT_TOP_LABEL,
                                      verbose_name=_("Start Position"))
    end_position = models.CharField(max_length=20, choices=config.CAROUSEL_MOVEMENT_POSITION_CHOICES,
                                    default=config.CAROUSEL_MOVEMENT_POSITION_LEFT_TOP_LABEL,
                                    verbose_name=_("End Position"))
    zoom = models.CharField(max_length=3, choices=config.CAROUSEL_ZOOM_CHOICES,
                            default=config.CAROUSEL_ZOOM_NO, verbose_name=_('Zoom'))
    target = models.CharField(max_length=10, choices=config.CAROUSEL_OPEN_TAB_CHOICES,
                              default=config.CAROUSEL_OPEN_IN_CURRENT_TAB, verbose_name=_('Target'))

    def save(self, *args, **kwargs):
        if self.image:
            img = Image.open(self.image.file)
            if img.mode not in ('L', 'RGB'):
                img = img.convert('RGB')
            size = config.BOOTSTRAP_CAROUSEL_IMGSIZE
            extension = config.BOOTSTRAP_CAROUSEL_FILE_EXTENSION
            if size != img.size:
                img.thumbnail(size, Image.ANTIALIAS)
                temp_handle = StringIO()
                img.save(temp_handle, extension)
                temp_handle.seek(0)
                suf = SimpleUploadedFile(os.path.split(self.image.name)[-1],
                                         temp_handle.read(), content_type='image/%s' % extension)
                fname = "%s.%s" % (os.path.splitext(self.image.name)[0], extension)
                self.image.save(fname, suf, save=False)
        super(CarouselItem, self).save()
|
Python
| 0.000052
|
@@ -155,16 +155,21 @@
ugettext
+_lazy
as _%0A%0Af
|
862f81d54624ea198d6351f5ea7c88b66bc02019
|
Make the Nick an argument to Client.py
|
src/Client.py
|
src/Client.py
|
#!python
__author__ = 'JacobAMason'

import sys
from twisted.words.protocols import irc
from twisted.internet import protocol, reactor
import StringIO


class Bot(irc.IRCClient):
    def _get_nickname(self):
        return self.factory.nickname
    nickname = property(_get_nickname)

    def signedOn(self):
        self.join(self.factory.channel)
        print "Signed on as %s." % (self.nickname)

    def joined(self, channel):
        print "Joined %s." % (channel)

    def privmsg(self, user, channel, message):
        if not message.startswith(self.nickname):
            return
        else:
            idx = message.find(' ')
            message = message[idx+1:]

        # create file-like string to capture output
        codeOut = StringIO.StringIO()
        codeErr = StringIO.StringIO()

        # capture output and errors
        sys.stdout = codeOut
        sys.stderr = codeErr

        errorText = ""
        try:
            exec message
        except Exception, err:
            errorText = str(err)

        # restore stdout and stderr
        sys.stdout = sys.__stdout__
        sys.stderr = sys.__stderr__

        s = codeErr.getvalue()
        if s:
            self.msg(channel, "error: %s\n" % s)

        if errorText:
            self.msg(channel, "error: %s\n" % errorText)

        s = codeOut.getvalue()
        if s:
            self.msg(channel, "%s" % s)

        codeOut.close()
        codeErr.close()

    def dataReceived(self, bytes):
        print str(bytes).rstrip()
        # Make sure to up-call - otherwise all of the IRC logic is disabled!
        return irc.IRCClient.dataReceived(self, bytes)


class BotFactory(protocol.ClientFactory):
    protocol = Bot

    def __init__(self, channel, nickname="Boa"):
        self.channel = channel
        self.nickname = nickname

    def clientConnectionLost(self, connector, reason):
        print "Lost connection (%s), reconnecting..." % (reason)
        connector.connect()

    def clientConnectionFailed(self, connector, reason):
        print "Could not connect: %s" % (reason)


if __name__ == "__main__":
    channel = sys.argv[1]
    reactor.connectTCP("coop.test.adtran.com", 6667, BotFactory('#' + channel))
    reactor.run()
|
Python
| 0.001602
|
@@ -2129,16 +2129,43 @@
argv%5B1%5D%0A
+ nickname = sys.argv%5B2%5D%0A
reac
@@ -2208,16 +2208,39 @@
%22, 6667,
+%0A
BotFact
@@ -2256,16 +2256,26 @@
channel
+, nickname
))%0A r
|
ecf71bd004d99b679936e07453f5a938e19f71dc
|
Add aiohttp as a execution requirement
|
megalist_dataflow/setup.py
|
megalist_dataflow/setup.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
setuptools.setup(
    name='megalist_dataflow',
    version='0.1',
    author='Alvaro Stivi',
    author_email='astivi@google.com',
    url='https://cse.googlesource.com/solutions/megalist',
    install_requires=['googleads==20.0.0', 'google-api-python-client==1.7.9',
                      'bloom-filter==1.3', 'google-cloud-core==1.0.2',
                      'google-cloud-datastore==1.9.0'],
    packages=setuptools.find_packages(),
)
|
Python
| 0
|
@@ -11,10 +11,10 @@
t 20
-19
+20
Goo
@@ -980,16 +980,32 @@
e==1.9.0
+, aiohttp==3.6.2
'%5D,%0A
|
8ffb2beea77897e3fa40691f35f2e089dbc5df9a
|
Add more documentation
|
coalib/bearlib/languages/LanguageDefinition.py
|
coalib/bearlib/languages/LanguageDefinition.py
|
import os

from coalib.bearlib.abstractions.SectionCreatable import SectionCreatable
from coalib.misc.StringConstants import StringConstants
from coalib.parsing.ConfParser import ConfParser


class LanguageDefinition(SectionCreatable):
    def __init__(self, language_family: str, language: str):
        """
        Creates a new LanguageDefinition object from file.

        :param language_family: The language family. E.g. C for
                                C++ and C and C# and so on.
        :param language:        The actual language (e.g. C++).
        :raises ConfParser.FileNotFoundError: If no definition is available
                                              for the given family.
        :raises KeyError:       If no definition is available
                                for the given language.
        """
        SectionCreatable.__init__(self)
        self.language = language.lower()

        filename = os.path.join(StringConstants.language_definitions,
                                language_family.lower() + ".coalang")
        self.lang_dict = ConfParser().parse(filename)[language.lower()]

    def __getitem__(self, item):
        return self.lang_dict[item]
|
Python
| 0
|
@@ -362,16 +362,263 @@
file.%0A%0A
+ A Language Definition holds constants which may help parsing the%0A language. If you want to write a bear you'll probably want to use those%0A definitions to keep your bear independent of the semantics of each%0A language.%0A%0A
|
fd3e9c09f14b554883e47102b4750faef5c10ecc
|
Print body in case of HTTPError
|
unit-healthcheck.py
|
unit-healthcheck.py
|
#!/usr/bin/env python
import logging
import argparse
import os
import json
import sys

try:
    from urllib2 import Request, urlopen
except ImportError:
    from urllib.request import Request, urlopen

TSURU_TARGET = os.environ['TSURU_TARGET']
TSURU_TOKEN = os.environ['TSURU_TOKEN']


def main():
    logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
    parser = argparse.ArgumentParser(
        description='Get healthcheck for all units'
    )
    parser.add_argument('-a', metavar='app', type=str,
                        help='Name of app')
    parser.add_argument('-p', metavar='path', type=str,
                        default='/healthcheck',
                        help='Path of healthcheck')
    args = parser.parse_args()
    get_units(args.a, args.p)


def get_units(app, path):
    url = '%s/apps/%s' % (TSURU_TARGET, app)
    req = Request(url, None, {'Authorization': TSURU_TOKEN})
    try:
        resp = urlopen(req, timeout=3)
    except Exception as err:
        logging.exception(err)
        return
    if resp.code != 200:
        logging.error('Failed to get unit list: %d', resp.code)
        return
    ok = True
    data = json.loads(resp.read().decode('utf-8'))
    for unit in data['units']:
        if unit['ProcessName'] != 'web':
            logging.warn('[%s] skip task: %s', unit['Ip'], unit['ProcessName'])
            continue
        if unit['Status'] != 'started':
            logging.warn('[%s] skip status: %s', unit['Ip'], unit['Status'])
            continue
        if not healthcheck_unit(unit, path):
            ok = False
    if not ok:
        sys.exit(1)


def healthcheck_unit(unit, path):
    addr = unit['Address']
    url = "%s://%s%s" % (addr['Scheme'], addr['Host'], path)
    try:
        resp = urlopen(url, timeout=3)
    except Exception as err:
        logging.error('[%s] Failed to healthcheck unit: %s', url, err)
        return False
    if resp.code == 200:
        logging.info('[%s] Healthcheck OK', url)
    else:
        logging.error(
            '[%s] Failed to healthcheck unit, status code: %s, body: %s',
            url, str(resp.code), resp.read())
        return False
    return True


if __name__ == '__main__':
    main()
|
Python
| 0
|
@@ -126,16 +126,27 @@
urlopen
+, HTTPError
%0Aexcept
@@ -205,16 +205,27 @@
urlopen
+, HTTPError
%0A%0A%0ATSURU
@@ -1818,34 +1818,302 @@
en(url, timeout=
-3)
+5)%0A except HTTPError as err:%0A logging.error('%5B%25s%5D Failed to healthcheck unit: %25s', url, err)%0A%0A if hasattr(err, 'file'):%0A logging.error(err.file.read().decode('utf-8'))%0A else:%0A logging.error(err.read())%0A%0A return False%0A
%0A except Exce
|
74272b9916c29bb9e97d4761801ee3730b053b87
|
comment fix
|
athenet/sparsifying/utils/numlike.py
|
athenet/sparsifying/utils/numlike.py
|
"""Template class with arithmetic operations that can be passed through neural
network.
All classes that are being used for derest should inherit from this class."""
class Numlike(object):
"""Template class with arithmetic operations that can be passed through
neural network.
All classes that are being used for derest should inherit from this
class."""
def __init__(self):
"""Create numlike."""
pass
def __getitem__(self, at):
"""Returns specified slice of numlike
at: Coordinates / slice to be taken."""
raise NotImplementedError
def __setitem__(self, at, other):
"""Just like Theano set_subtensor function, but as a operator.
at: Coordinates / slice to be set.
other: Data to be put at 'at'"""
raise NotImplementedError
def shape(self):
"""Returns shape of numlike."""
raise NotImplementedError
def __add__(self, other):
"""Returns sum of two numlikes.
other: numlike."""
raise NotImplementedError
def __radd__(self, other):
raise NotImplementedError
def __sub__(self, other):
"""Returns difference between two numlikes.
other: numlike to be subtracted."""
raise NotImplementedError
def __rsub__(self, other):
"""Returns diffeerence between number and numlike.
other: A number that self will be subtracted from."""
raise NotImplementedError
def __mul__(self, other):
"""Returns product of two numlikes.
other: numlike to be multiplied."""
raise NotImplementedError
def __rmul__(self, other):
raise NotImplementedError
def __div__(self, other):
"""Returns quotient of self and other."""
raise NotImplementedError
def reciprocal(self):
"""Returns reciprocal of the numlike."""
raise NotImplementedError
def neg(self):
"""Returns (-1) * numlike"""
raise NotImplementedError
def exp(self):
"""Returns numlike representing the exponential of the numlike."""
raise NotImplementedError
def square(self):
"""Returns square of the numlike."""
raise NotImplementedError
def power(self, exponent):
"""For numlike N, returns N^exponent.
exponent: Number to be passed as exponent to N^exponent."""
raise NotImplementedError
def dot(self, other):
"""Dot product of numlike vector and a number array (other)"""
raise NotImplementedError
|
Python
| 0
|
@@ -513,16 +513,17 @@
numlike
+.
%0A%0A
@@ -516,32 +516,33 @@
mlike.%0A%0A
+:
at: Coordinates
@@ -561,16 +561,25 @@
e taken.
+%0A
%22%22%22%0A
@@ -727,16 +727,17 @@
+:
at: Coor
@@ -767,24 +767,25 @@
et.%0A
+:
other: Data
@@ -801,16 +801,26 @@
at 'at'
+.%0A
%22%22%22%0A
@@ -1017,32 +1017,33 @@
likes.%0A%0A
+:
other: numlike.%22
@@ -1033,32 +1033,41 @@
:other: numlike.
+%0A
%22%22%22%0A rais
@@ -1238,32 +1238,33 @@
likes.%0A%0A
+:
other: numlike t
@@ -1279,16 +1279,25 @@
tracted.
+%0A
%22%22%22%0A
@@ -1422,24 +1422,25 @@
e.%0A%0A
+:
other: A num
@@ -1477,16 +1477,25 @@
ed from.
+%0A
%22%22%22%0A
@@ -1608,16 +1608,17 @@
+:
other: n
@@ -1641,16 +1641,25 @@
tiplied.
+%0A
%22%22%22%0A
@@ -2032,16 +2032,17 @@
numlike
+.
%22%22%22%0A
@@ -2389,16 +2389,17 @@
+:
exponent
@@ -2578,16 +2578,17 @@
(other)
+.
%22%22%22%0A
|
9ba255886ca5315be1b95ccac28d496e3941f155
|
Bump alpha version
|
uplink/__about__.py
|
uplink/__about__.py
|
"""
This module is the single source of truth for any package metadata
that is used both in distribution (i.e., setup.py) and within the
codebase.
"""
__version__ = "0.8.0a0"
|
Python
| 0
|
@@ -165,11 +165,11 @@
%220.8.0a
-0
+1
%22%0A
|
ec665be1811b458f849cbed09ef3d3c61f9e4533
|
Change order of environment setup
|
metatlas/tools/notebook.py
|
metatlas/tools/notebook.py
|
"""Jupyter notebook helper functions"""
import logging
import os
import shutil
import sys
from pathlib import Path
import pandas as pd
from IPython.core.display import display, HTML
from metatlas.tools.logging import activate_logging
logger = logging.getLogger(__name__)
def configure_environment(log_level):
"""
Sets environment variables and configures logging
inputs:
log_level: one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'
"""
activate_logging(console_level=log_level)
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def validate_kernel():
"""
Raise error if problem with kernel
When on NERSC, this will install the correct kernel if needed
"""
allowed_exe = [
"/global/common/software/m2650/metatlas-targeted-20210521/bin/python",
]
error_msg = "Invalid kernel setting in Jupyter Notebook."
on_nersc = "METATLAS_LOCAL" not in os.environ
if on_nersc and sys.executable not in allowed_exe:
install_kernel()
logger.critical('Please check that the kernel is set to "Metatlas Targeted".')
raise ValueError(error_msg)
try:
# pylint: disable=import-outside-toplevel,unused-import
import dataset # noqa: F401
except ModuleNotFoundError as module_error:
logger.critical(
'Could not find dataset module. Please check that the kernel is set to "Metatlas Targeted".'
)
raise ModuleNotFoundError from module_error
def install_kernel():
"""
Copies kernel.json from repo to active location under home directory.
Only for use on NERC!
"""
logger.info('Installing kernel.json for "Metatlas Targeted".')
repo_path = Path(__file__).resolve().parent.parent.parent
source = repo_path / "notebooks" / "kernels" / "metatlas-targeted.kernel.json"
dest_dir = Path.home() / ".local" / "share" / "jupyter" / "kernels" / "metatlas-targeted"
os.makedirs(dest_dir, exist_ok=True)
shutil.copyfile(source, dest_dir / "kernel.json")
logger.info('Reload the page and change kernel to "Metatlas Targeted".')
def configure_pandas_display(max_rows=5000, max_columns=500, max_colwidth=100):
"""Set pandas display options"""
pd.set_option("display.max_rows", max_rows)
pd.set_option("display.max_columns", max_columns)
pd.set_option("display.max_colwidth", max_colwidth)
def configure_notebook_display():
"""Configure output from Jupyter"""
# set notebook to have minimal side margins
display(HTML("<style>.container { width:100% !important; }</style>"))
def setup(log_level):
"""High level function to prepare the metatlas notebook"""
validate_kernel()
configure_environment(log_level)
configure_notebook_display()
configure_pandas_display()
|
Python
| 0.000001
|
@@ -2659,30 +2659,8 @@
%22%22%22%0A
- validate_kernel()%0A
@@ -2688,24 +2688,46 @@
(log_level)%0A
+ validate_kernel()%0A
configur
|
646416efa7378b645af56031c06e7544cb72627f
|
Delete comment
|
user_recommender.py
|
user_recommender.py
|
from tumblr_manager import TumblrManager, TumblrScraper


class UserRecommender(object):
    user_counter = {}

    def __init__(self, consumer_key=None, consumer_secret=None, oauth_token=None, oauth_token_secret=None):
        self.tm = TumblrManager(consumer_key, consumer_secret, oauth_token, oauth_token_secret)
        self.ts = TumblrScraper(consumer_key, consumer_secret, oauth_token, oauth_token_secret)

    def recommend(self, n=20):
        """
        :param int n: number of users
        :return: a list including n users
        """
        self.set_counter()
        cnt = sorted(self.user_counter.items(), key=lambda x: x[1], reverse=True)
        users = [t[0] for t in cnt]
        return users[:n]

    def set_counter(self):
        url_list = self.tm.fetch_urls()
        all_users = self.ts.fetch_users_from_url(url_list)
        # non_followed_users = list(filter(lambda x: not self.tm.is_following(x), all_users))
        for user in all_users:
            self.add_user(user)

    def add_user(self, user):
        if user not in self.user_counter:
            self.user_counter[user] = 1
        else:
            self.user_counter[user] += 1
|
Python
| 0
|
@@ -842,102 +842,8 @@
st)%0A
- # non_followed_users = list(filter(lambda x: not self.tm.is_following(x), all_users))%0A
|
f261ae4489fe72768973c3eb0cf0f82cd6f0a8a5
|
Refactor IsObjectOwner component
|
backend/shmitter/base/permissions.py
|
backend/shmitter/base/permissions.py
|
import abc
import functools
import operator
from rest_framework.compat import is_authenticated
##############################
# Base
##############################
class BasePermission(metaclass=abc.ABCMeta):
"""
A base class from which all permission classes should inherit.
"""
@abc.abstractmethod
def has_permission(self, request, view):
pass
@abc.abstractmethod
def has_object_permission(self, request, view, obj):
pass
class BaseComponent(BasePermission, metaclass=abc.ABCMeta):
"""
A base class from which all component classes should inherit.
"""
def __invert__(self):
return Not(self)
def __or__(self, component):
return Or(self, component)
def __and__(self, component):
return And(self, component)
class EvaluatePermissionsMixin:
def has_permission(self, request, view):
return self.evaluate_permissions('has_permission', request, view)
def has_object_permission(self, request, view, obj):
return self.evaluate_permissions('has_object_permission', request, view, obj)
##############################
# Operators
##############################
class Not(EvaluatePermissionsMixin, BaseComponent):
def __init__(self, component):
self.component = component
def evaluate_permissions(self, permission_name, *args, **kwargs):
return not getattr(self.component, permission_name)(*args, **kwargs)
class Or(EvaluatePermissionsMixin, BaseComponent):
def __init__(self, *components):
self.components = components
def evaluate_permissions(self, permission_name, *args, **kwargs):
return any(getattr(component, permission_name)(*args, **kwargs)
for component in self.components)
class And(EvaluatePermissionsMixin, BaseComponent):
def __init__(self, *components):
self.components = components
def evaluate_permissions(self, permission_name, *args, **kwargs):
return all(getattr(component, permission_name)(*args, **kwargs)
for component in self.components)
##############################
# Components
##############################
class ActionPermissionComponent(BaseComponent):
def has_permission(self, request, view):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
def has_object_permission(self, request, view, obj):
"""
Return `True` if permission is granted, `False` otherwise.
"""
return True
class AllowAny(ActionPermissionComponent):
"""
Allow any access.
"""
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return True
class DenyAll(ActionPermissionComponent):
"""
Deny any access.
"""
def has_permission(self, request, view):
return False
def has_object_permission(self, request, view, obj):
return False
class IsAuthenticated(ActionPermissionComponent):
"""
Allows access only to authenticated users.
"""
def has_permission(self, request, view):
return request.user and is_authenticated(request.user)
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class IsAdminUser(ActionPermissionComponent):
"""
Allows access only to admin users.
"""
def has_permission(self, request, view):
return request.user and is_authenticated(request.user) and request.user.is_staff
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class IsSuperUser(ActionPermissionComponent):
"""
Allows access only to superusers.
"""
def has_permission(self, request, view):
return request.user and is_authenticated(request.user) and request.user.is_superuser
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class IsObjectOwner(ActionPermissionComponent):
"""
Allow access only to users that own `obj`.
"""
def has_object_permission(self, request, view, obj):
if obj.owner is None:
return False
return obj.owner == request.user
##############################
# Permissions
##############################
class ActionPermission(EvaluatePermissionsMixin, BasePermission):
enough_perms = None
global_perms = None
create_perms = AllowAny()
destroy_perms = AllowAny()
list_perms = AllowAny()
retrieve_perms = AllowAny()
update_perms = AllowAny()
partial_update_perms = AllowAny()
def _validate_permissions(self, perms):
if isinstance(perms, BaseComponent):
return perms
elif isinstance(perms, (tuple, list)):
# If we have a list of components, then we need to convert them
# into one single component using `And` operator.
return functools.reduce(operator.and_, perms)
elif issubclass(perms, BaseComponent):
return perms()
else:
raise RuntimeError('Invalid permission definition')
def _get_action_permissions(self, action):
action_perms = getattr(self, '{}_perms'.format(action))
action_perms = self._validate_permissions(action_perms)
return action_perms
def _get_enough_permissions(self):
enough_perms = self.enough_perms
if enough_perms:
enough_perms = self._validate_permissions(enough_perms)
return enough_perms
def _get_global_permissions(self):
global_perms = self.global_perms
if global_perms:
global_perms = self._validate_permissions(global_perms)
return global_perms
def _get_required_permissions(self, action):
perms = self._get_action_permissions(action)
global_perms = self._get_global_permissions()
if global_perms is not None:
perms = global_perms & perms
enough_perms = self._get_enough_permissions()
if enough_perms is not None:
perms = enough_perms | perms
return perms
def evaluate_permissions(self, permission_name, request, view, *args, **kwargs):
perms = self._get_required_permissions(view.action)
return getattr(perms, permission_name)(request, view, *args, **kwargs)
class ShmitterPermission(ActionPermission):
enough_perms = IsSuperUser()
|
Python
| 0.000107
|
@@ -4209,63 +4209,8 @@
j):%0A
- if obj.owner is None:%0A return False%0A
|
e73372c8a5428547d1544ae44d57119c8b5aa29c
|
make a Iowa WFO outline
|
util/make_shapes.py
|
util/make_shapes.py
|
"""Serialization of geometries for use in pyIEM.plot mapping
We use a pickled protocol=2, which is compat binary.
"""
import psycopg2
import cPickle
from shapely.wkb import loads
import datetime
# Be annoying
print("Be sure to run this against Mesonet database and not laptop!")
def dump_states(fn):
pgconn = psycopg2.connect(database='postgis', host='iemdb',
user='nobody')
cursor = pgconn.cursor()
cursor.execute(""" SELECT state_abbr,
ST_asEWKB(ST_Simplify(the_geom, 0.01)),
ST_x(ST_Centroid(the_geom)), ST_Y(ST_Centroid(the_geom)) from states""")
data = {}
for row in cursor:
data[row[0]] = dict(geom=loads(str(row[1])), lon=row[2], lat=row[3])
# for polygon in geom:
# data[row[0]].append(np.asarray(polygon.exterior))
f = open('../pyiem/data/%s' % (fn, ), 'wb')
cPickle.dump(data, f, 2)
f.close()
def dump_climdiv(fn):
pgconn = psycopg2.connect(database='postgis', host='iemdb',
user='nobody')
cursor = pgconn.cursor()
cursor.execute(""" SELECT iemid, ST_asEWKB(geom),
ST_x(ST_Centroid(geom)), ST_Y(ST_Centroid(geom))
from climdiv""")
data = {}
for row in cursor:
data[row[0]] = dict(geom=loads(str(row[1])), lon=row[2], lat=row[3])
# for polygon in geom:
# data[row[0]].append(np.asarray(polygon.exterior))
f = open('../pyiem/data/%s' % (fn, ), 'wb')
cPickle.dump(data, f, 2)
f.close()
def dump_cwa(fn):
pgconn = psycopg2.connect(database='mesosite', host='iemdb',
user='nobody')
cursor = pgconn.cursor()
cursor.execute(""" SELECT wfo, ST_asEWKB(ST_Simplify(geom, 0.01)),
ST_x(ST_Centroid(geom)), ST_Y(ST_Centroid(geom)), region
from cwa""")
data = {}
for row in cursor:
data[row[0]] = dict(geom=loads(str(row[1])), lon=row[2], lat=row[3],
region=row[4])
# for polygon in geom:
# data[row[0]].append(np.asarray(polygon.exterior))
f = open('../pyiem/data/%s' % (fn, ), 'wb')
cPickle.dump(data, f, 2)
f.close()
def dump_ugc(gtype, fn):
pgconn = psycopg2.connect(database='postgis', host='iemdb',
user='nobody')
cursor = pgconn.cursor()
cursor.execute(""" SELECT ugc, wfo, ST_asEWKB(simple_geom),
ST_x(centroid), ST_Y(centroid) from ugcs
WHERE end_ts is null and substr(ugc, 3, 1) = %s""", (gtype,))
data = {}
for row in cursor:
data[row[0]] = dict(cwa=row[1][:3], geom=loads(str(row[2])),
lon=row[3], lat=row[4])
# for polygon in geom:
# data[row[0]].append(np.asarray(polygon.exterior))
f = open('../pyiem/data/%s' % (fn, ), 'wb')
cPickle.dump(data, f, 2)
f.close()
def check_file(fn):
sts = datetime.datetime.now()
data = cPickle.load(open("../pyiem/data/%s" % (fn, ), 'rb'))
ets = datetime.datetime.now()
print (ets - sts).total_seconds(), len(data.keys()), fn
dump_ugc('C', 'ugcs_county.pickle')
dump_ugc('Z', 'ugcs_zone.pickle')
check_file('ugcs_county.pickle')
check_file('ugcs_zone.pickle')
dump_cwa("cwa.pickle")
check_file('cwa.pickle')
dump_climdiv("climdiv.pickle")
check_file("climdiv.pickle")
dump_states('us_states.pickle')
check_file('us_states.pickle')
|
Python
| 0.003036
|
@@ -1967,98 +1967,633 @@
- # for polygon in geom:%0A # data%5Brow%5B0%5D%5D.append(np.asarray(polygon.exterior))%0A
+f = open('../pyiem/data/%25s' %25 (fn, ), 'wb')%0A cPickle.dump(data, f, 2)%0A f.close()%0A%0A%0Adef dump_iowawfo(fn):%0A %22%22%22 A region with the Iowa WFOs%22%22%22%0A pgconn = psycopg2.connect(database='postgis', host='iemdb',%0A user='nobody')%0A cursor = pgconn.cursor()%0A%0A cursor.execute(%22%22%22 SELECT ST_asEWKB(ST_Simplify(ST_Union(the_geom), 0.01))%0A from cwa%0A WHERE wfo in ('DMX', 'ARX', 'DVN', 'OAX', 'FSD')%22%22%22)%0A row = cursor.fetchone()%0A%0A geo = loads(str(row%5B0%5D))%0A data = dict()%0A data%5B'iowawfo'%5D = dict(geom=geo,%0A lon=geo.centroid.x, lat=geo.centroid.y)
%0A
@@ -3591,16 +3591,47 @@
)), fn%0A%0A
+dump_iowawfo('iowawfo.pickle')%0A
dump_ugc
|
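The added dump_iowawfo() pushes the heavy lifting into PostGIS (ST_Union, then ST_Simplify) and reads back only the merged geometry and its centroid. The same operations can be sketched client-side with shapely; the boxes below are illustrative stand-ins for the five CWA polygons, not real WFO geometries:

from shapely.geometry import box
from shapely.ops import unary_union

cwas = [box(0, 0, 2, 2), box(1, 1, 3, 3), box(2, 0, 4, 2)]  # stand-ins for DMX/ARX/DVN/OAX/FSD
merged = unary_union(cwas)          # roughly ST_Union(the_geom)
simplified = merged.simplify(0.01)  # roughly ST_Simplify(..., 0.01)
data = dict(iowawfo=dict(geom=simplified,
                         lon=simplified.centroid.x,
                         lat=simplified.centroid.y))
print(data['iowawfo']['lon'], data['iowawfo']['lat'])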
906d765e387367654a02a36e9b5ba7aca4480ed6
|
Check if zip only contains one file
|
util/zipwrangler.py
|
util/zipwrangler.py
|
from pathlib import Path
from zipfile import ZipFile
from tempfile import TemporaryDirectory
import shutil
ignore = ['__MACOSX', '.DS_Store']
def get_cleaned_contents(zipfile, ignore_list=ignore, verbose=False):
contents = []
for info in zipfile.infolist():
if not any(ignored in info.filename for ignored in ignore_list):
contents.append(info)
elif verbose:
print(f'ignored: {info.filename}')
return contents
def clean_unzip_with_temp_dir(zipfilename: Path, target=None, ignore_list=ignore, overwrite=False, remove_zip=False):
zipfile = ZipFile(str(zipfilename))
if target is None:
target = Path.cwd() / zipfilename.stem
else:
target = target / zipfilename.stem
try:
target.mkdir(exist_ok=overwrite)
except FileExistsError:
print(f'file exists, not extracting {zipfilename.name} to {target}')
return
contents = get_cleaned_contents(zipfile, ignore_list)
with TemporaryDirectory(dir=Path.cwd().absolute()) as tempdir:
temp = Path(tempdir)
for file in contents:
zipfile.extract(file, path=tempdir)
contents = list(temp.iterdir())
while len(contents) == 1:
content = contents.pop()
contents = list(content.iterdir())
for i in contents:
shutil.move(str(i), str(target))
if remove_zip:
zipfilename.unlink()
def main():
for zipfilename in Path.cwd().glob('*.zip'):
clean_unzip_with_temp_dir(zipfilename)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1272,42 +1272,156 @@
-contents = list(content.iterdir())
+if content.is_dir():%0A contents = list(content.iterdir())%0A else:%0A contents = %5Bcontent%5D%0A break
%0A%0A
|
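The fix matters because Path.iterdir() raises NotADirectoryError when called on a file, so the old loop crashed whenever the unwrapping descended to a single file. The corrected descend-and-stop logic, extracted into a standalone helper for illustration:

from pathlib import Path

def unwrap_single_entries(root):
    # Descend while the extracted tree holds exactly one directory;
    # stop without iterating when the lone entry is a file.
    contents = list(Path(root).iterdir())
    while len(contents) == 1:
        content = contents.pop()
        if content.is_dir():
            contents = list(content.iterdir())
        else:
            contents = [content]
            break
    return contents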
9785c4f887353ed4835b3ecee602f07f78cd0eab
|
remove some chatter from make_examples.py
|
doc/make_examples_rst.py
|
doc/make_examples_rst.py
|
#!/usr/bin/env python
"""
generate the rst files for the examples by iterating over the pylab examples
"""
# This code was developed from the Matplotlib gen_rst.py module
# and is distributed with the same license as Matplotlib
import os, glob
import os
import re
import sys
#fileList = []
#rootdir = '../../examples'
def out_of_date(original, derived):
"""
    Returns True if derived is out-of-date wrt original,
both of which are full file paths.
TODO: this check isn't adequate in some cases. Eg, if we discover
a bug when building the examples, the original and derived
    will be unchanged but we still want to force a rebuild. We can
manually remove from _static, but we may need another solution
"""
return (not os.path.exists(derived) or
os.stat(derived).st_mtime < os.stat(original).st_mtime)
def main(exampledir,sourcedir):
noplot_regex = re.compile(r"#\s*-\*-\s*noplot\s*-\*-")
datad = {}
for root, subFolders, files in os.walk(exampledir):
for fname in files:
if ( fname.startswith('.') or fname.startswith('#') or fname.startswith('_') or
fname.find('.svn')>=0 or not fname.endswith('.py') ):
continue
fullpath = os.path.join(root,fname)
contents = file(fullpath).read()
# indent
relpath = os.path.split(root)[-1]
datad.setdefault(relpath, []).append((fullpath, fname, contents))
subdirs = datad.keys()
subdirs.sort()
fhindex = file(os.path.join(sourcedir,'examples','index.rst'), 'w')
fhindex.write("""\
.. _examples-index:
*****************
NetworkX Examples
*****************
.. htmlonly::
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 2
""")
for subdir in subdirs:
output_dir= os.path.join(sourcedir,'examples',subdir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
static_dir = os.path.join(sourcedir, 'static', 'examples')
if not os.path.exists(static_dir):
os.makedirs(static_dir)
subdirIndexFile = os.path.join(subdir, 'index.rst')
fhsubdirIndex = file(os.path.join(output_dir,'index.rst'), 'w')
fhindex.write(' %s\n\n'%subdirIndexFile)
#thumbdir = '../_static/plot_directive/mpl_examples/%s/thumbnails/'%subdir
#for thumbname in glob.glob(os.path.join(thumbdir,'*.png')):
# fhindex.write(' %s\n'%thumbname)
fhsubdirIndex.write("""\
.. _%s-examples-index:
##############################################
%s
##############################################
.. htmlonly::
:Release: |version|
:Date: |today|
.. toctree::
:maxdepth: 1
"""%(subdir, subdir.title()))
data = datad[subdir]
data.sort()
#parts = os.path.split(static_dir)
#thumb_dir = ('../'*(len(parts)-1)) + os.path.join(static_dir, 'thumbnails')
for fullpath, fname, contents in data:
basename, ext = os.path.splitext(fname)
static_file = os.path.join(static_dir, fname)
#thumbfile = os.path.join(thumb_dir, '%s.png'%basename)
#print ' static_dir=%s, basename=%s, fullpath=%s, fname=%s, thumb_dir=%s, thumbfile=%s'%(static_dir, basename, fullpath, fname, thumb_dir, thumbfile)
rstfile = '%s.rst'%basename
outfile = os.path.join(output_dir, rstfile)
fhsubdirIndex.write(' %s\n'%rstfile)
if (not out_of_date(fullpath, static_file) and
not out_of_date(fullpath, outfile)):
continue
print '%s/%s'%(subdir,fname)
fhstatic = file(static_file, 'w')
fhstatic.write(contents)
fhstatic.close()
fh = file(outfile, 'w')
fh.write('.. _%s-%s:\n\n'%(subdir, basename))
base=fname.partition('.')[0]
title = '%s'%(base.replace('_',' ').title())
#title = '<img src=%s> %s example code: %s'%(thumbfile, subdir, fname)
fh.write(title + '\n')
fh.write('='*len(title) + '\n\n')
pngname=base+".png"
png=os.path.join(static_dir,pngname)
print os.getcwd(),png,os.path.exists(png)
linkname = os.path.join('..', '..', 'static', 'examples')
if os.path.exists(png):
print "adding link",os.path.join(linkname,pngname)
fh.write('.. image:: %s \n\n'%os.path.join(linkname,pngname))
linkname = os.path.join('..', '..', '_static', 'examples')
fh.write("[`source code <%s>`_]\n\n::\n\n" % os.path.join(linkname,fname))
# indent the contents
contents = '\n'.join([' %s'%row.rstrip() for row in contents.split('\n')])
fh.write(contents)
# fh.write('\n\nKeywords: python, matplotlib, pylab, example, codex (see :ref:`how-to-search-examples`)')
fh.close()
fhsubdirIndex.close()
fhindex.close()
if __name__ == '__main__':
import sys
main(sys.argv[1],sys.argv[2])
|
Python
| 0.000022
|
@@ -4227,62 +4227,8 @@
me)%0A
- print os.getcwd(),png,os.path.exists(png)%0A
@@ -4333,75 +4333,8 @@
g):%0A
- print %22adding link%22,os.path.join(linkname,pngname)%0A
|
2d09314ab58bb766372dc6e263fb17428b1fd3cd
|
Fix check for existing pools.
|
doc/pool_scripts/cats.py
|
doc/pool_scripts/cats.py
|
import os
import photomosaic.flickr
import photomosaic as pm
if not os.path.isfile('~/pools/cats/pool.json'):
FLICKR_API_KEY = os.environ['FLICKR_API_KEY']
pm.set_options(flickr_api_key=FLICKR_API_KEY)
photomosaic.flickr.from_search('cats', '~/pools/cats/')
pool = pm.make_pool('~/pools/cats/*.jpg')
pm.export_pool(pool, '~/pools/cats/pool.json') # save color analysis for future reuse
|
Python
| 0
|
@@ -78,16 +78,35 @@
.isfile(
+os.path.expanduser(
'~/pools
@@ -122,16 +122,17 @@
l.json')
+)
:%0A FL
|
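The underlying bug: os.path.isfile() does not expand '~', so the tilde is treated as a literal path component, the guard is always False, and the Flickr search re-runs on every invocation. A two-line demonstration:

import os

raw = '~/pools/cats/pool.json'
print(os.path.isfile(raw))                      # False even when the file exists
print(os.path.isfile(os.path.expanduser(raw)))  # checks the real path under $HOME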
89e2c825481ebd62e467ba60b4cd73ddb09149e8
|
Fix deprecation warning.
|
jazzband/config.py
|
jazzband/config.py
|
import os
from datetime import timedelta
from decouple import Csv, config
from markdown.extensions.toc import TocExtension
from markdown.extensions.wikilinks import WikiLinkExtension
from .renderer import smart_pygmented_markdown
IS_PRODUCTION = "PRODUCTION" in os.environ
ROOT_DIR = os.path.dirname(__file__)
SECRET_KEY = config("SECRET_KEY", "dev key")
DEBUG = config("DEBUG", True, cast=bool)
SERVER_NAME = config("SERVER_NAME", "localhost:5000")
HOSTNAMES = config("HOSTNAMES", "localhost:5000,0.0.0.0:5000", cast=Csv())
REDIS_URL = config("REDIS_URL", "redis://redis:6379/0")
QUEUE_URL = config("QUEUE_URL", REDIS_URL)
CACHE_REDIS_URL = config("CACHE_REDIS_URL", REDIS_URL)
CACHE_TYPE = "redis"
CACHE_KEY_PREFIX = "cache"
CACHE_DEFAULT_TIMEOUT = 60 * 5
MAIL_DEFAULT_SENDER = config("MAIL_DEFAULT_SENDER", "Jazzband <roadies@jazzband.co>")
MAIL_PASSWORD = config("MAIL_PASSWORD")
MAIL_PORT = config("MAIL_PORT", 587, cast=int)
MAIL_SERVER = config("MAIL_SERVER", "localhost")
MAIL_USERNAME = config("MAIL_USERNAME", "")
MAIL_USE_TLS = config("MAIL_USE_TLS", False, cast=bool)
# how many seconds to set the expires and max_age headers
HTTP_CACHE_TIMEOUT = config("HTTP_CACHE_TIMEOUT", 60 * 60, cast=int)
FLATPAGES_ABOUT_ROOT = "../docs/about"
FLATPAGES_ABOUT_EXTENSION = FLATPAGES_NEWS_EXTENSION = [".md"]
FLATPAGES_NEWS_MARKDOWN_EXTENSIONS = [
"codehilite",
"fenced_code",
"footnotes",
"admonition",
"tables",
"abbr",
"smarty",
WikiLinkExtension(base_url="/about/", end_url="", html_class=""),
]
FLATPAGES_ABOUT_MARKDOWN_EXTENSIONS = FLATPAGES_NEWS_MARKDOWN_EXTENSIONS + [
TocExtension(permalink=True)
]
FLATPAGES_ABOUT_HTML_RENDERER = FLATPAGES_NEWS_HTML_RENDERER = smart_pygmented_markdown
FLATPAGES_NEWS_ROOT = "../docs/news"
# Set these values in the .env file or env vars
GITHUB_OAUTH_CLIENT_ID = config("GITHUB_CLIENT_ID", "")
GITHUB_OAUTH_CLIENT_SECRET = config("GITHUB_CLIENT_SECRET", "")
GITHUB_ORG_NAME = config("GITHUB_ORG_NAME", "jazzband")
GITHUB_ORG_ID = config("GITHUB_ORG_ID", 15129049)
GITHUB_SCOPE = config("GITHUB_SCOPE", "read:org,user:email")
GITHUB_MEMBERS_TEAM_ID = config("GITHUB_MEMBERS_TEAM_ID", 0, cast=int)
GITHUB_MEMBERS_TEAM_SLUG = config("GITHUB_MEMBERS_TEAM_SLUG", "members")
GITHUB_ROADIES_TEAM_SLUG = config("GITHUB_ROADIES_TEAM_SLUG", "roadies")
GITHUB_ADMIN_TOKEN = config("GITHUB_ADMIN_TOKEN", "")
GITHUB_WEBHOOKS_KEY = config("GITHUB_WEBHOOKS_KEY", "")
SESSION_COOKIE_NAME = "session"
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_SECURE = not DEBUG
SESSION_REFRESH_EACH_REQUEST = False
PERMANENT_SESSION_LIFETIME = timedelta(days=14)
USE_SESSION_FOR_NEXT = True
SQLALCHEMY_DATABASE_URI = config("DATABASE_URL", "postgres://postgres@db/postgres")
if IS_PRODUCTION:
SQLALCHEMY_DATABASE_URI += "?sslmode=require"
VALIDATE_IP = config("GITHUB_VALIDATE_IP", True, cast=bool)
VALIDATE_SIGNATURE = config("GITHUB_VALIDATE_SIGNATURE", True, cast=bool)
else:
VALIDATE_IP = False
VALIDATE_SIGNATURE = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
CSP_REPORT_URI = config("CSP_REPORT_URI", None)
CSP_REPORT_ONLY = config("CSP_REPORT_ONLY", False, cast=bool)
CSP_RULES = {
"default-src": "'self'",
"font-src": "'self' data:",
"frame-src": "'self'",
"script-src": "'self'",
"style-src": "'self' 'unsafe-inline'",
"img-src": "* data:",
"object-src": "'none'",
}
FEATURE_POLICY = {
"geolocation": "'none'",
"midi": "'none'",
"notifications": "'self'",
"push": "'self'",
"sync-xhr": "'self'",
"microphone": "'none'",
"camera": "'none'",
"magnetometer": "'none'",
"gyroscope": "'none'",
"speaker": "'self'",
"vibrate": "'self'",
"fullscreen": "'self'",
"payment": "'self'",
}
SENTRY_USER_ATTRS = ["id", "login", "is_banned", "is_member"]
if "GIT_REV" in os.environ:
SENTRY_CONFIG = {"release": os.environ["GIT_REV"]}
UPLOAD_ROOT = "/app/uploads"
UPLOAD_ENABLED = config("UPLOAD_ENABLED", True, cast=bool)
RELEASE_ENABLED = config("RELEASE_ENABLED", True, cast=bool)
MAX_CONTENT_LENGTH = 60 * 1024 * 1024 # 60M
ONE_MINUTE = 60 * 1000 # in ms
|
Python
| 0.000001
|
@@ -692,21 +692,49 @@
TYPE = %22
-redis
+flask_caching.backends.RedisCache
%22%0ACACHE_
|
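The deprecation being silenced here is, presumably, Flask-Caching's move away from short backend aliases: newer releases want CACHE_TYPE to be a dotted import path rather than a name like "redis". The changed setting in isolation:

# Deprecated alias:
# CACHE_TYPE = "redis"
# Dotted-path spelling pointing at the backend class directly:
CACHE_TYPE = "flask_caching.backends.RedisCache"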
a8e773e1ff7e0caf67d34d3b00719a57ee81da32
|
Use the server hostname to get the prefix.
|
uwhoisd/__init__.py
|
uwhoisd/__init__.py
|
"""
A 'universal' WHOIS proxy server.
"""
import logging
import logging.config
import os.path
import re
import socket
import sys
from uwhoisd import net, utils
USAGE = "Usage: %s <config>"
PORT = socket.getservbyname('whois', 'tcp')
CONFIG = """
[uwhoisd]
iface=0.0.0.0
port=4343
registry_whois=false
suffix=whois-servers.net
[overrides]
[prefixes]
[recursion_patterns]
[broken]
"""
logger = logging.getLogger('uwhoisd')
class UWhois(object):
"""
Universal WHOIS proxy.
"""
__slots__ = (
'conservative',
'overrides',
'prefixes',
'recursion_patterns',
'registry_whois',
'suffix',
'broken',
)
def __init__(self):
super(UWhois, self).__init__()
self.suffix = None
self.overrides = {}
self.prefixes = {}
self.recursion_patterns = {}
self.broken = {}
self.registry_whois = False
self.conservative = ()
def _get_dict(self, parser, section):
"""
Pull a dictionary out of the config safely.
"""
if parser.has_section(section):
values = dict(
(key, utils.decode_value(value))
for key, value in parser.items(section))
else:
values = {}
setattr(self, section, values)
def read_config(self, parser):
"""
Read the configuration for this object from a config file.
"""
self.registry_whois = utils.to_bool(
parser.get('uwhoisd', 'registry_whois'))
self.suffix = parser.get('uwhoisd', 'suffix')
self.conservative = [
zone
for zone in parser.get('uwhoisd', 'conservative').split("\n")
if zone != '']
for section in ('overrides', 'prefixes', 'broken'):
self._get_dict(parser, section)
for zone, pattern in parser.items('recursion_patterns'):
self.recursion_patterns[zone] = re.compile(
utils.decode_value(pattern),
re.I)
def get_whois_server(self, zone):
"""
Get the WHOIS server for the given zone.
"""
if zone in self.overrides:
server = self.overrides[zone]
else:
server = zone + '.' + self.suffix
if ':' in server:
server, port = server.split(':', 1)
port = int(port)
else:
port = PORT
return server, port
def get_registrar_whois_server(self, zone, response):
"""
Extract the registrar's WHOIS server from the registry response.
"""
matches = self.recursion_patterns[zone].search(response)
return None if matches is None else matches.group('server')
def get_prefix(self, zone):
"""
Gets the prefix required when querying the servers for the given zone.
"""
return self.prefixes[zone] if zone in self.prefixes else ''
def _thin_query(self, server_index, response, port, query):
server = self.get_registrar_whois_server(server_index, response)
if server is not None:
if not self.registry_whois:
response = ""
with net.WhoisClient(server, port) as client:
logger.info(
"Recursive query to %s about %s",
server, query)
response += client.whois(query)
return response
def whois(self, query):
"""
Query the appropriate WHOIS server.
"""
# Figure out the zone whose WHOIS server we're meant to be querying.
for zone in self.conservative:
if query.endswith('.' + zone):
break
else:
if query.split('.')[-1].isdigit():
zone = query.split('.')[0]
elif ':' in query:
zone = 'ipv6'
else:
_, zone = utils.split_fqdn(query)
# Query the registry's WHOIS server.
server, port = self.get_whois_server(zone)
with net.WhoisClient(server, port) as client:
logger.info("Querying %s about %s", server, query)
prefix = self.get_prefix(zone)
if len(prefix) == 0:
prefix = self.get_prefix(server)
response = client.whois(prefix + query)
# Thin registry? Query the registrar's WHOIS server.
if zone in self.recursion_patterns:
response = self._thin_query(zone, response, port, query)
elif server in self.recursion_patterns:
response = self._thin_query(server, response, port, query)
if self.broken.get(server) is not None:
response += self.broken.get(server)
return response
def main():
"""
Execute the daemon.
"""
if len(sys.argv) != 2:
print >> sys.stderr, USAGE % os.path.basename(sys.argv[0])
return 1
logging.config.fileConfig(sys.argv[1])
try:
logger.info("Reading config file at '%s'", sys.argv[1])
parser = utils.make_config_parser(CONFIG, sys.argv[1])
iface = parser.get('uwhoisd', 'iface')
port = parser.getint('uwhoisd', 'port')
logger.info("Listen on %s:%d", iface, port)
uwhois = UWhois()
uwhois.read_config(parser)
cache = utils.to_bool(parser.get('cache', 'enable'))
redis_cache = utils.to_bool(parser.get('redis_cache', 'enable'))
if cache:
logger.info("Caching activated")
cache = utils.Cache(
max_size=parser.getint('cache', 'max_size'),
max_age=parser.getint('cache', 'max_age'))
def whois(query):
"""Caching wrapper around UWhois."""
cache.evict_expired()
if query in cache:
logger.info("Cache hit for %s", query)
response = cache[query]
else:
response = uwhois.whois(query)
cache[query] = response
return response
elif redis_cache:
logger.info("Redis caching activated")
import redis
redis_host = parser.get('redis_cache', 'host')
redis_port = parser.getint('redis_cache', 'port')
redis_database = parser.getint('redis_cache', 'db')
redis_expire = parser.getint('redis_cache', 'expire')
redis_cache = redis.StrictRedis(redis_host, redis_port,
redis_database)
def whois(query):
"""Redis caching wrapper around UWhois."""
response = redis_cache.get(query)
if response is None:
response = uwhois.whois(query)
redis_cache.setex(query, redis_expire, response)
return response
else:
logger.info("Caching deactivated")
whois = uwhois.whois
except Exception, ex: # pylint: disable-msg=W0703
print >> sys.stderr, "Could not parse config file: %s" % str(ex)
return 1
net.start_service(iface, port, whois)
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -2768,28 +2768,30 @@
refix(self,
-zone
+server
):%0A %22
@@ -2916,47 +2916,20 @@
ixes
-%5Bzone%5D if zone in self.prefixes else ''
+.get(server)
%0A%0A
|
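The patch collapses the ternary "self.prefixes[zone] if zone in self.prefixes else ''" into "self.prefixes.get(server)" and keys the lookup on the server hostname. One subtlety: dict.get() returns None on a miss where the old code returned '', so callers that test len(prefix) must be prepared for None. Illustrated with a hypothetical prefix table:

prefixes = {'whois.nic.example': 'domain '}
# Hit: identical behaviour either way.
assert prefixes.get('whois.nic.example') == 'domain '
# Miss: get() yields None, whereas the old ternary yielded ''.
assert prefixes.get('whois.nic.other') is None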
be3caa1af2a1c6c51cd1b8a9300949e4604d0c0f
|
exclude some symbols from the documentation list using `__no_apidoc__`
|
doc/utils/checkapidoc.py
|
doc/utils/checkapidoc.py
|
# -*- coding: utf-8 -*-
"""Trac API doc checker
Verify that all symbols belonging to modules already documented in the doc/api
Sphinx sources are referenced.
See http://trac.edgewall.org/wiki/TracDev/ApiDocs
"""
import fnmatch
import os
import re
import sys
excluded_docs = ['index.rst']
api_doc = 'doc/api'
def usage(cmd):
print "Usage: %s [FILE...]" % (cmd,)
print
print "FILE is a %s file and can be a glob pattern." % (api_doc,)
print "If no files are given, check all."
exit(0)
def main(argv):
api_files = [rst for rst in os.listdir('doc/api')
if fnmatch.fnmatch(rst, '*.rst')
and rst not in excluded_docs]
cmd = argv.pop(0)
def has(*options):
for opt in options:
if opt in argv:
return argv.pop(argv.index(opt))
if has('-h', '--help'):
usage(cmd)
verbose = has('-v', '--verbose')
only_documented = not has('-a', '--all')
if argv:
given_files = []
for arg in argv:
arg = arg.replace('\\', '/').replace(api_doc + '/', '')
arg = arg.replace('.rst', '') + '.rst'
if '*' in arg: # glob pattern
given_files += [rst for rst in api_files
if fnmatch.fnmatch(rst, arg)]
elif arg in api_files:
given_files.append(arg)
api_files = given_files
for rst in api_files:
check_api_doc(rst, verbose, only_documented)
def check_api_doc(rst, verbose, only_documented):
if verbose:
print "== Checking %s ... " % (rst,)
module_name = rst.replace('_', '.').replace('.rst', '')
try:
module = __import__(module_name, globals(), {}, ['__all__'])
except ImportError, e:
print "Skipping %s (%s)" % (rst, e)
return
all = getattr(module, '__all__', None)
if not all:
print "Warning: %s doesn't define __all__, using exported symbols." % (
module_name,)
all = get_default_symbols(module, only_documented)
symbols, keywords = get_sphinx_documented_symbols(rst)
for symbol in sorted(all):
if symbol in symbols:
if verbose:
print " - OK %14s :: %s" % (
keywords[symbols.index(symbol)], symbol)
else:
value = getattr(module, symbol)
cls = getattr(value, '__class__', None)
keyword = 'data'
if not cls or cls.__name__ == 'type':
keyword = 'class'
elif cls.__name__ in ('function', 'instancemethod'):
keyword = 'function'
elif cls.__name__ == 'module':
keyword = 'module'
print " * .. %14s :: %s" % ('auto' + keyword, symbol)
sphinx_doc_re = re.compile(r'''
^.. \s+ ((?:py:|auto)(?:module|class|function|attribute)|data) # keyword
\s* :: \s* ([\w\.]+) # symbol
''', re.MULTILINE | re.VERBOSE)
def get_sphinx_documented_symbols(rst):
doc = file(os.path.join(api_doc, rst)).read()
symbols, keywords = [], []
for k, s in sphinx_doc_re.findall(doc):
symbols.append(s.split('.')[-1])
keywords.append(k)
return symbols, keywords
def get_default_symbols(module, only_documented):
public = get_public_symbols(module) - get_imported_symbols(module)
# eliminate modules
all = []
for symbol in public:
try:
__import__(symbol)
except ImportError:
all.append(symbol)
# only keep symbols having a docstring
if only_documented:
documented = []
for symbol in all:
value = getattr(module, symbol)
if value.__doc__ and (not getattr(value, '__class__', None) or
value.__doc__ != value.__class__.__doc__):
documented.append(symbol)
all = documented
return all
def get_public_symbols(m):
return set(symbol for symbol in dir(m) if not symbol.startswith('_'))
import_from_re = re.compile(r'''
^ \s* from \s+ ([\w\.]+) \s+ import \s+ # module
( \* # all symbols
| %s (?: [\s\\]* , [\s\\]* %s)* # list of symbols
| \( \s* %s (?: \s* , \s* %s)* \s* \) # list of symbols in parenthesis
)
''' % ((r'(?:\w+|\w+\s+as\s+\w+)',) * 4), re.MULTILINE | re.VERBOSE)
remove_original_re = re.compile(r'\w+\s+as', re.MULTILINE)
def get_imported_symbols(module):
src_filename = module.__file__.replace('\\', '/').replace('.pyc', '.py')
if src_filename.endswith('/__init__.py'):
return set()
src = file(src_filename).read()
imported = set()
for mod, symbol_list in import_from_re.findall(src):
symbol_list = symbol_list.strip()
if symbol_list == '*':
try:
imported_module = __import__(mod, globals(), {}, ['__all__'])
symbols = set(getattr(imported_module, '__all__', None) or
get_public_symbols(imported_module))
except ImportError:
print "Warning: 'from %s import *' couldn't be resolved" % (
mod,)
continue
else:
if symbol_list and symbol_list[0] == '(' and symbol_list[-1] == ')':
symbol_list = symbol_list[1:-1]
symbols = set(remove_original_re.sub('', symbol_list)
.replace('\\', '').replace(',', ' ').split())
imported |= symbols
return imported
if __name__ == '__main__':
main(sys.argv)
|
Python
| 0.000205
|
@@ -2040,16 +2040,244 @@
mented)%0A
+ no_apidoc = getattr(module, '__no_apidoc__', None)%0A if no_apidoc:%0A if isinstance(no_apidoc, basestring):%0A no_apidoc = %5Bs.strip() for s in no_apidoc.split()%5D%0A all = list(set(all) - set(no_apidoc))%0A
symb
|
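The new __no_apidoc__ hook lets a module opt symbols out of the check, supplied either as a whitespace-separated string or as a list. The normalise-then-subtract step on its own (written with str in place of Python 2's basestring):

no_apidoc = 'helper_a helper_b'   # could equally be a list of names
if isinstance(no_apidoc, str):
    no_apidoc = [s.strip() for s in no_apidoc.split()]
all_symbols = ['Api', 'helper_a', 'helper_b', 'main']
all_symbols = sorted(set(all_symbols) - set(no_apidoc))
print(all_symbols)  # ['Api', 'main']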
fba155006299572eea76d7d478b13f8d269e612e
|
update city getter
|
emonitor/modules/streets/city.py
|
emonitor/modules/streets/city.py
|
from sqlalchemy.orm.collections import attribute_mapped_collection
from emonitor.extensions import db, cache
class City(db.Model):
"""City class"""
__tablename__ = 'cities'
__table_args__ = {'extend_existing': True}
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(30))
dept = db.Column('dept', db.Integer, db.ForeignKey('departments.id'))
mapname = db.Column(db.String(30))
default = db.Column(db.Integer)
subcity = db.Column(db.Text)
color = db.Column(db.String(6), default="000000")
osmid = db.Column(db.Integer, default=0)
osmname = db.Column(db.String(30), default="")
streets = db.relationship("Street", collection_class=attribute_mapped_collection('id'), cascade="all, delete-orphan")
department = db.relationship("Department", collection_class=attribute_mapped_collection('id'))
def __init__(self, name, dept, mapname, default, subcity, color, osmid, osmname):
self.name = name
self.dept = dept
self.mapname = mapname
self.default = default
self.subcity = subcity
self.color = color
self.osmid = osmid
self.osmname = osmname
@property
def serialize(self):
return dict(id=self.id, name=self.name)
def getSubCityList(self):
"""
Get list of subcities
:return: list of strings with subcity names
"""
try:
return [s for s in self.subcity.split("\r\n") if s.strip() != ""]
except:
return []
def getSubCityListLine(self):
try:
return ", ".join([s for s in self.subcity.split("\r\n") if s.strip() != ""])
except:
return ""
def getColorName(self):
return '#%s' % self.color
def __repr__(self):
return '<City %r>' % self.name
@cache.memoize()
def getStreets(self):
"""
Get sorted list of streets
        :return: list of :py:class:`emonitor.modules.streets.street.Street`
"""
return sorted(self.streets.values(), key=lambda x: x.name)
def addStreet(self, street):
"""
Add street to current city
        :param street: :py:class:`emonitor.modules.streets.street.Street`
"""
#cache.delete_memoized('getStreets', self)
if street.id in self.streets:
self.streets[street.id] = street
else:
self.streets[street.id] = street
db.session.commit()
# static part
@staticmethod
def getCities():
"""
Get list of all cities
        :return: list of :py:class:`emonitor.modules.streets.city.City`
"""
return db.session.query(City).order_by(City.default.desc(), City.name).all()
@staticmethod
def getCitiesDict():
"""
Get cities as dict
:return: dict of :py:class:`emonitor.modules.streets.city.City`, id as key
"""
ret = {}
for city in db.session.query(City).order_by('id'):
ret[city.id] = city
if city.default == 1:
ret[0] = city
return ret
@staticmethod
def get_byid(cityid):
"""
Get city by id
:param cityid: id of city
:return: :py:class:`emonitor.modules.streets.city.City`
"""
return db.session.query(City).filter_by(id=cityid).first() or None
@staticmethod
def get_byname(cityname):
"""
Get city by name
:param cityname: name of city
:return: :py:class:`emonitor.modules.streets.city.City`
"""
city = db.session.query(City).filter_by(name=cityname).first()
#if city[0]:
# return city[0]
#return None
return city or None
@staticmethod
def getDefaultCity():
"""
Get default city (default=1)
:return: :py:class:`emonitor.modules.streets.city.City`
"""
city = db.session.query(City).filter_by(default=1).first()
return city or None
|
Python
| 0.000001
|
@@ -2568,19 +2568,23 @@
tCities(
+id=0
):%0A
-
@@ -2696,32 +2696,56 @@
ty%60%0A %22%22%22%0A
+ if id == 0:%0A
return d
@@ -2813,16 +2813,103 @@
).all()%0A
+ else:%0A return db.session.query(City).filter_by(id=id).one() or None%0A
|
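After the patch, getCities(id=0) doubles as a single-row getter: id 0 preserves the old all-cities ordering, anything else filters by primary key. Worth noting: Query.one() raises NoResultFound on a miss rather than returning None, so the "or None" in the new branch is dead code; .first() is the variant that actually returns None. A sketch of the dispatch, reusing the City model and a session as in the record above:

def get_cities(session, id=0):
    if id == 0:
        return session.query(City).order_by(
            City.default.desc(), City.name).all()
    # .one() raises sqlalchemy.orm.exc.NoResultFound on a miss,
    # so appending "or None" (as committed) can never take effect.
    return session.query(City).filter_by(id=id).one()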
3f190e10707895a357a9167add44fa8ae0a3cc31
|
Tidy up ebay messaging call
|
erpnext_ebay/ebay_do_requests.py
|
erpnext_ebay/ebay_do_requests.py
|
# -*- coding: utf-8 -*-
"""eBay requests which are not read-only, and can affect live eBay data.
Excludes item revision calls.
"""
from ebaysdk.exception import ConnectionError
from erpnext_ebay.ebay_constants import HOME_SITE_ID
from erpnext_ebay.ebay_get_requests import (
ebay_logger, get_trading_api, handle_ebay_error, test_for_message)
from erpnext_ebay.erpnext_ebay.doctype.ebay_manager_settings.ebay_manager_settings\
import use_sandbox
def trading_api_call(api_call, input_dict, site_id=HOME_SITE_ID,
force_sandbox_value=None, escape_xml=True):
"""Perform a TradingAPI call with an input dictionary."""
try:
api = get_trading_api(site_id=site_id, api_call=api_call,
warnings=True, timeout=20,
force_sandbox_value=force_sandbox_value,
escape_xml=escape_xml)
response = api.execute(api_call, input_dict)
except ConnectionError as e:
handle_ebay_error(e, input_dict)
return response.dict()
def AddMemberMessageAAQToPartner(user_id, item_id, body, subject,
message_details=None):
"""Send a message to a buyer or seller."""
if message_details is None:
message_details = {}
message_dict = {
'ItemID': item_id,
'MemberMessage': {
'Body': body,
'QuestionType': 'Shipping',
'RecipientID': user_id,
'Subject': subject
},
}
message_dict['MemberMessage'].update(message_details)
return trading_api_call('AddMemberMessageAAQToPartner', message_dict,
force_sandbox_value=True)
def add_item(item_code, item_details=None):
"""Add an item for testing purposes."""
# Check we are using the Sandbox
if not use_sandbox('AddItem'):
raise ValueError('Must use sandbox!')
if item_details is None:
item_details = {}
item_dict = {
'Country': 'GB',
'Currency': 'GBP',
'Description': '<p>This is a test item.</p>',
'DispatchTimeMax': 3,
'ListingDuration': 'GTC',
'ListingType': 'FixedPriceItem',
'Location': 'A galaxy far, far away',
'PaymentMethods': ['CashOnPickup', 'PayPal'],
'PayPalEmailAddress': 'test@example.com',
'PictureDetails': {
'PictureURL': ['https://picsum.photos/id/1020/500']
},
'PrimaryCategory': {
'CategoryID': '29223'
},
'Quantity': 1,
'ReturnPolicy': {
'ReturnsAcceptedOption': 'ReturnsAccepted'
},
'ShipToLocations': ['None'],
'Site': 'UK',
'SKU': item_code,
'StartPrice': 10.0,
'Title': 'TestItem: Test Item erpnext_ebay'
}
item_dict.update(item_details)
return trading_api_call('AddItem', {'Item': item_dict},
force_sandbox_value=True)
|
Python
| 0.00001
|
@@ -1062,38 +1062,33 @@
)%0A%0A%0Adef
-AddMemberMessageAAQToP
+ebay_message_to_p
artner(u
@@ -1115,29 +1115,24 @@
y, subject,%0A
-
@@ -1212,17 +1212,134 @@
r seller
-.
+ using%0A the AddMemberMessageAAQToPartner call.%0A%0A Note that HTML cannot be used in the body of this message.%0A
%22%22%22%0A%0A
@@ -1758,62 +1758,8 @@
dict
-,%0A force_sandbox_value=True
)%0A%0A%0A
@@ -2117,16 +2117,25 @@
ion': '%3C
+!%5BCDATA%5B%3C
p%3EThis i
@@ -2152,16 +2152,19 @@
tem.%3C/p%3E
+%5D%5D%3E
',%0A
|
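Wrapping the Description in a CDATA section keeps the raw HTML intact inside the request XML instead of relying on XML-escaping, which fits the call's escape_xml handling. The wrapping on its own:

description = '<p>This is a test item.</p>'
wrapped = '<![CDATA[{}]]>'.format(description)
print(wrapped)  # <![CDATA[<p>This is a test item.</p>]]>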
e83be594507c994069d20d5f2cd86c52905a52a6
|
Fix personal brain damage.
|
lib/plugin/disk_utilization.py
|
lib/plugin/disk_utilization.py
|
import datetime
import logging
import os
import snmpy.plugin
import subprocess
class disk_utilization(snmpy.plugin.TablePlugin):
def __init__(self, conf):
conf['table'] = [
{'dev': 'string'},
{'wait': 'integer'},
{'util': 'integer'},
]
snmpy.plugin.TablePlugin.__init__(self, conf);
def update(self):
os.environ['LC_TIME'] = 'POSIX'
disk = {}
date = datetime.datetime.now() - datetime.timedelta(minutes=20)
comm = [self.conf.get('sar_command', '/usr/bin/sar'), '-d', '-f', self.conf.get('sysstat_log', '/var/log/sysstat/sa%02d') % date.day, '-s', date.strftime('%H:%M:00')]
logging.debug('running sar command: %s', ' '.join(comm))
for line in subprocess.check_output(comm, stderr=open(os.devnull, 'w')).split('\n'):
logging.debug('line: %s', line)
part = line.split()
if part and part[0] != 'Average:' and part[1].startswith('dev'):
disk[part[-9]] = [int(float(part[-3])), int(float(part[-1]))]
self.clear()
for line in open('/proc/diskstats'):
name = 'dev{}-{}'.format(*line.split()[0:2])
self.append([line.split()[2]] + disk.get(name, [0, 0]))
|
Python
| 0.000001
|
@@ -345,9 +345,8 @@
onf)
-;
%0A%0A
|
65981662c7c0500a8428b5c332465cea32c813da
|
Use admin_authenticate for a non-SRP flow
|
lizard_auth_server/backends.py
|
lizard_auth_server/backends.py
|
"""Custom Django authentication backend
Copyright note: copied almost verbatim from backend.py in
https://github.com/metametricsinc/django-warrant
(BSD licensed)
"""
from boto3.exceptions import Boto3Error
from botocore.exceptions import ClientError
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.utils.six import iteritems
from warrant import Cognito
def cognito_to_dict(attr_list, mapping):
user_attrs = dict()
for i in attr_list:
name = mapping.get(i.get("Name"))
if name:
value = i.get("Value")
user_attrs[name] = value
return user_attrs
class CognitoUser(Cognito):
user_class = get_user_model()
# Mapping of Cognito User attribute name to Django User attribute name
COGNITO_ATTR_MAPPING = getattr(
settings,
"COGNITO_ATTR_MAPPING",
{
"email": "email",
"given_name": "first_name",
"family_name": "last_name",
},
)
def get_user_obj(self, username=None, attribute_list=[], metadata={}, attr_map={}):
user_attrs = cognito_to_dict(attribute_list, CognitoUser.COGNITO_ATTR_MAPPING)
django_fields = [f.name for f in CognitoUser.user_class._meta.get_fields()]
extra_attrs = {}
for k, v in user_attrs.items():
if k not in django_fields:
extra_attrs.update({k: user_attrs.pop(k, None)})
# The original code used COGNITO_CREATE_UNKNOWN_USERS, but in our case
# we always need the user (for the local session) so we always create
# it if missing. There's no update of attributes as we don't care
# about that after migration to cognito. We *do* set ``migrated_at``.
user, created = CognitoUser.user_class.objects.get_or_create(username=username)
if created:
logger.info("Created local user %s as they exist on cognito.", user)
for k, v in iteritems(user_attrs):
setattr(user, k, v)
user.migrated_at = django.utils.timezone.now()
user.save()
return user
class CognitoBackend(ModelBackend):
UNAUTHORIZED_ERROR_CODE = "NotAuthorizedException"
USER_NOT_FOUND_ERROR_CODE = "UserNotFoundException"
COGNITO_USER_CLASS = CognitoUser
def authenticate(self, username=None, password=None):
"""
Authenticate a Cognito User
:param username: Cognito username
:param password: Cognito password
:return: returns User instance of AUTH_USER_MODEL or None
"""
cognito_user = CognitoUser(
settings.COGNITO_USER_POOL_ID,
settings.COGNITO_APP_ID,
access_key=getattr(settings, "AWS_ACCESS_KEY_ID", None),
secret_key=getattr(settings, "AWS_SECRET_ACCESS_KEY", None),
username=username,
)
try:
cognito_user.authenticate(password)
except (Boto3Error, ClientError) as e:
return self.handle_error_response(e)
user = cognito_user.get_user()
return user
def handle_error_response(self, error):
error_code = error.response["Error"]["Code"]
if error_code in [
AbstractCognitoBackend.UNAUTHORIZED_ERROR_CODE,
AbstractCognitoBackend.USER_NOT_FOUND_ERROR_CODE,
]:
return None
raise error
|
Python
| 0
|
@@ -2962,16 +2962,22 @@
to_user.
+admin_
authenti
@@ -2991,16 +2991,188 @@
ssword)%0A
+ # %5E%5E%5E This uses ADMIN_NO_SRP_AUTH, but that's the old name for%0A # ADMIN_USER_PASSWORD_AUTH (which we need), so it will probably be%0A # OK.%0A
|
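Two observations on this record. First, warrant's Cognito wrapper exposes both flows: authenticate() performs SRP, while admin_authenticate() uses the admin non-SRP flow the commit switches to (the in-line comment notes ADMIN_NO_SRP_AUTH is the old name for ADMIN_USER_PASSWORD_AUTH). Second, as committed the module uses logger and django.utils.timezone in get_user_obj() without importing either, so it would need roughly the following near the top:

import logging

import django.utils.timezone

logger = logging.getLogger(__name__)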
4ceeed0eceff9d75b0bc3047c9a8e2fcb6877e31
|
Fix tasks reading of different course_id
|
lms/djangoapps/ecoapi/tasks.py
|
lms/djangoapps/ecoapi/tasks.py
|
from celery.task import task
from instructor.offline_gradecalc import student_grades , offline_grade_calculation
#TODO: add a better task management to prevent concurrent task execution with some course_id
@task()
def offline_calc(course_id):
offline_grade_calculation(course_id)
|
Python
| 0.999881
|
@@ -1,13 +1,12 @@
-%0A
from celery.
@@ -111,139 +111,451 @@
on %0A
-%0A#TODO: add a better task management to prevent concurrent task execution with some course_id%0A%0A@task()%0Adef offline_calc(course_id):
+from opaque_keys.edx.keys import CourseKey%0Afrom opaque_keys import InvalidKeyError%0Afrom opaque_keys.edx.locations import SlashSeparatedCourseKey%0A#TODO: add a better task management to prevent concurrent task execution with some course_id%0A%0A@task()%0Adef offline_calc(course_id):%0A try:%0A %09course_key = CourseKey.from_string(course_id)%0A except InvalidKeyError:%0A course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)%09
%0A
@@ -580,20 +580,21 @@
culation(course_
-id
+key
)%0A
|
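The diff adds the standard Open edX opaque-keys migration shim: parse the new course-id format first, then fall back to the deprecated slash-separated form. Extracted into a helper (imports taken verbatim from the diff):

from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey

def parse_course_key(course_id):
    try:
        return CourseKey.from_string(course_id)
    except InvalidKeyError:
        return SlashSeparatedCourseKey.from_deprecated_string(course_id)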
3511f1627ff7ff1bfe5db75d83b661b990eda6d2
|
Update transformation.py
|
transformations/add_hashtags/transformation.py
|
transformations/add_hashtags/transformation.py
|
from interfaces.SentenceOperation import SentenceOperation
from tasks.TaskTypes import TaskType
import random
from spacy import load
def extract_dep_nodes(dep_parse, be_class_verb):
"""method for extracting VERB, NSUBJ phrase and DOBJ phrase dependency nodes"""
verb = ""
nsubj_phrase = []
dobj_phrase = []
for token in dep_parse:
if token.dep_ == "ROOT":
if token.text.lower() not in be_class_verb:
verb = token.text
for token in dep_parse:
if token.dep_ == "dobj" and token.head.dep_ == "ROOT":
dobj_phrase.append(token.text.title())
elif token.dep_ == "dobj" and token.head.head.dep_ == "dobj":
dobj_phrase.append(token.text.title())
for token in dep_parse:
if token.dep_ == "nsubj" and token.head.dep_ == "ROOT":
nsubj_phrase.append(token.text.title())
elif token.dep_ == "nsubj" or token.head.head.dep_ == "nsubj":
nsubj_phrase.append(token.text.title())
return verb.title(), "".join(nsubj_phrase), "".join(dobj_phrase)
def generate_hashtag_from_noun_chunk(chunk_list, subj_obj_list):
"""method for generating hastags from noun chunks"""
hash_tag_list = []
if not chunk_list:
return None
else:
for chunk in chunk_list:
if chunk.lower() not in subj_obj_list:
chunk_words = [word.title() for word in chunk.split(" ")]
hash_tag_list.append("#"+"".join(chunk_words))
return hash_tag_list
def extract_noun_chunks_hashtag(dep_parse, subj_obj_list):
"""Method for extracting noun chunks from dependency parse"""
chunk_list = []
for chunk in dep_parse.noun_chunks:
if len(str(chunk.text.split(" ")))>0:
chunk_list.append(chunk.text)
return generate_hashtag_from_noun_chunk(chunk_list, subj_obj_list)
def extract_hashtags(sentence, nlp, be_class_verb, subj_obj_list):
# method for gathering all hashtags
dep_parse = nlp(sentence)
verb, nsubj, dobj = extract_dep_nodes(dep_parse, be_class_verb)
hash_tag_list = []
for dep_n in [verb, nsubj, dobj]:
if(dep_n != ""):
hash_tag_list.append("#"+dep_n)
if verb != "" and dobj != "":
hash_tag_list.append("#"+verb+dobj)
noun_chunks_hashtags = extract_noun_chunks_hashtag(dep_parse, subj_obj_list)
if noun_chunks_hashtags is not None:
for ht in noun_chunks_hashtags:
if ht not in hash_tag_list:
hash_tag_list.append(ht)
return verb, hash_tag_list
def get_hash_tags(sentence, be_class_verb, subj_obj_list, seed=0, max_outputs=1, nlp=None):
"""method for appending hashtags to sentence"""
verb, hashtag_list = extract_hashtags(sentence, nlp, be_class_verb, subj_obj_list)
transformation_list = []
for _ in range(max_outputs):
random.seed(0)
num = random.randint(1, len(hashtag_list))
        selected_hashtags = random.sample(hashtag_list, num)
        trans_sent = sentence + " ".join(selected_hashtags)
transformation_list.append(trans_sent)
return transformation_list
class HashtagGeneration(SentenceOperation):
tasks = [
TaskType.TEXT_CLASSIFICATION,
TaskType.TEXT_TO_TEXT_GENERATION,
TaskType.SENTIMENT_ANALYSIS,
]
languages = ["en"]
heavy = False
keywords = ["noise", "rule-based", "highly-accurate"]
def __init__(self, seed=0, max_outputs=1):
super().__init__(seed)
self.max_outputs=max_outputs
self.nlp = load('en_core_web_sm')
self.be_class_verb = ["is", "am", "are", "was", "were", "will", "shall"]
self.subj_obj_list = ["i", "you", "we", "they", "he", "she"]
def generate(self, sentence: str):
transformed_sentences = get_hash_tags(sentence, self.be_class_verb, self.subj_obj_list,
self.seed, self.max_outputs, self.nlp)
return transformed_sentences
# if __name__ == "__main__":
# import json
# from TestRunner import convert_to_snake_case
#
# tf = HashtagGeneration()
# test_cases = []
# input_sent = ["I love domino's pizza .",
# "Virat Kohli made a big hundred against Australian team .",
# "Many people like T20 cricket these days .",
# "Attention is all you need .",
# "Natural Language Processing research is awesome ."
# ]
#
# for i,sentence in enumerate(input_sent):
# transformed_sentence = tf.generate(sentence)
# test_cases.append({
# "class": tf.name(),
# "inputs": {"sentence": sentence},
# "outputs": [],}
# )
# for trans_sentence in transformed_sentence:
# test_cases[i]["outputs"].append({"sentence":trans_sentence})
# json_file = {"type": convert_to_snake_case("add_hashtags"), "test_cases": test_cases}
# print(json.dumps(json_file))
# for ip in input_sent:
# #random.seed(0)
# print(ip)
# res = tf.generate(ip)
# print(res)
|
Python
| 0.000001
|
@@ -3387,19 +3387,18 @@
high
-ly-accurate
+-precision
%22%5D%0A%0A
|
58e58ffcb58fc077c755c612a07a5634bbccbe4e
|
Annotate chemdner chemical entities
|
src/client.py
|
src/client.py
|
# coding=utf-8
from __future__ import division, unicode_literals
import sys
import requests
def main():
if sys.argv[1] == '0':
text = "Administration of a higher dose of indinavir should be considered when coadministering with megestrol acetate."
elif sys.argv[1] == "1":
text = "Primary Leydig cells obtained from bank vole testes and the established tumor Leydig cell line (MA-10) have been used to explore the effects of 4-tert-octylphenol (OP). Leydig cells were treated with two concentrations of OP (10(-4)M, 10(-8)M) alone or concomitantly with anti-estrogen ICI 182,780 (1M). In OP-treated bank vole Leydig cells, inhomogeneous staining of estrogen receptor (ER) within cell nuclei was found, whereas it was of various intensity among MA-10 Leydig cells. The expression of ER mRNA and protein decreased in both primary and immortalized Leydig cells independently of OP dose. ICI partially reversed these effects at mRNA level while at protein level abrogation was found only in vole cells. Dissimilar action of OP on cAMP and androgen production was also observed. This study provides further evidence that OP shows estrogenic properties acting on Leydig cells. However, its effect is diverse depending on the cellular origin. "
elif sys.argv[1] == "2":
text = "Azole class of compounds are well known for their excellent therapeutic properties. Present paper describes about the synthesis of three series of new 1,2,4-triazole and benzoxazole derivatives containing substituted pyrazole moiety (11a-d, 12a-d and 13a-d). The newly synthesized compounds were characterized by spectral studies and also by C, H, N analyses. All the synthesized compounds were screened for their analgesic activity by the tail flick method. The antimicrobial activity of the new derivatives was also performed by Minimum Inhibitory Concentration (MIC) by the serial dilution method. The results revealed that the compound 11c having 2,5-dichlorothiophene substituent on pyrazole moiety and a triazole ring showed significant analgesic and antimicrobial activity."
elif sys.argv[1] == "3":
text = "Primary Leydig cells obtained from bank vole testes and the established tumor Leydig cell line (MA-10) have been used to explore the effects of 4-tert-octylphenol (OP)."
elif sys.argv[1] == "4":
text = "Loss-of-function mutations in progranulin (GRN) cause ubiquitin- and TAR DNA-binding protein 43 (TDP-43)-positive frontotemporal dementia (FTLD-U), a progressive neurodegenerative disease affecting approximately 10% of early-onset dementia patients. Common variation in the miR-659 binding-site of GRN is a major risk factor for TDP43-positive frontotemporal dementia. In support of these findings, the neuropathology of homozygous rs5848 T-allele carriers frequently resembled the pathological FTLD-U subtype of GRN mutation carriers. "
else:
text = sys.argv[1]
data = {"text": text, "format": "json"}
# r = requests.post('http://10.10.4.63:8080/iice/chemical/entities', json=data)
# Submit new document
r = requests.post('http://10.10.4.63:8080/ibent/DOC{}'.format(sys.argv[1]), json=data)
print r.url, ":"
print r.text
# Fetch document
r = requests.get('http://10.10.4.63:8080/ibent/DOC{}'.format(sys.argv[1]))
print r.url, ":"
print r.text
r = requests.get('http://10.10.4.63:8080/ibent/entities/DOC{}/mirtex_train_mirna_sner'.format(sys.argv[1]))
print r.url, ":"
print r.text
# if len(sys.argv) > 2 and sys.argv[2] == "int":
# data = r.json()
# headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
# rel = requests.post('http://10.10.4.63:8080/iice/chemical/interactions', json=data)
# print
# print rel.json()
if __name__ == "__main__":
main()
|
Python
| 0.00003
|
@@ -3131,18 +3131,23 @@
-#
+print %22
Submit n
@@ -3149,32 +3149,33 @@
mit new document
+%22
%0D%0A r = re
@@ -3314,18 +3314,23 @@
-#
+print %22
Fetch do
@@ -3335,16 +3335,17 @@
document
+%22
%0D%0A
@@ -3464,32 +3464,64 @@
print r.text%0D%0A%0D%0A
+ print %22Annotate miRNA%22%0D%0A
r = requ
@@ -3668,16 +3668,213 @@
r.text%0D
+%0A%0D%0A print %22Annotate chemical%22%0D%0A r = requests.get('http://10.10.4.63:8080/ibent/entities/DOC%7B%7D/chemdner_train_all'.format(sys.argv%5B1%5D))%0D%0A print r.url, %22:%22%0D%0A print r.text%0D
%0A
|
c9ef1c40bb8b0179f19991d27309008c1805d6a6
|
add a skeletal twisted client (-t)
|
src/client.py
|
src/client.py
|
import sys
import time
import select
import socket
# local imports
import event
import message
import mars_math
RECV_SIZE = 4096 # should be way more than enough
class Client(object):
def __init__(self, host, port):
self.host = host
self.port = port
self.event_queue = event.EventQueue()
self.mtime = 0 # martian time, in milliseconds
self.vector = None
def log(self, s):
print s
def connect(self):
'''Creates self.sock and initializes it'''
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
def handle_message(self, msg):
'''Handles a message from the "server"'''
mess = message.parse_message(msg)
self.log('handle_message: %s' % mess)
if mess['type'] == 'initial':
self.vector = None # this is the only time the vector can be None
self.time_limit = mess['time_limit']
self.min_sens = mess['min_sens']
self.max_sens = mess['max_sens']
self.max_speed = mess['max_speed']
self.max_turn = mess['max_turn']
self.max_hard_turn = mess['max_hard_turn']
# this is a special case -- everything else should fall through and
# take an action based on the current state. for the initial
# message we wait until we get telemetry data (which happens
# IMMEDIATELY, i.e. at mtime = 0)
return
elif mess['type'] == 'telemetry':
self.mtime = mess['time_stamp']
pos = mars_math.Point(mess['x_pos'], mess['y_pos'])
ang = mars_math.Angle(mess['direction']['radians'])
self.vector = mars_math.Vector(pos, mess['speed'], ang)
elif mess['type'] == 'something else':
pass
# accelerate!
self.send_message('a;')
def send_message(self, msg):
self.sock.send(msg)
def schedule_event(self, callback, args, delta_t):
future_time = time.time() + delta_t
self.event_queue.insert(event.Event(callback, args, future_time))
def scheduler_wait(self):
self.log('scheduler_wait')
delta_t = self.event_queue.next_time()
got_message, _, _ = select.select([self.sock], [], [], delta_t)
if got_message:
data = self.sock.recv(RECV_SIZE)
if not data:
self.finish() # server has closed its connection
messages = [msg.strip() for msg in data.split(';') if msg.strip()]
for msg in messages:
self.handle_message(msg)
else:
event = self.event_queue.pop()
event.execute()
def run(self):
'''Runs the client'''
self.connect()
# loop in the scheduler
while True:
self.scheduler_wait()
def finish(self):
'''Runs when the server shuts down'''
self.log('Finishing...')
sys.exit(0)
if __name__ == '__main__':
icfp_client = Client(sys.argv[1], int(sys.argv[2]))
icfp_client.run()
|
Python
| 0
|
@@ -158,16 +158,116 @@
enough%0A%0A
+from twisted.internet import reactor%0Afrom twisted.internet.protocol import Protocol, ClientFactory%0A%0A
class Cl
@@ -2715,36 +2715,631 @@
0)%0A%0A
-if __name__ == '__main__':%0A%09
+class TwistedClient(Protocol): %0A def connectionMade(self): %0A print %22connection made%22%0A%0Aclass TwistedClientFactory(ClientFactory):%0A protocol = TwistedClient%0A%0A def clientConnectionFailed(self, connector, reason):%0A print %22connection failed:%22, reason%0A reactor.stop()%0A%0A def clientConnectionLost(self, reason):%0A print %22connection lost%22, reason%0A reactor.stop() %0A%0Aif __name__ == '__main__':%0A if '-t' in sys.argv:%0A sys.argv.remove('-t') %0A twisted = True%0A else:%0A twisted = False%0A%0A host = sys.argv%5B1%5D%0A port = int(sys.argv%5B2%5D)%0A%0A if not twisted:%0A
icfp
@@ -3386,17 +3386,24 @@
gv%5B2%5D))%0A
-%09
+
icfp_cli
@@ -3412,8 +3412,143 @@
t.run()%0A
+ else:%0A clientFactory = TwistedClientFactory()%0A reactor.connectTCP(host, port, clientFactory)%0A reactor.run()%0A%0A%0A
|
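One wrinkle in the skeleton as committed: Twisted calls ClientFactory.clientConnectionLost(connector, reason) with two arguments, but the new class defines clientConnectionLost(self, reason), so any lost connection would raise a TypeError before reactor.stop() runs. A corrected minimal factory, in the same Python 2 style as the record:

from twisted.internet import reactor
from twisted.internet.protocol import Protocol, ClientFactory

class TwistedClient(Protocol):
    def connectionMade(self):
        print "connection made"

class TwistedClientFactory(ClientFactory):
    protocol = TwistedClient
    def clientConnectionFailed(self, connector, reason):
        print "connection failed:", reason
        reactor.stop()
    def clientConnectionLost(self, connector, reason):  # connector was missing
        print "connection lost", reason
        reactor.stop()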
6d84f7eb25352c50e40950d0585c33bd1193649e
|
fix bug in init
|
sfa/util/osxrn.py
|
sfa/util/osxrn.py
|
import re
from sfa.util.xrn import Xrn
from sfa.util.config import Config
class OSXrn(Xrn):
def __init__(self, name=None, type=None, *args, **kwds):
config = Config()
if name is not None:
self.type = type
self.hrn = config.SFA_INTERFACE_HRN + "." + name
self.hrn_to_urn()
self.name = self.get_name()
def get_name(self):
self._normalize()
leaf = self.leaf
sliver_id_parts = leaf.split(':')
name = sliver_id_parts[0]
name = re.sub('[^a-zA-Z0-9_]', '', name)
return name
|
Python
| 0
|
@@ -137,22 +137,24 @@
e, *
-args, *
*kwds):
+%0A
%0A
@@ -325,16 +325,71 @@
o_urn()%0A
+ else:%0A Xrn.__init__(self, **kwds)
%0A
|
927adddca399fdfdb358f4b6128595816930ef7c
|
add stageClassifierQueue to stage.py
|
shablona/stage.py
|
shablona/stage.py
|
from datetime import datetime
from . import config
class Stage:
""""""
def __init__(self, target_space):
self.target_space = target_space
self.data_queues = {}
for stream in config.data_streams:
self.data_queues[stream] = []
if stream == 'nims-simulator':
self.data_queues[stream] = {}
def processDataBeforeStage(self, stream, data):
"""Performs whatever preprocessing necessitated for data from a
particular stream, adds data to appropriate target list, then returns
list of indices to post-processing data.
Assumes 'nims-simulator' passes a list inside a list with different tracks.
"""
# TODO: Point target.indices[stream] to return value when determine target
if stream == 'adcp':
data = [datetime.fromtimestamp(data[0]), data[1], data[2]]
self.target_space.input_data[stream].append(data)
return len(self.target_space.input_data[stream]) - 1
elif stream == 'pamguard':
# comm format matches desired, no need to change
self.target_space.input_data[stream].append(data)
return len(self.target_space.input_data[stream]) - 1
elif stream == 'nims':
pass
elif stream == 'nims-simulator':
indices = {}
timestamp = data[0]
for track in data[1]:
new_data = [timestamp, track['id'], track['pings_visible'],
track['first_ping'], track['target_strength'], track['width'],
track['height'], track['size_sq_m'], track['speed_mps'],
track['min_angle_m'], track['min_range_m'], track['max_angle_m'],
track['max_range_m'], track['last_pos_angle'], track['last_pos_range']]
self.target_space.input_data[stream].append(new_data)
indices[track['id']] = len(self.target_space.input_data[stream]) - 1
elif stream in config.data_streams:
raise ValueError("No stage processing functionality exists for" \
" data stream {0}.".format(stream))
else:
raise ValueError("Error processing data for stage. Stream {0} not" \
" defined in config file.".format(stream))
return indices
def addDataToStage(self, stream, data):
"""Calls processing function for data based on stream then adds data to
stream-specific queue.
"""
if stream not in config.data_streams:
raise ValueError("Error adding data to stage. Stream {0} not \
defined in config file.".format(stream))
stageIndices = self.processDataBeforeStage(stream, data)
if stream == 'nims-simulator':
for track_id in stageIndices:
if track_id not in self.data_queues[stream]:
self.data_queues[stream][track_id] = []
self.data_queues[stream][track_id].append(stageIndices[track_id])
else:
self.data_queues[stream].append(stageIndices)
def streamDataToTarget(self, stream, data):
"""Appends or creates a Target instance based on current staged data."""
# TODO: Ask Emma "how to tell that repeating data is attached to same target?"
# For NIMS, we know target_id is the same. PAMGuard? Start and end time?
pass
def triggerClassificationIfEligible(self):
"""Calls classifier.fit() if eligible given specified rules."""
# TODO: Vague documentation, any way to be more specific?
for track_id in self.data_queues['nims-simulator']:
if len(self.data_queues['nims-simulator'][track_id]) >=
config.data_streams_classifier_triggers['nims-simulator_max_pings']:
# create/update Target
if self.target_space.input_data
# remove from stage
self.data_queues['nims-simulator'][track_id] = []
# trigger classification
|
Python
| 0.000001
|
@@ -3168,20 +3168,22 @@
def
-streamDataTo
+createOrUpdate
Targ
@@ -3191,36 +3191,35 @@
t(self, stream,
-data
+key
):%0A %22%22%22Ap
@@ -3462,20 +3462,40 @@
-pass
+self.data_queues%5Bstream%5D
%0A%0A de
@@ -3976,16 +3976,34 @@
put_data
+%5B'nims-simulator'%5D
%0A
@@ -4089,32 +4089,32 @@
%5Btrack_id%5D = %5B%5D%0A
-
@@ -4138,8 +4138,755 @@
ication%0A
+%0A%0Aclass StageClassifierQueue:%0A %22%22%22%22%22%22%0A%0A def __init__(classifier, stage, prioritization='lifo'):%0A self.classifier = classifier%0A self.stage = stage%0A self.queue = %5B%5D%0A%0A def addTargetToQueue(target):%0A %22%22%22Adds a target object to the 'to be classified' queue using the%0A prioritization scheme defined for the class.%0A%0A Last target in list will be considered front of queue (first to be popped).%0A %22%22%22%0A if prioritization == 'lifo':%0A self.queue.append(target)%0A%0A def fitClassifications():%0A %22%22%22%22%22%22%0A while True:%0A target = self.queue.pop()%0A X = target_space.classifier_features%5Btarget.data_indices%5B'classifier'%5D%5D%0A self.classifier.fit(X)%0A
|
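StageClassifierQueue implements its 'lifo' prioritisation with a plain list: append() pushes, and pop() with no index removes the most recently added target. (Separately, note that triggerClassificationIfEligible() still contains incomplete statements after the patch: the wrapped '>=' condition appears to be missing a line continuation, and the dangling "if self.target_space.input_data" has no suite, so the module would not import as committed.) The LIFO behaviour in isolation:

queue = []
for target in ('t1', 't2', 't3'):
    queue.append(target)     # addTargetToQueue under the 'lifo' scheme
assert queue.pop() == 't3'   # front of queue == most recently added
assert queue.pop() == 't2'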
4067b94c212d1a41624b729a1aa114e6dca64d05
|
Remove redundant cmp=compare()
|
sipa/flatpages.py
|
sipa/flatpages.py
|
# -*- coding: utf-8 -*-
from flask import abort
from babel.core import UnknownLocaleError, Locale
from flask.ext.flatpages import FlatPages
from .babel import babel, locale_preferences
def compare(x, y):
if x.rank is None:
return -1
if y.rank is None:
return 1
if x.rank < y.rank:
return -1
else:
return 1
class Node:
def __init__(self, parent, id):
self.parent = parent
self.id = id
class Article(Node):
def __init__(self, parent, id):
super(Article, self).__init__(parent, id)
self.localized_pages = {}
self.default_page = None
@property
def rank(self):
try:
return self.localized_page.meta['rank']
except KeyError:
return 100
def __getattr__(self, attr):
try:
if attr is 'html':
return self.localized_page.html
else:
return self.localized_page.meta[attr]
except KeyError:
raise AttributeError()
@property
def localized_page(self):
available_locales = list(self.localized_pages.keys())
for locale in locale_preferences():
# Locale is unfortunately not hashable
# so locale in self.localized_pages does not work
for available_locale in available_locales:
if available_locale == locale:
localized_page = self.localized_pages.get(available_locale)
return localized_page
return self.default_page
class Category(Node):
def __init__(self, parent, id):
super(Category, self).__init__(parent, id)
self.categories = {}
self.articles = {}
def articles_itterator(self):
return iter(sorted(list(self.articles.values()), cmp=compare))
def __getattr__(self, attr):
try:
return getattr(self.articles['index'], attr, False)
except KeyError:
raise AttributeError()
def add_category(self, id):
category = self.categories.get(id)
if category is not None:
return category
category = Category(self, id)
self.categories[id] = category
return category
def add_article(self, page_name, page):
components = page_name.split('.')
if len(components) == 1:
article_id = page_name
locale = babel.default_locale
else:
try:
article_id = '.'.join(components[:-1])
locale = Locale(components[-1])
except UnknownLocaleError:
article_id = page_name
locale = babel.default_locale
article = self.articles.get(article_id)
if article is None:
article = Article(self, article_id)
article.default_page = page
self.articles[article_id] = article
article.localized_pages[locale] = page
if locale == babel.default_locale:
article.default_page = page
class CategorizedFlatPages:
def __init__(self):
self.flat_pages = FlatPages()
self.root_category = Category(None, '<root>')
def init_app(self, app):
self.flat_pages.init_app(app)
self._set_categories()
def __iter__(self):
return iter(sorted(list(self.root_category.categories.values()),
cmp=compare))
def get(self, category_id, article_id):
category = self.root_category.categories.get(category_id)
if category is None:
return None
return category.articles.get(article_id)
def get_articles_of_category(self, category_id):
barticles = []
category = self.root_category.categories.get(
category_id)
if category:
for a in list(category.articles.values()):
if a.id != 'index':
barticles.append(a)
return barticles
def get_or_404(self, category_id, article_id):
page = self.get(category_id, article_id)
if page is None:
abort(404)
return page
def _set_categories(self):
for page in self.flat_pages:
components = page.path.split('/')
parent = self.root_category
for category_id in components[:-1]:
parent = parent.add_category(category_id)
page_name = components[-1]
parent.add_article(page_name, page)
def reload(self):
self.flat_pages.reload()
self._set_categories()
cf_pages = CategorizedFlatPages()
|
Python
| 0.002022
|
@@ -1815,21 +1815,8 @@
s())
-, cmp=compare
))%0A%0A
@@ -3365,41 +3365,8 @@
s())
-,%0A cmp=compare
))%0A%0A
|
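The diff above drops a cmp= argument that Python 3 no longer accepts: sorted() lost the cmp parameter entirely. Where comparator semantics still matter (here, None ranks sorting first), the usual migration path is functools.cmp_to_key; a small self-contained sketch, with an Item class standing in for Article:

from functools import cmp_to_key

class Item:
    def __init__(self, rank):
        self.rank = rank

def compare(x, y):
    # Mirrors compare() from the record above: None ranks sort first.
    if x.rank is None:
        return -1
    if y.rank is None:
        return 1
    return -1 if x.rank < y.rank else 1

items = [Item(3), Item(None), Item(1)]
ordered = sorted(items, key=cmp_to_key(compare))   # Python 3 equivalent of cmp=compare
print([i.rank for i in ordered])                   # [None, 1, 3]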
3aba768c7a3c11f2941db36d0292cd5810433596
|
fix python2.7.9
|
src/api/util/timeutils.py
|
src/api/util/timeutils.py
|
from datetime import datetime
def total_seconds(td):
# Keep backward compatibility with Python 2.6 which doesn't have
# this method
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6
def convert_to_epoch(timestamp):
diff = (timestamp - datetime(1970, 1, 1))
seconds = int(total_seconds(diff))
return seconds
# Original fix for Py2.6: https://github.com/mozilla/mozdownload/issues/73
def total_seconds(dt):
# Keep backward compatibility with Python 2.6 which doesn't have
# this method
if hasattr(datetime, 'total_seconds'):
return dt.total_seconds()
else:
return (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 10**6) / 10**6
|
Python
| 0.998339
|
@@ -1,18 +1,4 @@
-from datetime
impo
@@ -9,17 +9,16 @@
atetime%0A
-%0A
def tota
@@ -320,16 +320,191 @@
stamp):%0A
+ if (type(timestamp) is datetime.date):%0A timestamp = datetime.datetime.fromordinal(timestamp.toordinal())%0A timestamp = timestamp.replace(tzinfo=None)%0A
%09diff =
@@ -516,16 +516,25 @@
stamp -
+datetime.
datetime
@@ -780,16 +780,16 @@
method%0A
-
%09if hasa
@@ -800,16 +800,25 @@
datetime
+.datetime
, 'total
|
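A quick sanity check of the Python 2.6 fallback used above; under true division (Python 3, or Python 2 with the future import) the hand-rolled formula matches timedelta.total_seconds():

from __future__ import division
from datetime import timedelta

def total_seconds(td):
    # Fold days and seconds into microseconds, then scale back down.
    return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10**6

td = timedelta(days=1, seconds=30, microseconds=500000)
print(total_seconds(td))    # 86430.5
print(td.total_seconds())   # 86430.5 on Python >= 2.7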
f98b30583fb9fca4674ad93afd242ffae7ac9f36
|
Fix tests
|
spacy/tests/conftest.py
|
spacy/tests/conftest.py
|
# coding: utf-8
from __future__ import unicode_literals
from ..en import English
from ..de import German
from ..es import Spanish
from ..it import Italian
from ..fr import French
from ..pt import Portuguese
from ..nl import Dutch
from ..sv import Swedish
from ..hu import Hungarian
from ..fi import Finnish
from ..bn import Bengali
from ..tokens import Doc
from ..strings import StringStore
from ..lemmatizer import Lemmatizer
from ..attrs import ORTH, TAG, HEAD, DEP
from io import StringIO, BytesIO
from pathlib import Path
import os
import pytest
LANGUAGES = [English, German, Spanish, Italian, French, Portuguese, Dutch,
Swedish, Hungarian, Finnish, Bengali]
@pytest.fixture(params=LANGUAGES)
def tokenizer(request):
lang = request.param
return lang.Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return English.Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return English.Defaults.create_vocab()
@pytest.fixture
def en_parser():
return English.Defaults.create_parser()
@pytest.fixture
def de_tokenizer():
return German.Defaults.create_tokenizer()
@pytest.fixture(scope='module')
def fr_tokenizer():
return French.Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return Hungarian.Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return Finnish.Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return Swedish.Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return Bengali.Defaults.create_tokenizer()
@pytest.fixture
def stringstore():
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return English.Defaults.create_entity()
@pytest.fixture
def lemmatizer(path):
    return English.Defaults.create_lemmatizer()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(scope="session")
def EN():
return English()
@pytest.fixture(scope="session")
def DE():
return German()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
|
Python
| 0.000003
|
@@ -1754,20 +1754,16 @@
matizer(
-path
):%0A r
|
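The one-argument removal above matters because pytest resolves every parameter of a fixture (or test) as a fixture of that name; an undefined parameter like path typically fails collection with a "fixture not found" error. A minimal sketch of the rule, with hypothetical fixture names:

import pytest

@pytest.fixture
def vocab():
    return {"run": "run"}

@pytest.fixture
def lemmatizer(vocab):        # OK: 'vocab' resolves to the fixture above
    return lambda word: vocab.get(word, word)

# A fixture declared as `def lemmatizer(path)` would error out unless
# a fixture named 'path' exists somewhere in scope.

def test_lemmatizer(lemmatizer):
    assert lemmatizer("run") == "run"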
d6b69f7d5868597426f7718165d4933af72e154d
|
Fix typo in command-line
|
spreadsplug/pdfbeads.py
|
spreadsplug/pdfbeads.py
|
# -*- coding: utf-8 -*-
# Copyright (C) 2014 Johannes Baiter <johannes.baiter@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, unicode_literals
import logging
import os
import subprocess
import time
from spreads.plugin import HookPlugin, OutputHookMixin
from spreads.util import MissingDependencyException, find_in_path
if not find_in_path('pdfbeads'):
raise MissingDependencyException("Could not find executable `pdfbeads` in"
" $PATH. Please install the appropriate"
" package(s)!")
logger = logging.getLogger('spreadsplug.pdfbeads')
class PDFBeadsPlugin(HookPlugin, OutputHookMixin):
__name__ = 'pdfbeads'
def output(self, path):
logger.info("Assembling PDF.")
path = path.absolute()
img_dir = path / 'data' / 'done'
pdf_file = path / 'data' / ' out' / "{0}.pdf".format(path.name)
img_files = [unicode(x.name) for x in sorted(img_dir.glob('*.tif'))]
cmd = ["pdfbeads", "-d"] + img_files + ["-o", unicode(pdf_file)]
logger.debug("Running " + " ".join(cmd))
# NOTE: pdfbeads only finds *html files for the text layer in the
# working directory...
os.chdir(unicode(img_dir))
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
last_count = 0
while proc.poll() is None:
current_count = sum(1 for x in img_dir.glob('*.jbig2'))
if current_count > last_count:
last_count = current_count
self.on_progressed.send(
self, progress=float(current_count)/len(img_files))
time.sleep(.1)
logger.debug("Output:\n{0}".format(proc.stdout.read()))
|
Python
| 0.000006
|
@@ -1519,17 +1519,16 @@
ata' / '
-
out' / %22
|
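The typo fixed above is easy to miss because path objects join components verbatim: with pathlib's / operator, a leading space in ' out' silently yields a different directory. Illustration with plain pathlib (the plugin itself may use another path library):

from pathlib import PurePosixPath

path = PurePosixPath("/scans/book")
print(path / "data" / " out")   # /scans/book/data/ out  (note the space)
print(path / "data" / "out")    # /scans/book/data/out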
7d6e7133fec83e4828053e3e273811dfbcd22572
|
Rename a member variable
|
sqliteschema/_schema.py
|
sqliteschema/_schema.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import, unicode_literals
import six
from tabledata import TableData
from ._const import MAX_VERBOSITY_LEVEL, SQLITE_SYSTEM_TABLE_LIST, SchemaHeader
from ._logger import logger
class SQLiteTableSchema(object):
@property
def table_name(self):
return self.__table_name
def __init__(self, table_name, schema_data):
self.__table_name = table_name
self.__schema_data = schema_data
if table_name in schema_data:
return
if table_name in SQLITE_SYSTEM_TABLE_LIST:
logger.debug("ignore sqlite system table: {:s}".format(table_name))
return
raise ValueError("'{}' table not included in the schema".format(table_name))
def __eq__(self, other):
return self.as_dict() == other.as_dict()
def __ne__(self, other):
return self.as_dict() != other.as_dict()
def as_dict(self):
return {self.table_name: self.__schema_data[self.table_name]}
def as_tabledata(self, verbosity_level=0):
value_matrix = []
for attribute in self.__schema_data[self.__table_name]:
value_matrix.append([
attribute.get(attr_name)
for attr_name in self.__get_target_schema_attr_name_list(verbosity_level)
])
return TableData(
table_name=self.__table_name,
header_list=self.__get_target_schema_attr_name_list(verbosity_level),
row_list=value_matrix)
def dumps(self, output_format=None, verbosity_level=MAX_VERBOSITY_LEVEL):
if output_format in ["text", "txt"]:
return self.__dumps_text(verbosity_level)
try:
import pytablewriter as ptw
except ImportError as e:
logger.error(e)
return None
if not output_format:
output_format = ptw.TableFormat.RST_GRID_TABLE.name_list[0]
writer = ptw.TableWriterFactory.create_from_format_name(output_format)
writer.stream = six.StringIO()
writer._dp_extractor.const_value_mapping = {True: "X", False: ""}
writer.from_tabledata(self.as_tabledata(verbosity_level=verbosity_level))
writer.write_table()
return writer.stream.getvalue()
def __get_target_schema_attr_name_list(self, verbosity_level):
if verbosity_level <= 0:
return (SchemaHeader.ATTR_NAME, SchemaHeader.DATA_TYPE)
return (SchemaHeader.ATTR_NAME, SchemaHeader.DATA_TYPE, SchemaHeader.PRIMARY_KEY,
SchemaHeader.NOT_NULL, SchemaHeader.UNIQUE, SchemaHeader.INDEX)
def __dumps_text(self, verbosity_level):
if verbosity_level <= 0:
return self.table_name
attr_map_list = self.as_dict()[self.table_name]
if verbosity_level == 1:
attr_desc_list = [attr_map.get(SchemaHeader.ATTR_NAME) for attr_map in attr_map_list]
return "{:s} ({:s})".format(self.table_name, ", ".join(attr_desc_list))
if verbosity_level == 2:
attr_desc_list = [
"{:s} {:s}".format(
attr_map.get(SchemaHeader.ATTR_NAME), attr_map.get(SchemaHeader.DATA_TYPE))
for attr_map in attr_map_list
]
return "{:s} ({:s})".format(self.table_name, ", ".join(attr_desc_list))
if verbosity_level >= 3:
attr_desc_list = []
for attr_map in attr_map_list:
attr_item_list = [
attr_map.get(SchemaHeader.ATTR_NAME),
attr_map.get(SchemaHeader.DATA_TYPE),
]
for key in [SchemaHeader.PRIMARY_KEY, SchemaHeader.NOT_NULL, SchemaHeader.UNIQUE]:
if attr_map.get(key):
attr_item_list.append(key)
attr_desc_list.append(" ".join(attr_item_list))
if verbosity_level == 3:
return "{:s} ({:s})".format(self.table_name, ", ".join(attr_desc_list))
if verbosity_level >= 4:
return "\n".join(
[
"{:s} (".format(self.table_name),
] + [
",\n".join([" {:s}".format(line) for line in attr_desc_list])
] + [
")"
])
return None
|
Python
| 0.000056
|
@@ -515,20 +515,19 @@
_schema_
-data
+map
= schem
@@ -1053,28 +1053,27 @@
lf.__schema_
-data
+map
%5Bself.table_
@@ -1192,20 +1192,19 @@
_schema_
-data
+map
%5Bself.__
|
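The rename above is safe to do locally because double-underscore attributes are name-mangled per class: self.__schema_map only ever expands to _ClassName__schema_map inside that class body, so no outside code can refer to it by the old name. A short demonstration with a stand-in class:

class Demo:
    def __init__(self):
        self.__schema_map = {"table": []}   # stored as _Demo__schema_map

d = Demo()
print(d._Demo__schema_map)   # {'table': []}: the mangled attribute name
# d.__schema_map would raise AttributeError; mangling is class-local.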
20cf0f2b6647b7a03fb5a9808f9d854975feb651
|
add shebang line to demo.py
|
demo.py
|
demo.py
|
from prettytask import Task, TaskGroup, Error, prompt
def main():
with Task("A quick task"):
pass
with Task("A task with a custom success message") as task:
task.ok("that went well!")
with Task("A task that fails") as task:
raise Error
with Task("A task that fails with a custom error"):
raise Error("crash and burn...")
try:
with Task("A task that fails with some other exception"):
x = 1 / 0
except ZeroDivisionError:
print(" ... the exception was reraised and caught as expected ...")
with TaskGroup("This marks the start of a set of tasks"):
with Task("Here's one"):
pass
with Task("Another one that fails"):
raise Error
with Task("Finally a third one") as task:
task.ok("all done!")
x = prompt("What is your name?", type=str, stripped=True, default="Foo")
print("Hello, {} ({})".format(x, type(x)))
y = prompt("What is your age?", type=int, default=42, retries=3)
print("Got it: {} years ({})".format(y, type(y)))
z = prompt("What is your favourite color?", choices=["red", "green", "blue"], default="green")
print("Color: {} ({})".format(z, type(z)))
w = prompt("Are we done?", type=bool, default=True)
print("Done? {} ({})".format(w, type(w)))
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -1,8 +1,31 @@
+#!/usr/bin/env python3%0A
from pre
|
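For context on the one-line diff above: with the shebang in place and the execute bit set, the kernel picks the interpreter from that first line, so the script runs directly. A sketch of the resulting file layout:

#!/usr/bin/env python3
# After `chmod +x demo.py`, this runs as ./demo.py;
# without the shebang it must be invoked as `python3 demo.py`.

def main():
    print("hello from an executable script")

if __name__ == "__main__":
    main()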
6684f08beaaf297eb6a0249ee17a6d90770b93e8
|
Update 1.0 protocol version spec in test_versions.py
|
bokeh/server/protocol/tests/test_versions.py
|
bokeh/server/protocol/tests/test_versions.py
|
###############################################################################
# #
# #
# #
# ******************************* #
# ****** !!! IMPORTANT !!! ****** #
# ******************************* #
# #
# #
# #
# ANY update to this file MUST be accompanied by the "PROTOCOL" tag. #
# #
# #
# #
# #
###############################################################################
from __future__ import absolute_import
import bokeh.server.protocol.versions as versions
def test_available_versions():
assert set(versions.spec.keys()) == {'1.0'}
def test_version_1_0():
assert versions.spec['1.0'] == (
("ACK", 1),
("OK", 1),
("ERROR", 1),
('SERVER-INFO-REPLY', 1),
('SERVER-INFO-REQ', 1),
("WORKING", 1),
)
|
Python
| 0
|
@@ -1660,9 +1660,120 @@
1),%0A
-)
+ ('PULL-DOC-REQ', 1),%0A ('PULL-DOC-REPLY', 1),%0A ('PUSH-DOC', 1),%0A ('PATCH-DOC', 1)%0A )%0A
|
f2ba354f862db5ee054bd09044a06a037c836b59
|
Add for_object argument and implementation to update_schedule
|
localized_recurrence/models.py
|
localized_recurrence/models.py
|
from datetime import datetime, timedelta
from dateutil.relativedelta import relativedelta
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from timezone_field import TimeZoneField
import fleming
import pytz
from .fields import DurationField
INTERVAL_CHOICES = (
('DAY', 'Day'),
('WEEK', 'Week'),
('MONTH', 'Month')
)
class LocalizedRecurrenceQuerySet(models.query.QuerySet):
def update_schedule(self, time=None):
"""Update the schedule times for all the recurrences in the queryset.
"""
time = time or datetime.utcnow()
for recurrence in self:
recurrence.next_scheduled = recurrence.utc_of_next_schedule(time)
recurrence.previous_scheduled = time
recurrence.save()
class LocalizedRecurrenceManager(models.Manager):
def get_queryset(self):
return LocalizedRecurrenceQuerySet(self.model)
def __getattr__(self, name):
"""
Written to allow both:
- LocalizedRecurrence.objects.update_schedule()
- LocalizedRecurrence.get(id=my_recurrence).update_schedule()
"""
return getattr(self.get_queryset(), name)
class LocalizedRecurrence(models.Model):
"""The information necessary to act on events in users local times.
"""
interval = models.CharField(max_length=18, default='DAY', choices=INTERVAL_CHOICES)
offset = DurationField(default=timedelta(0))
timezone = TimeZoneField(default='UTC')
previous_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))
next_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))
objects = LocalizedRecurrenceManager()
def utc_of_next_schedule(self, current_time):
local_time = fleming.convert_to_tz(current_time, self.timezone)
local_scheduled_time = replace_with_offset(local_time, self.offset, self.interval)
utc_scheduled_time = fleming.convert_to_tz(local_scheduled_time, pytz.utc, return_naive=True)
if utc_scheduled_time <= current_time:
additional_time = {
'DAY': timedelta(days=1),
'WEEK': timedelta(weeks=1),
'MONTH': relativedelta(months=1)
}
utc_scheduled_time = fleming.add_timedelta(
utc_scheduled_time, additional_time[self.interval], within_tz=self.timezone)
return utc_scheduled_time
class RecurrenceForObject(models.Model):
"""Updates to a recurrence for different objects.
"""
recurrence = models.ForeignKey('LocalizedRecurrence')
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
previous_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))
next_scheduled = models.DateTimeField(default=datetime(1970, 1, 1))
def replace_with_offset(dt, offset, interval):
"""Replace components of a datetime with those of a timedelta.
This replacement is done within the given interval. This means the
the final result, will the be a datetime, at the desired offset
given the interval.
"""
hours, minutes, seconds = offset.seconds // 3600, (offset.seconds // 60) % 60, offset.seconds % 60
interval = interval.lower()
if interval == 'day':
dt_out = dt.replace(hour=hours, minute=minutes, second=seconds)
elif interval == 'week':
dt_out = dt + timedelta(days=offset.days - dt.weekday())
dt_out = dt_out.replace(hour=hours, minute=minutes, second=seconds)
elif interval == 'month':
# TODO:
# - Modify so it works with the last day of the month
# - As per: http://stackoverflow.com/questions/42950/get-last-day-of-the-month-in-python
# - Add test for: e.g. February 30th.
dt_out = dt.replace(day=offset.days + 1, hour=hours, minute=minutes, second=seconds)
else:
raise ValueError('{i} is not a proper interval value'.format(i=interval))
return dt_out
|
Python
| 0
|
@@ -116,31 +116,24 @@
enttypes
-.fields
import
GenericF
@@ -124,33 +124,23 @@
import
-G
+g
eneric
-ForeignKey
%0Afrom dj
@@ -512,16 +512,33 @@
ime=None
+, for_object=None
):%0A
@@ -610,16 +610,685 @@
eryset.%0A
+%0A Args:%0A time - The time the schedule was checked. If None, defaults%0A to utcnow.%0A%0A for_object - Any instance of a django model. Allows a single%0A recurrence to be updated for multiple%0A users/entities/objects/etc.%0A%0A Side Effects:%0A If %60for_object%60 is None, updates the %60next_scheduled%60 and%0A %60previous_scheduled%60 fields for every recurrence in the%0A queryset.%0A%0A If %60for_object%60 is not None, creates or updates the%0A %60next_scheduled%60 and %60previous_scheduled%60 fields on a%0A %60RecurrenceForObject%60 instance associated with each%0A recurrence in the queryset.%0A%0A
@@ -1332,16 +1332,51 @@
tcnow()%0A
+ if for_object is None:%0A
@@ -1399,16 +1399,20 @@
n self:%0A
+
@@ -1481,16 +1481,20 @@
e(time)%0A
+
@@ -1538,32 +1538,36 @@
ime%0A
+
recurrence.save(
@@ -1568,16 +1568,395 @@
.save()%0A
+ else:%0A for recurrence in self:%0A obj, created = RecurrenceForObject.objects.get_or_create(%0A recurrence=recurrence,%0A content_object=for_object%0A )%0A obj.next_scheduled = recurrence.utc_of_next_schedule(time)%0A obj.previous_scheduled = time%0A obj.save()%0A
%0A%0Aclass
@@ -3858,16 +3858,24 @@
bject =
+generic.
GenericF
|
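The for_object branch added above keys rows to arbitrary model instances through Django's contenttypes framework. Underneath, a GenericForeignKey is just the (content_type, object_id) pair, which can be resolved explicitly; a sketch assuming a configured Django project, where instance is any saved model object:

from django.contrib.contenttypes.models import ContentType

def object_key(instance):
    # Resolve a model instance to the pair a GenericForeignKey stores.
    ctype = ContentType.objects.get_for_model(instance)
    return {"content_type": ctype, "object_id": instance.pk}

# e.g. RecurrenceForObject.objects.get_or_create(recurrence=rec, **object_key(obj))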
7b87b9c3abcd7833c83e9e1eab073fcc358af2ff
|
remove debug print
|
awslimitchecker/services/directoryservice.py
|
awslimitchecker/services/directoryservice.py
|
"""
awslimitchecker/services/directoryservice.py
The latest version of this package is available at:
<https://github.com/jantman/awslimitchecker>
################################################################################
Copyright 2015-2017 Jason Antman <jason@jasonantman.com>
This file is part of awslimitchecker, also known as awslimitchecker.
awslimitchecker is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
awslimitchecker is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with awslimitchecker. If not, see <http://www.gnu.org/licenses/>.
The Copyright and Authors attributions contained herein may not be removed or
otherwise altered, except to add the Author attribution of a contributor to
this work. (Additional Terms pursuant to Section 7b of the AGPL v3)
################################################################################
While not legally required, I sincerely request that anyone who finds
bugs please submit them at <https://github.com/jantman/pydnstest> or
to me via email, and that you send any contributions or improvements
either as a pull request on GitHub, or to me via email.
################################################################################
AUTHORS:
Jason Antman <jason@jasonantman.com> <http://www.jasonantman.com>
################################################################################
"""
import abc # noqa
import logging
from .base import _AwsService
from ..limit import AwsLimit
logger = logging.getLogger(__name__)
class _DirectoryserviceService(_AwsService):
service_name = 'Directory Service'
api_name = 'ds' # AWS API name to connect to (boto3.client)
def find_usage(self):
"""
Determine the current usage for each limit of this service,
and update corresponding Limit via
:py:meth:`~.AwsLimit._add_current_usage`.
"""
logger.debug("Checking usage for service %s", self.service_name)
self.connect()
for lim in self.limits.values():
lim._reset_usage()
resp = self.conn.get_directory_limits()
directoryLimits = resp['DirectoryLimits']
print(self.limits)
self.limits['CloudOnlyDirectories']._add_current_usage(
directoryLimits['CloudOnlyDirectoriesCurrentCount'],
aws_type='AWS::DirectoryService'
)
self.limits['CloudOnlyMicrosoftAD']._add_current_usage(
directoryLimits['CloudOnlyMicrosoftADCurrentCount'],
aws_type='AWS::DirectoryService'
)
self.limits['ConnectedDirectories']._add_current_usage(
directoryLimits['ConnectedDirectoriesCurrentCount'],
aws_type='AWS::DirectoryService'
)
self._have_usage = True
logger.debug("Done checking usage.")
def get_limits(self):
"""
Return all known limits for this service, as a dict of their names
to :py:class:`~.AwsLimit` objects.
:returns: dict of limit names to :py:class:`~.AwsLimit` objects
:rtype: dict
"""
if self.limits != {}:
return self.limits
limits = {}
limits['CloudOnlyDirectories'] = AwsLimit(
'CloudOnlyDirectories',
self,
200,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DirectoryService',
)
limits['CloudOnlyMicrosoftAD'] = AwsLimit(
'CloudOnlyMicrosoftAD',
self,
200,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DirectoryService',
)
limits['ConnectedDirectories'] = AwsLimit(
'ConnectedDirectories',
self,
200,
self.warning_threshold,
self.critical_threshold,
limit_type='AWS::DirectoryService',
)
self.limits = limits
return limits
def _update_limits_from_api(self):
"""
Call the service's API action to retrieve limit/quota information, and
update AwsLimit objects in ``self.limits`` with this information.
"""
logger.debug('Setting DirectoryService limits from API')
self.connect()
resp = self.conn.get_directory_limits()
directoryLimits = resp['DirectoryLimits']
self.limits['CloudOnlyDirectories']._set_api_limit(
directoryLimits['CloudOnlyDirectoriesLimit']
)
self.limits['CloudOnlyMicrosoftAD']._set_api_limit(
directoryLimits['CloudOnlyMicrosoftADLimit']
)
self.limits['ConnectedDirectories']._set_api_limit(
directoryLimits['ConnectedDirectoriesLimit']
)
def required_iam_permissions(self):
"""
Return a list of IAM Actions required for this Service to function
properly. All Actions will be shown with an Effect of "Allow"
and a Resource of "*".
:returns: list of IAM Action strings
:rtype: list
"""
return [
"ds:GetDirectoryLimits",
]
|
Python
| 0.000008
|
@@ -2613,35 +2613,8 @@
s'%5D%0A
- print(self.limits)%0A
|
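The deleted print(self.limits) above is the classic stray debug statement; since the module already defines a logger, the equivalent diagnostic belongs at DEBUG level, where callers can silence or capture it through standard logging configuration. Sketch:

import logging

logger = logging.getLogger(__name__)

def find_usage(limits):
    # Instead of print(limits): emit at DEBUG level through the module logger.
    logger.debug("current limits: %s", limits)

logging.basicConfig(level=logging.DEBUG)
find_usage({"CloudOnlyDirectories": 200})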
a6aec17cff730914c0901db9e9ab9bb4da660306
|
Switch elm-format to post save
|
elm_format.py
|
elm_format.py
|
from __future__ import print_function
import subprocess
import os, os.path
import re
import sublime, sublime_plugin
class ElmFormatCommand(sublime_plugin.TextCommand):
def run(self, edit):
settings = sublime.load_settings('Elm Language Support.sublime-settings')
path = settings.get('elm_paths', '')
if path:
old_path = os.environ['PATH']
os.environ['PATH'] = os.path.expandvars(path + ';$PATH')
command = ['elm-format', self.view.file_name(), '--yes']
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
if path:
os.environ['PATH'] = old_path
output, errors = p.communicate()
if settings.get('debug', False):
string_settings = sublime.load_settings('Elm User Strings.sublime-settings')
print(string_settings.get('logging.prefix', '') + '(elm-format) ' + str(output.strip()), '\nerrors: ' + str(errors.strip()))
if str(errors.strip()):
print('Your PATH is: ', os.environ['PATH'])
class ElmFormatOnSave(sublime_plugin.EventListener):
def on_pre_save(self, view):
sel = view.sel()[0]
region = view.word(sel)
scope = view.scope_name(region.b)
if scope.find('source.elm') != -1:
settings = sublime.load_settings('Elm Language Support.sublime-settings')
if settings.get('elm_format_on_save', True):
regex = settings.get('elm_format_filename_filter', '')
if not (len(regex) > 0 and re.search(regex, view.file_name()) is not None):
view.run_command('elm_format')
|
Python
| 0
|
@@ -1038,18 +1038,19 @@
def on_p
-re
+ost
_save(se
|
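The pre-to-post switch above makes sense given what the plugin does: elm-format rewrites the file on disk, so running it before Sublime writes the buffer lets the save overwrite the formatter's output; on_post_save formats after the buffer has hit disk. A reduced sketch of the listener shape (Sublime Text plugin API):

import sublime_plugin

class FormatOnSave(sublime_plugin.EventListener):
    def on_post_save(self, view):
        # Run after the buffer is written, so the external formatter's
        # on-disk edits are not clobbered by the save itself.
        if view.match_selector(0, "source.elm"):
            view.run_command("elm_format")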
d679f7dbedd3decc7cd4abc782d4c0fae0b872ea
|
Enable the ability to import H264 SubMe, MotionEstimationMethod and Trellis
|
bitmovin/resources/enums/__init__.py
|
bitmovin/resources/enums/__init__.py
|
from .status import Status
from .aac_channel_layout import AACChannelLayout
from .ac3_channel_layout import AC3ChannelLayout
from .aws_cloud_region import AWSCloudRegion
from .badapt import BAdapt
from .cloud_region import CloudRegion
from .crop_filter_unit import CropFilterUnit
from .google_cloud_region import GoogleCloudRegion
from .h264_level import H264Level
from .h264_profile import H264Profile
from .h265_level import H265Level
from .h265_profile import H265Profile
from .vp9_aq_mode import VP9AQMode
from .vp9_arnr_type import VP9ARNRType
from .vp9_quality import VP9Quality
from .max_ctu_size import MaxCTUSize
from .motion_search import MotionSearch
from .mv_prediction_mode import MVPredictionMode
from .tu_inter_depth import TUInterDepth
from .tu_intra_depth import TUIntraDepth
from .encoder_version import EncoderVersion
from .selection_mode import SelectionMode
from .acl_permission import ACLPermission
from .fmp4_representation_type import FMP4RepresentationType
from .webm_representation_type import WebMRepresentationType
from .id3_tag_position_mode import ID3TagPositionMode
from .deinterlace_mode import DeinterlaceMode
from .picture_field_parity import PictureFieldParity
from .audio_mix_filter_channel_layout import AudioMixFilterChannelLayout
from .audio_mix_filter_channel_type import AudioMixFilterChannelType
from .ftp_transfer_version import FTPTransferVersion
from .thumbnail_unit import ThumbnailUnit
from .pixel_format import PixelFormat
from .font import Font
from .chroma_location import ChromaLocation
from .color_primaries import ColorPrimaries
from .color_range import ColorRange
from .color_space import ColorSpace
from .color_transfer import ColorTransfer
from .input_color_range import InputColorRange
from .input_color_space import InputColorSpace
from .set_rai_on_au import SetRaiOnAu
from .h264_b_pyramid import H264BPyramid
from .h264_nal_hrd import H264NalHrd
from .mp2_channel_layout import MP2ChannelLayout
from .h264_partition import H264Partition
from .h264_interlace_mode import H264InterlaceMode
from .s3_sig_version import S3SignatureVersion
from .watermark_unit import WatermarkUnit
from .he_aac_signaling import HeAacSignaling
from .interlace_mode import InterlaceMode
from .vertical_low_pass_filtering_mode import VerticalLowPassFilteringMode
from .scaling_algorithm import ScalingAlgorithm
from .encoding_mode import EncodingMode
from .audio_video_sync_mode import AudioVideoSyncMode
from .stream_mode import StreamMode
from .playready_method import PlayReadyMethod
|
Python
| 0
|
@@ -2516,8 +2516,151 @@
yMethod%0A
+from .h264_trellis import H264Trellis%0Afrom .h264_sub_me import H264SubMe%0Afrom .h264_motion_estimation_method import H264MotionEstimationMethod%0A
|
eec2d410ad39ff12e6b60fcff9f51540719b6063
|
fix crash when scan is not made and force + scan are set to no
|
modules/processing/irma.py
|
modules/processing/irma.py
|
# Copyright (C) 2014-2016 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import logging
import time
import urlparse
try:
import requests
HAVE_REQUESTS = True
# Disable requests/urllib3 debug & info messages.
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
except ImportError:
HAVE_REQUESTS = False
from lib.cuckoo.common.abstracts import Processing
from lib.cuckoo.common.exceptions import CuckooOperationalError
from lib.cuckoo.common.utils import sha256_file
log = logging.getLogger(__name__)
class Irma(Processing):
"""Gets antivirus signatures from IRMA for various results.
Currently obtains IRMA results for the target sample.
"""
# IRMA statuses https://github.com/quarkslab/irma-cli/blob/master/irma/apiclient.py
IRMA_FINISHED_STATUS = 50
def _request_json(self, url, **kwargs):
"""Wrapper around doing a request and parsing its JSON output."""
try:
r = requests.get(url, timeout=self.timeout, **kwargs)
return r.json() if r.status_code == 200 else {}
except (requests.ConnectionError, ValueError) as e:
raise CuckooOperationalError(
"Unable to fetch IRMA results: %r" % e.message
)
def _post_json(self, url, **kwargs):
"""Wrapper around doing a post and parsing its JSON output."""
try:
r = requests.post(url, timeout=self.timeout, **kwargs)
return r.json() if r.status_code == 200 else {}
except (requests.ConnectionError, ValueError) as e:
raise CuckooOperationalError(
"Unable to fetch IRMA results: %r" % e.message
)
def _scan_file(self, filepath, force):
# Initialize scan in IRMA.
init = self._post_json(urlparse.urljoin(self.url, "/api/v1.1/scans"))
log.debug("Scanning file: %s", filepath)
# Post file for scanning.
files = {
"files": open(filepath, "rb"),
}
url = urlparse.urljoin(
self.url, "/api/v1.1/scans/%s/files" % init.get("id")
)
self._post_json(url, files=files,)
# launch posted file scan
params = {
"force": force,
}
url = urlparse.urljoin(
self.url, "/api/v1.1/scans/%s/launch" % init.get("id")
)
requests.post(url, json=params)
result = None
while result is None or result.get("status") != self.IRMA_FINISHED_STATUS:
log.debug("Polling for results for ID %s", init.get("id"))
url = urlparse.urljoin(
self.url, "/api/v1.1/scans/%s" % init.get("id")
)
result = self._request_json(url)
time.sleep(1)
return
def _get_results(self, sha256):
# Fetch list of scan IDs.
results = self._request_json(
urlparse.urljoin(self.url, "/api/v1.1/files/%s" % sha256)
)
if not results.get("items"):
log.info("File %s hasn't been scanned before", sha256)
return
result_id = results["items"][-1]["result_id"]
return self._request_json(
urlparse.urljoin(self.url, "/api/v1.1/results/%s" % result_id)
)
def run(self):
"""Runs IRMA processing
@return: full IRMA report.
"""
if not HAVE_REQUESTS:
raise CuckooOperationalError(
"The IRMA processing module requires the requests "
"library (install with `pip install requests`)"
)
self.key = "irma"
""" Fall off if we don't deal with files """
if self.results.get("info").get("category") != "file":
log.debug("IRMA supports only file scanning !")
return {}
self.url = self.options.get("url")
self.timeout = int(self.options.get("timeout", 60))
self.scan = int(self.options.get("scan", 0))
self.force = int(self.options.get("force", 0))
sha256 = sha256_file(self.file_path)
results = self._get_results(sha256)
if self.force or (not results and self.scan):
log.info("File scan requested: %s", sha256)
self._scan_file(self.file_path, self.force)
results = self._get_results(sha256) or {}
""" FIXME! could use a proper fix here
that probably needs changes on IRMA side aswell
--
related to https://github.com/elastic/elasticsearch/issues/15377
entropy value is sometimes 0 and sometimes like 0.10191042566270775
other issue is that results type changes between string and object :/
"""
for idx, result in enumerate(results["probe_results"]):
if result["name"] == "PE Static Analyzer":
log.debug("Ignoring PE results at index {0}".format(idx))
results["probe_results"][idx]["results"] = "... scrapped ..."
return results
|
Python
| 0.000039
|
@@ -4244,24 +4244,109 @@
%0A if
+not self.force and not self.scan and not results:%0A return %7B%7D%0A elif
self.force o
|
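The added guard above is a plain early return: with force and scan both off and no stored results, the method now hands back an empty report instead of falling through to results["probe_results"] on an empty dict, which was the crash. The shape of the fix, reduced to a runnable sketch with hypothetical names:

def build_report(force, scan, results):
    # Guard clause: nothing scanned and nothing on record means an
    # empty report, not a KeyError further down.
    if not force and not scan and not results:
        return {}
    return results.get("probe_results", [])

print(build_report(False, False, {}))                      # {}
print(build_report(False, False, {"probe_results": [1]}))  # [1]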
27f11ce9b11d1f2dbaa2188e9df6d038710e6619
|
Fix broken import from refactored code
|
jreport/jreport.py
|
jreport/jreport.py
|
import datetime
import functools
import pprint
import string
import colors
import dateutil.parser
import requests
from urlobject import URLObject
import yaml
from .util import paginated_get
class JObj(object):
def __init__(self, obj):
self.obj = obj
def __repr__(self):
return u"jreport.{cls}({obj!r})".format(
cls=self.__class__.__name__, obj=self.obj,
)
def __getitem__(self, key):
val = self.obj
for k in key.split("."):
val = val[k]
return val
def __setitem__(self, key, value):
assert "." not in key
self.obj[key] = value
def get(self, *args, **kwargs):
return self.obj.get(*args, **kwargs)
def __contains__(self, item):
return item in self.obj
def format(self, fmt):
return string.Formatter().vformat(fmt, (), JFormatObj(self.obj))
def pprint(self):
pprint.pprint(self.obj)
class JFormatObj(object):
def __init__(self, obj):
self.obj = obj
def __repr__(self):
return u"jreport.{cls}({obj!r})".format(
cls=self.__class__.__name__, obj=self.obj,
)
def __getitem__(self, key):
if key == "":
return ""
if key.startswith("'"):
return key.strip("'")
return Formattable(self.obj[key])
@functools.total_ordering
class Formattable(object):
def __init__(self, v):
self.v = v
def __repr__(self):
return u"jreport.{cls}({v!r})".format(
cls=self.__class__.__name__, v=self.v,
)
def __eq__(self, other):
return self.v == other.v
def __lt__(self, other):
return self.v < other.v
def __str__(self):
return str(self.v)
def __getattr__(self, a):
return Formattable(self.v[a])
def __format__(self, spec):
v = self.v
for spec in spec.split(':'):
try:
v = format(v, spec)
except ValueError:
# Hmm, must be a custom one.
if spec.startswith("%"):
v = format(dateutil.parser.parse(v), spec)
elif spec in colors.COLORS:
v = colors.color(unicode(v), fg=spec)
elif spec in colors.STYLES:
v = colors.color(v, style=spec)
elif spec == "ago":
v = ago(v)
elif spec == "oneline":
v = " ".join(v.split())
elif spec == "pad":
v = " " + v + " "
elif spec == "spacejoin":
v = " ".join(v)
else:
raise Exception("Don't know formatting {!r}".format(spec))
return v
def english_units(num, unit, brief):
if brief:
return "{num}{unit}".format(num=num, unit=unit[0])
else:
s = "" if num == 1 else "s"
return "{num} {unit}{s}".format(num=num, unit=unit, s=s)
def ago(v, detail=2, brief=True):
"""Convert a datetime string into a '4 hours ago' string."""
then = dateutil.parser.parse(v)
then = then.replace(tzinfo=None)
now = datetime.datetime.utcnow()
delta = now-then
chunks = []
if delta.days:
chunks.append(english_units(delta.days, "day", brief))
hours, minutes = divmod(delta.seconds, 60*60)
minutes, seconds = divmod(minutes, 60)
if hours:
chunks.append(english_units(hours, "hour", brief))
if minutes:
chunks.append(english_units(minutes, "minute", brief))
if seconds:
chunks.append(english_units(seconds, "second", brief))
return " ".join(chunks[:detail])
class JReport(object):
def __init__(self, debug=""):
# If there's an auth.yaml, use it!
self.auth = {}
try:
auth_file = open("auth.yaml")
except IOError:
pass
else:
with auth_file:
self.auth = yaml.load(auth_file)
self.debug = debug or ""
if "http" in self.debug:
# Yuck, but this is what requests says to do.
import httplib
httplib.HTTPConnection.debuglevel = 1
def __repr__(self):
return u"jreport.{cls}({debug!r})".format(
cls=self.__class__.__name__, debug=self.debug,
)
def _prep(self, url, auth, params):
url = URLObject(url).set_query_params(params or {})
if not auth:
auth = tuple(self.auth.get(url.hostname, {}).get("auth", ()))
return url, auth
def get_json_array(self, url, auth=None, params=None):
url, auth = self._prep(url, auth, params)
debug = ("json" in self.debug)
return [JObj(item) for item in paginated_get(url, debug=debug, auth=auth)]
def get_json_object(self, url, auth=None, params=None):
url, auth = self._prep(url, auth, params)
result = requests.get(url, auth=auth).json()
if "json" in self.debug:
pprint.pprint(result)
return JObj(result)
|
Python
| 0.000006
|
@@ -161,13 +161,15 @@
rom
-.util
+helpers
imp
|
2e3e7e1bf92e342e0ed14c672b7c5a600f0ba3a2
|
Fix ros__parameters in game settings script
|
bitbots_utils/bitbots_utils/game_settings.py
|
bitbots_utils/bitbots_utils/game_settings.py
|
#!/usr/bin/env python3
import sys
import yaml
import os
# path to the game settings yaml and to the game setting options
SETTING_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
"bitbots_utils", "config", "game_settings.yaml")
OPTIONS_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
"bitbots_utils","config", "game_settings_options.yaml")
def provide_config(path):
"""
    Reads the YAML file at the given path.
    :param path: file path of the YAML file
:return: config as dict
"""
if os.path.exists(path):
try:
with open(path, 'r') as f:
config = yaml.load(f, Loader=yaml.UnsafeLoader)
except yaml.YAMLError as exc:
print("Error in configuration file:", exc)
else:
config = {}
print("The config yaml with path {}, does not exist.".format(path))
return config
def ask_for_config_option(name: object, definition: object, current_value: object = None, explanation: object = None) -> object:
"""
:param name: name of the config-option-value e.g. robot number
:param definition: possible options for the value, type of input
:param current_value: the already set value
:param explanation: describes options
:return: new chosen value for this config option, can be the old one
"""
print('=============== {} ==============='.format(name))
if type(definition) is range:
definition = list(definition)
print("Options: {}".format(definition))
print("Explanations: {}".format(explanation))
if current_value is not None:
input_prompt = 'Value ({}): '.format(current_value)
else:
input_prompt = 'Value: '
value_is_valid = False
while not value_is_valid:
new_value = input(input_prompt).lower()
if new_value == '':
if current_value is not None:
new_value = current_value
value_is_valid = True
else:
value_is_valid = check_new_value(new_value, definition)
def_type = type(definition[0])
return def_type(new_value)
def check_new_value(new_value: str, definition) -> bool:
"""
checks with definition if new value is a valid input
:param new_value: input to set as new value
:param definition: valid options for new value
:return: true if valid, false if not
"""
definitiontype = type(definition[0])
try:
        new_value = definitiontype(new_value)  # cast the input to the type of the definition entries
    except (TypeError, ValueError):
        print("{} could not be converted to a {}. Are you sure it is in the right format?".format(new_value, definitiontype))
if new_value in definition:
return True
else:
# print(new_value, definition)
print(' {} no valid option'.format(new_value))
return False
def main():
config = provide_config(SETTING_PATH)
#config = config['parameter_blackboard']['ros_parameters']
ros_parameters = config['parameter_blackboard']['ros_parameters']
if ros_parameters is None:
ros_parameters = {}
config['parameter_blackboard']['ros_parameters'] = ros_parameters
options = provide_config(OPTIONS_PATH)
for key in options.keys():
if key in ros_parameters.keys():
ros_parameters[key] = ask_for_config_option(key, options[key]['options'], ros_parameters[key],
options[key]['explanation'])
else:
value = ask_for_config_option(key, options[key]['options'], None,
options[key]['explanation'])
ros_parameters.update({key : value})
with open(SETTING_PATH, 'w') as f:
yaml.dump(config, f, default_flow_style=False)
if __name__ == '__main__':
main()
|
Python
| 0.000086
|
@@ -3136,32 +3136,33 @@
ackboard'%5D%5B'ros_
+_
parameters'%5D%0A
@@ -3253,32 +3253,33 @@
ackboard'%5D%5B'ros_
+_
parameters'%5D = r
|
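The key fixed above follows the ROS 2 convention: parameter YAML files nest values under a literal ros__parameters key (two underscores) below the node name, so the settings script has to read and write that exact spelling. A minimal sketch of the expected dict shape (team_color is a hypothetical option):

config = {
    "parameter_blackboard": {
        "ros__parameters": {"robot_number": 1}
    }
}

ros_parameters = config["parameter_blackboard"]["ros__parameters"]
ros_parameters.setdefault("team_color", "blue")
print(config)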
1d6d97a0f03723d4f2872f1c7912fb3c1b6ff5dd
|
Revert "[ADD] account_financial_report to mozaik requirements"
|
mozaik_all/__manifest__.py
|
mozaik_all/__manifest__.py
|
# Copyright 2018 ACSONE SA/NV
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
{
"name": "Mozaik: All Modules Loader",
"summary": """
Loads all Mozaik modules""",
"version": "14.0.1.1.9",
"license": "AGPL-3",
"author": "ACSONE SA/NV",
"website": "https://github.com/OCA/mozaik",
"category": "Political Association",
"depends": [
# 'disable_tracking_installation',
# 'disable_user_welcome_message',
"inherit_abstract_view",
# 'ir_rule_child_of',
# 'mail_job_priority',
"mozaik_abstract_model",
"mozaik_account",
"mozaik_address",
"mozaik_address_local_street",
"mozaik_ama_attachment",
"mozaik_ama_indexed_on_website",
"mozaik_automatic_supporter",
"mozaik_mass_mailing_automation",
"mass_mailing_distribution_list",
"mozaik_communication",
"mozaik_committee",
"mozaik_duplicate",
"mozaik_dynamical_time_filter",
"mozaik_event_chatter",
"mozaik_event_description",
"mozaik_event_export",
"mozaik_event_involvement_category",
"mozaik_event_question_involvement_category",
"mozaik_event_membership_request_involvement",
"mozaik_event_partner_firstname",
"mozaik_event_publish_date",
"mozaik_event_registration_add_zip",
"mozaik_event_security",
"mozaik_event_thesaurus",
"mozaik_event_tickbox_question",
"mozaik_involvement",
"mozaik_involvement_followup",
"mozaik_mass_mailing_access_rights",
"mozaik_mass_mailing_automation",
"mozaik_mass_mailing_bounce_counter",
"mozaik_mass_mailing_dynamic_placeholder",
"mozaik_mass_mailing_immediate_sending",
"mozaik_mass_mailing_int_instance",
"mozaik_mass_mailing_mail_creation",
"mozaik_mass_mailing_multi_sending",
"mozaik_mass_mailing_sending_cron",
"mozaik_mass_mailing_template",
"mozaik_membership",
"mozaik_membership_request_sensitive_data",
"mozaik_partner_assembly",
"mozaik_partner_button_sms",
"mozaik_partner_disabled",
"mozaik_partner_fields",
"mozaik_partner_global_opt_out",
"mozaik_partner_unemployed",
"mozaik_partner_website",
# 'mozaik_partner_unauthorized',
"mozaik_person",
"mozaik_person_deceased",
# 'mozaik_relation_coordinate',
"mozaik_security",
"mozaik_structure",
# 'mozaik_subscription_price',
"mozaik_thesaurus",
"mozaik_tools",
"mozaik_virtual_assembly_instance",
"mozaik_virtual_partner_mandate",
"mozaik_virtual_partner_involvement",
"mozaik_virtual_partner_instance",
"mozaik_virtual_partner_mass_mailing",
"mozaik_virtual_partner_membership",
"mozaik_virtual_partner_relation",
# 'partner_usual_firstname',
"mozaik_mandate",
"mozaik_mandate_partner_fields",
"mozaik_mandate_female_label",
"mozaik_mandate_category_sequence",
"mozaik_mandate_show_website",
"mozaik_membership_card",
"mozaik_membership_mandate",
"mozaik_membership_price",
"mozaik_membership_request",
"mozaik_membership_request_autovalidate",
"mozaik_membership_request_from_registration",
"mozaik_membership_request_protected_values",
"mozaik_petition",
"mozaik_petition_membership_request_involvement",
"mozaik_petition_involvement_category",
"mozaik_petition_question_involvement_category",
"mozaik_petition_thesaurus",
"mozaik_retrocession_mode",
"mozaik_survey_chatter",
"mozaik_survey_involvement_category",
"mozaik_survey_export_csv",
"mozaik_survey_security",
"mozaik_survey_membership_request_involvement",
"mozaik_survey_publish_date",
"mozaik_survey_question_involvement_category",
"mozaik_survey_scoring",
"mozaik_survey_thesaurus",
"mozaik_website_event_track",
"mozaik_membership_payment",
"mozaik_membership_payment_stripe",
# "mass_mail_queue_job",
# OCA/account-financial-reporting
"account_financial_report",
],
"data": [
# 'views/mail_followers.xml',
"views/res_partner.xml",
],
"installable": True,
"external_dependencies": {"python": ["openupgradelib"]},
}
|
Python
| 0
|
@@ -4251,86 +4251,8 @@
b%22,%0A
- # OCA/account-financial-reporting%0A %22account_financial_report%22,%0A
|
b4a2bf0ee660aab40a885cd8b84c18c8b4a8580b
|
make ip, port and type dynamic
|
mpf/core/bcp/bcp_server.py
|
mpf/core/bcp/bcp_server.py
|
"""Bcp server for clients which connect and disconnect randomly."""
import asyncio
from mpf.core.bcp.bcp_socket_client import BCPClientSocket
class BcpServer():
"""Server socket which listens for incoming BCP clients."""
def __init__(self, machine):
self.machine = machine
self._server = None
@asyncio.coroutine
def start(self):
"""Start the server."""
self._server = yield from self.machine.clock.start_server(
self._accept_client, '127.0.0.1', 5051, loop=self.machine.clock.loop)
@asyncio.coroutine
def stop(self, loop):
"""Stop the BCP server, i.e. closes the listening socket(s)."""
if self.server:
self.server.close()
yield from self.server.wait_closed()
self.server = None
@asyncio.coroutine
def _accept_client(self, client_reader, client_writer):
"""Accept an connection and create client."""
client = BCPClientSocket(self.machine, None, self.machine.bcp.interface)
client.accept_connection(client_reader, client_writer)
self.machine.bcp.transport.register_transport(client)
|
Python
| 0
|
@@ -80,67 +80,67 @@
cio%0A
-%0Afrom mpf.core.bcp.bcp_socket_client import BCPClientSocket
+import logging%0A%0Afrom mpf.core.utility_functions import Util
%0A%0A%0Ac
@@ -253,16 +253,32 @@
machine
+, ip, port, type
):%0A
@@ -320,21 +320,145 @@
elf.
-_server = Non
+log = logging.getLogger('BCPServer')%0A self._server = None%0A self._ip = ip%0A self._port = port%0A self._type = typ
e%0A%0A
@@ -636,25 +636,28 @@
nt,
-'127.0.0.1', 5051
+self._ip, self._port
, lo
@@ -685,39 +685,16 @@
.loop)%0A%0A
- @asyncio.coroutine%0A
def
@@ -702,22 +702,16 @@
top(self
-, loop
):%0A
@@ -789,24 +789,25 @@
if self.
+_
server:%0A
@@ -815,24 +815,25 @@
self.
+_
server.close
@@ -839,71 +839,23 @@
e()%0A
-
+%0A
-yield from self.server.wait_closed()%0A%0A
self.
+_
serv
@@ -1014,32 +1014,96 @@
-client = BCPClientSocket
+self.log.info(%22New client connected.%22)%0A client = Util.string_to_class(self._type)
(sel
@@ -1139,18 +1139,8 @@
.bcp
-.interface
)%0A
|
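The refactor above replaces a hard-coded client class with Util.string_to_class(self._type), resolving the class from a string at runtime so the transport type can come from configuration. A minimal stand-alone version of that pattern with importlib (the MPF helper's exact behaviour may differ):

import importlib

def string_to_class(dotted_path):
    # Resolve "package.module.ClassName" to the class object.
    module_path, _, class_name = dotted_path.rpartition(".")
    module = importlib.import_module(module_path)
    return getattr(module, class_name)

OrderedDict = string_to_class("collections.OrderedDict")
print(OrderedDict([("a", 1)]))   # OrderedDict([('a', 1)])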
35f9db005fc95f6d95d1559f81137381fa43e7ad
|
Add new locale.
|
mrburns/settings/server.py
|
mrburns/settings/server.py
|
import os
import socket
from django.utils.translation import ugettext_lazy as _
from .base import * # noqa
SERVER_ENV = os.getenv('DJANGO_SERVER_ENV')
SECRET_KEY = os.getenv('SECRET_KEY')
DEBUG = TEMPLATE_DEBUG = False
ALLOWED_HOSTS = [
# the server's IP (for monitors)
socket.gethostbyname(socket.gethostname()),
]
if SERVER_ENV == 'prod':
ALLOWED_HOSTS.extend([
'webwewant.mozilla.org',
'glow.cdn.mozilla.net',
'glow-origin.cdn.mozilla.net',
])
STATIC_URL = 'https://glow.cdn.mozilla.net/static/'
LANGUAGES = (
('cs', _('Czech')),
('de', _('German')),
('en', _('English')),
('es', _('Spanish')),
('fr', _('French')),
('he', _('Hebrew')),
('hu', _('Hungarian')),
('id', _('Indonesian')),
('it', _('Italian')),
('ja', _('Japanese')),
('ko', _('Korean')),
('lt', _('Lithuanian')),
('nl', _('Dutch')),
('pl', _('Polish')),
('pt-br', _('Brazilian Portuguese')),
('ro', _('Romanian')),
('ru', _('Russian')),
('sk', _('Slovak')),
('sl', _('Slovenian')),
('sq', _('Albanian')),
('sr', _('Serbian')),
('zh-cn', _('Simplified Chinese')),
('zh-tw', _('Traditional Chinese')),
)
elif SERVER_ENV == 'dev':
ALLOWED_HOSTS.append('webwewant.allizom.org')
CACHES = {
# DB 1 is for the site cache
'default': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'unix:/var/run/redis/redis.sock:1',
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser',
}
},
# DB 0 is for the glow data
'smithers': {
'BACKEND': 'redis_cache.cache.RedisCache',
'LOCATION': 'unix:/var/run/redis/redis.sock:0',
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser',
}
}
}
DJANGO_REDIS_IGNORE_EXCEPTIONS = False
ENABLE_REDIS = True
# Sentry
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
RAVEN_CONFIG = {
'dsn': os.getenv('SENTRY_DSN'),
}
|
Python
| 0.000001
|
@@ -1295,24 +1295,50 @@
Chinese')),%0A
+ ('xx', 'Pirate'),%0A
)%0Aelif S
|
a59dcbe8df5b933006dcc554962bdeef674c4383
|
Remove print statements and re-raise errors
|
mstranslator/translator.py
|
mstranslator/translator.py
|
import requests
import urllib
import sys
class Config:
"""Config to be given to an instance of translator to do the Authorization."""
def __init__(self, translator_client_id, translator_client_secret):
assert translator_client_id is not None
assert type(translator_client_id) is str
assert translator_client_secret is not None
assert type(translator_client_secret) is str
self.translator_client_id = translator_client_id
self.translator_client_secret = translator_client_secret
class Translator:
"""An instance of this class can be used to detect language and translate text."""
def __init__(self, config):
self.config = config
def __get_access_token(self):
data = {"client_id": self.config.translator_client_id,
"client_secret": self.config.translator_client_secret,
"scope": 'http://api.microsofttranslator.com',
"grant_type": 'client_credentials'}
resp = requests.post(url='https://datamarket.accesscontrol.windows.net/v2/OAuth2-13', data=urllib.urlencode(data))
return resp.json()["access_token"]
def __authorization_header(self):
        access_token = self.__get_access_token()
print "Using token", access_token
return "Bearer" + " " + access_token
def detect_language(self, text):
text = text.encode('utf-8')
print "Detecting language for text", text
authorization_header = self.__authorization_header()
headers = {"Authorization": authorization_header}
data = {"text": text}
resp = requests.get(url='http://api.microsofttranslator.com/v2/Http.svc/Detect', params=data, headers=headers)
try:
t = resp.text.encode('utf-8')
# Different unicodes for different languages are not parsed correctly
# with xml module.
detected_language_code = t.split('>')[1].split('<')[0]
print "Language detected: ", detected_language_code
return (detected_language_code, authorization_header)
except Exception as e:
print "Could not parse XML", resp.text
print e
def translate(self, text, from_language, to_language, authorization_header):
if authorization_header is None:
authorization_header = self.__authorization_header()
text = text.encode('utf-8')
print "Translating text", text
headers = {"Authorization": authorization_header}
data = {"text": text,
"from": from_language,
"to": to_language}
resp = requests.get(url='http://api.microsofttranslator.com/v2/Http.svc/Translate', params=data, headers=headers)
try:
t = resp.text.encode('utf-8')
translatedText = t.split('>')[1].split('<')[0]
print "Got translation: ", translatedText
return translatedText
except Exception as e:
print "Could not parse XML", resp.text
print e
if __name__ == '__main__':
print translate(sys.argv[1], sys.argv[2], sys.argv[3])
|
Python
| 0.000007
|
@@ -1211,50 +1211,8 @@
n()%0A
- print %22Using token%22, access_token%0A
@@ -1252,16 +1252,16 @@
s_token%0A
+
%0A def
@@ -1330,58 +1330,8 @@
8')%0A
- print %22Detecting language for text%22, text%0A
@@ -1833,72 +1833,8 @@
%5B0%5D%0A
- print %22Language detected: %22, detected_language_code%0A
@@ -1942,46 +1942,27 @@
-print %22Could not parse XML%22, resp.text
+sys.stderr.write(e)
%0A
@@ -1970,22 +1970,20 @@
-print
+rais
e%0A%0A d
@@ -2204,47 +2204,8 @@
8')%0A
- print %22Translating text%22, text%0A
@@ -2588,62 +2588,8 @@
%5B0%5D%0A
- print %22Got translation: %22, translatedText%0A
@@ -2661,22 +2661,33 @@
-print
+sys.stderr.write(
%22Could n
@@ -2702,11 +2702,21 @@
XML
-%22,
+ %7B0%7D%22.format(
resp
@@ -2720,16 +2720,18 @@
esp.text
+))
%0A
@@ -2735,22 +2735,20 @@
-print
+rais
e%0A%0Aif __
|
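Two patterns in the diff above are worth spelling out: write diagnostics to sys.stderr rather than stdout, and use a bare raise inside except, which re-raises the active exception with its original traceback intact so callers still see the real failure. Sketch:

import sys

def parse(payload):
    try:
        return payload.split(">")[1].split("<")[0]
    except IndexError:
        # Log, then re-raise: bare `raise` preserves the original
        # exception and traceback for the caller.
        sys.stderr.write("Could not parse XML {0}\n".format(payload))
        raise

print(parse("<a>hello</a>"))   # hello
# parse("no xml here") writes to stderr and re-raises IndexError.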
61d8ced0d46bb0e351b8c488814b75b1de2ddab3
|
Update Ejemplos.py
|
Ago-Dic-2018/Ejemplos/Ejemplos.py
|
Ago-Dic-2018/Ejemplos/Ejemplos.py
|
import collections
potenciaPares = 2
potenciaImpares = 3
# print(2 / 3)
#for i in range(0, 10):
#if i % 2:
        # Formatting style 1:
        # print("Impar: %d" % (i))
        # Formatting style 2:
        # print("El impar #{} ^ {} es = {}".format(i, potenciaImpares, i ** potenciaImpares))
    #else:
        # Formatting style 1:
        # print("Par: %d" % (i))
        # Formatting style 2:
        # print("El par #{} ^ {} es = {}".format(i, potenciaPares, i ** potenciaPares))
# Brackets [] are for lists
miListilla = [1, 'uai', 3, 'lista', 'puede', 'tener', 'de', 'todo', 'por ejemplo', [7, 777, 77]]
#print(miListilla)
# Do it with slicing
#print("Hola"[:2])
#print(miListilla[-2][1])
#for i in miListilla:
# if isinstance(i, collections.Iterable):
# for j in i:
# if j == 777:
# print("El número de la suerte :D esssss -> {}!!!".format(j))
miListilla.append(777)
miListilla.insert(2, [])
miListilla.insert(2, [])
miListilla.insert(2, 3)
miListilla.remove([])
miListilla[1] = 'Hey!'
miNuevaListilla = miListilla[:]
miNuevaListilla.reverse()
print(miNuevaListilla)
print(miListilla)
miTupla = (1, 2, 3)
miTupla[1] = []
print(miTupla)
|
Python
| 0
|
@@ -1165,16 +1165,37 @@
, 2, 3)%0A
+# Aqu%C3%AD falla ehhh -%3E
miTupla%5B
@@ -1213,12 +1213,1506 @@
nt(miTupla)%0A
+%0A# Sumatoria de Gauss y N%C3%BAmeros triangulares%0A# (N * (N + 1)) / 2%0A# print(int((3 * (3 + 1)) / 2))%0A%0A# Listas comprimidas%0Aprint(%5B2 ** i for i in range(0, 10)%5D)%0A%0Afrase = %22Hola Buenas noches A!!!%22%0Aprint(%5Bi.upper() for i in frase if i in 'AEIOUaeiou'%5D)%0A%0A# Diccionarios%0Adiccionario = %7B%0A 'llave': 'valor'%0A%7D%0A%0AelDiccionario = %7B%0A 'A': %5B%0A 'aguacate',%0A 'armadura',%0A 'avanzar'%0A %5D,%0A 'E': %5B%0A 'enfermo',%0A 'error',%0A 'elote'%0A %5D,%0A 'I': %5B%0A 'informaci%C3%B3n',%0A 'imagen',%0A 'invisible'%0A %5D,%0A 'O': %5B%0A 'oreja',%0A 'oso',%0A 'olor'%0A %5D,%0A 'U': %5B%0A 'umbral',%0A 'unicornio',%0A 'uva'%0A %5D%0A%7D%0A%0A# Recorre los elementos del diccionario%0Afor llave, valor in elDiccionario.items():%0A print(%22Llave %7B%7D =%3E Valor %7B%7D%22.format(llave, valor))%0A%0A# Recorre las tuplas que devuelve la funci%C3%B3n items()%0Afor llave in elDiccionario.items():%0A print(llave)%0A%0A# Recorre las llaves ordenadas e imprime el elemento%5Bllave%5D%0Afor llave in sorted(elDiccionario.keys()):%0A print(elDiccionario%5Bllave%5D)%0A%0A%0Adef formatea(item):%0A if isinstance(item, list):%0A listaCopia = item%5B:%5D%0A listaCopia.append('agregao')%0A return %22Una copia diferente -%3E %7B%7D%22.format(listaCopia)%0A%0A return %22Ac%C3%A1 lo regresamos formateado -%3E %7B%7D%22.format(item)%0A%0AmiLista = %5B1, 2%5D%0Aprint(formatea(miLista))%0Aprint(miLista)%0AmiLista2 = %5B3, 4%5D%0Aprint(formatea(miLista2))%0Aprint(miLista2)%0A%0Aprint(formatea('String'))%0Aprint(formatea(64361349713976972364691))%0A
|
4b545d2e72080537672bb4ebb990708cad678344
|
Debug Google Cloud Run support
|
entrypoint.py
|
entrypoint.py
|
#!/usr/bin/python3
#
# Define containerized environment for running Diosix on Qemu
#
# On Google Cloud Run: Creates HTTP server on port 8080
# or whatever was specified using the PORT system variable.
# Outputs via the HTTP port. This requires K_SERVICE to be set.
#
# On all other environments: Log to stdout
#
# syntax: entrypoint.py <command>
#
# Author: Chris Williams <diodesign@tuta.io>
#
import os
import sys
global command_result
from flask import Flask
if __name__ == "__main__":
if not os.environ.get('K_SERVICE'):
print('Running locally')
stream = os.popen('. $HOME/.cargo/env && cd /build/diosix && {}'.format(' '.join(sys.argv[1:])))
output = stream.read()
output
else:
        print('Running HTTP service {} {} {} for Google Cloud'.format(os.environ.get('K_SERVICE'), os.environ.get('K_REVISION'), os.environ.get('K_CONFIGURATION')))
app = Flask(__name__)
@app.route('/')
def ContainerService():
return 'Container built. Use docker images and docker run in the Google Cloud shell to run this container.\n'
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
Python
| 0
|
@@ -571,25 +571,17 @@
-stream = os.popen
+os.system
('.
@@ -660,54 +660,8 @@
)))%0A
- output = stream.read()%0A output%0A
|
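The swap above from os.popen to os.system changes where the child's output goes: popen returns a file object whose stdout must be read (and here was read and then discarded), while system lets the child inherit the parent's stdout, so output lands directly in the container logs, which is what Cloud Run collects; its return value is the exit status. A small comparison:

import os

captured = os.popen("echo via popen").read()   # output captured, not printed
status = os.system("echo via system")          # output goes straight to stdout

print(repr(captured), status)   # 'via popen\n' 0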
5cd3bd342b3259b0b10a5e4ff56f4e85e4bb209d
|
Fix patch_middleware_config context manager error handling:
|
Allura/allura/tests/decorators.py
|
Allura/allura/tests/decorators.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import sys
import re
from functools import wraps
import contextlib
from ming.orm.ormsession import ThreadLocalORMSession
from tg import tmpl_context as c
from mock import patch
import tg
from paste.deploy.converters import asbool
from allura import model as M
import allura.config.middleware
def with_user_project(username):
def _with_user_project(func):
@wraps(func)
def wrapped(*args, **kw):
user = M.User.by_username(username)
c.user = user
n = M.Neighborhood.query.get(name='Users')
shortname = 'u/' + username
p = M.Project.query.get(shortname=shortname, neighborhood_id=n._id)
if not p:
n.register_project(shortname, user=user, user_project=True)
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
return func(*args, **kw)
return wrapped
return _with_user_project
@contextlib.contextmanager
def NullContextManager():
yield
def with_tool(project_shortname, ep_name, mount_point=None, mount_label=None,
ordinal=None, post_install_hook=None, username='test-admin',
**override_options):
def _with_tool(func):
@wraps(func)
def wrapped(*args, **kw):
c.user = M.User.by_username(username)
p = M.Project.query.get(shortname=project_shortname)
c.project = p
if mount_point and not p.app_instance(mount_point):
c.app = p.install_app(
ep_name, mount_point, mount_label, ordinal, **override_options)
if post_install_hook:
post_install_hook(c.app)
if asbool(tg.config.get('smtp.mock')):
smtp_mock = patch('allura.lib.mail_util.smtplib.SMTP')
else:
smtp_mock = NullContextManager()
with smtp_mock:
while M.MonQTask.run_ready('setup'):
pass
ThreadLocalORMSession.flush_all()
ThreadLocalORMSession.close_all()
elif mount_point:
c.app = p.app_instance(mount_point)
return func(*args, **kw)
return wrapped
return _with_tool
with_discussion = with_tool('test', 'Discussion', 'discussion')
with_link = with_tool('test', 'Link', 'link')
with_tracker = with_tool('test', 'Tickets', 'bugs')
with_wiki = with_tool('test', 'Wiki', 'wiki')
with_url = with_tool('test', 'ShortUrl', 'url')
class raises(object):
'''
Test helper in the form of a context manager, to assert that something raises an exception.
After completion, the 'exc' attribute can be used to do further inspection of the exception
'''
def __init__(self, ExcType):
self.ExcType = ExcType
self.exc = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_t):
if exc_type:
self.exc = exc_val
if issubclass(exc_type, self.ExcType):
# ok
return True
else:
# root exception will be raised, untouched
return False
else:
raise AssertionError('Did not raise %s' % self.ExcType)
def without_module(*module_names):
def _without_module(func):
@wraps(func)
def wrapped(*a, **kw):
with patch.dict(sys.modules, {m: None for m in module_names}):
return func(*a, **kw)
return wrapped
return _without_module
class patch_middleware_config(object):
'''
Context manager that patches the configuration used during middleware
setup for Allura
'''
def __init__(self, new_configs):
self.new_configs = new_configs
def __enter__(self):
self._make_app = allura.config.middleware.make_app
def make_app(global_conf, full_stack=True, **app_conf):
app_conf.update(self.new_configs)
return self._make_app(global_conf, full_stack, **app_conf)
allura.config.middleware.make_app = make_app
return self
def __exit__(self, exc_type, exc_val, exc_t):
allura.config.middleware.make_app = self._make_app
return self
@contextlib.contextmanager
def audits(*messages, **kwargs):
"""
Asserts all the messages exist in audit log
:param messages: regex strings
:param bool user: if this is a user log
"""
M.AuditLog.query.remove()
yield
if kwargs.get('user'):
actor = kwargs.get('actor', '.*')
ip_addr = kwargs.get('ip_addr', '.*')
user_agent = kwargs.get('user_agent', '.*')
preamble = '(Done by user: {}\n)?IP Address: {}\nUser-Agent: {}\n'.format(actor, ip_addr, user_agent)
else:
preamble = ''
for message in messages:
found = M.AuditLog.query.find(dict(message=re.compile(preamble + message))).count()
if not found:
hints = ''
all = M.AuditLog.query.find().all()
if len(all) < 10:
hints += '\nin these AuditLog messages:\n\t' + '\n\t'.join(a.message for a in all)
if message != re.escape(message):
hints += '\nYou may need to escape the regex chars in the text you are matching'
raise AssertionError('Could not find "%s"%s' % (message, hints))
@contextlib.contextmanager
def out_audits(*messages, **kwargs):
"""
Asserts none of the messages exist in the audit log. "without audits"
:param messages: list of regex strings
:param bool user: if this is a user log
"""
M.AuditLog.query.remove()
yield
if kwargs.get('user'):
actor = kwargs.get('actor', '.*')
ip_addr = kwargs.get('ip_addr', '.*')
preamble = '(Done by user: {}\n)?IP Address: {}\n'.format(actor, ip_addr)
else:
preamble = ''
for message in messages:
assert not M.AuditLog.query.find(dict(
message=re.compile(preamble + message))).count(), 'Found unexpected: "%s"' % message
# not a decorator but use it with LogCapture() context manager
def assert_logmsg_and_no_warnings_or_errors(logs, msg):
"""
:param testfixtures.logcapture.LogCapture logs: LogCapture() instance
:param str msg: Message to look for
"""
found_msg = False
for r in logs.records:
if msg in r.getMessage():
found_msg = True
if r.levelno > logging.INFO:
raise AssertionError('unexpected log {} {}'.format(r.levelname, r.getMessage()))
assert found_msg, 'Did not find {} in logs: {}'.format(msg, '\n'.join([r.getMessage() for r in logs.records]))
|
Python
| 0
|
@@ -5227,36 +5227,16 @@
ake_app%0A
- return self%0A
%0A%0A@conte
|
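The one-line fix above removes return self from patch_middleware_config.__exit__. The context-manager protocol treats any truthy return from __exit__ as "suppress the in-flight exception", so the old code silently swallowed errors raised inside the with block. A minimal demonstration of that protocol (class names are illustrative):

class Swallows(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self  # truthy return value -> exception is suppressed


class Propagates(object):
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        pass  # returns None (falsy) -> exception propagates


with Swallows():
    raise ValueError('hidden')  # silently swallowed

try:
    with Propagates():
        raise ValueError('visible')
except ValueError as e:
    print('caught:', e)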
63e0596561e3c9ed7c87ab24ffa6b29366ce2a8f
|
fix pep8
|
myuw/dao/notice_mapping.py
|
myuw/dao/notice_mapping.py
|
"""
This module provides the following utility functions:
1. categorize notices based on the dictionary defined
in notice_categorization.py;
2. apply show/hide on notices based on their category and timing;
3. convert notice object into json format
"""
import logging
from datetime import datetime, timedelta
from myuw.dao.notice_categorization import NOTICE_CATEGORIES
from myuw.dao.term import get_comparison_datetime_with_tz
logger = logging.getLogger(__name__)
UNKNOWN_CATEGORY_NAME = "Uncategorized"
def categorize_notices(notices):
for notice in notices:
map_notice_category(notice)
notices[:] = [n for n in notices if n.custom_category != "not a notice"]
# Removing uncategorized notices for MUWM-2343
notices[:] = [n for n in notices if
n.custom_category != UNKNOWN_CATEGORY_NAME]
return notices
def map_notice_category(notice):
"""
Set the custom_category, is_critical, location_tags for
the given notice based on the NOTICE_CATEGORIES defined
in myuw.dao.notice_categorization
"""
key = notice.notice_category + "_" + notice.notice_type
categorization = NOTICE_CATEGORIES.get(key, None)
if categorization is not None:
notice.custom_category = categorization["myuw_category"]
notice.is_critical = categorization["critical"]
notice.location_tags = categorization["location_tags"]
else:
notice.custom_category = UNKNOWN_CATEGORY_NAME
notice.is_critical = False
notice.location_tags = None
return notice
def equals_myuwid(notice, value):
myuw_id = notice.notice_category + "_" + notice.notice_type
return myuw_id == value
def apply_showhide(request, notices):
"""
Some financial aid notices have additional show/hide logic
depending on the open/close dates of the notice.
This function will apply the show/hide logic on each notice,
update the notice attributes accordingly.
"""
now = get_comparison_datetime_with_tz(request)
for notice in notices:
if notice.notice_category != "StudentFinAid":
continue
if equals_myuwid(notice, "StudentFinAid_AidPriorityDate"):
# not critical after the first two weeks and
# before the last two weeks
if is_after_eof_days_after_open(now, notice, 15) and\
is_before_bof_days_before_close(now, notice, 15):
notice.is_critical = False
return notices
def get_open_date(notice):
"""
@return the datetime object of the notice begin date value
in utc timezone
"""
for attribute in notice.attributes:
if attribute.data_type == "date" and\
attribute.name.endswith("Begin"):
return attribute._date_value
def get_close_date(notice):
"""
@return the datetime object of the notice end date value
in utc timezone
"""
for attribute in notice.attributes:
if attribute.data_type == "date" and\
attribute.name.endswith("End"):
return attribute._date_value
def is_after_eof_days_after_open(now, notice, n_days):
"""
@return true if it is after "n_days" after the notice open datetime
"""
return now > get_open_date(notice) + timedelta(days=n_days)
def is_before_bof_days_before_close(now, notice, n_days):
"""
@return true if it is before "n_days" prior to the notice close datetime
"""
return now < get_close_date(notice) - timedelta(days=n_days)
def get_est_reg_info(request, notice):
ret = { "is_my_1st_reg_day": False,
"my_reg_has_opened": False
}
now = get_comparison_datetime_with_tz(request)
for attribute in notice.attributes:
if attribute.data_type == "date" and\
attribute.name == "Date":
ret["is_my_1st_reg_day"] =\
(now.date() == attribute._date_value.date())
reg_start = attribute._date_value + timedelta(hours=6)
ret["my_reg_has_opened"] = (now >= reg_start)
return ret
def get_json_for_notices(request, notices):
"""
@return the json data of notices with the specific show/hide logic
applied on the corresponding notices.
"""
notice_json = []
if not notices:
return notice_json
for notice in apply_showhide(request, notices):
if notice.notice_category == "StudentFinAid" and\
notice.notice_type.endswith("Short") and\
notice.long_notice is not None:
data = notice.long_notice.json_data(
include_abbr_week_month_day_format=True)
data['short_content'] = notice.notice_content
data['category'] = notice.custom_category
data['is_critical'] = False
data['id_hash'] = notice.id_hash
data['is_read'] = notice.is_read
data['location_tags'] = notice.location_tags
else:
data = notice.json_data(include_abbr_week_month_day_format=True)
data['category'] = notice.custom_category
data['sws_category'] = notice.notice_category
data['is_critical'] = notice.is_critical
data['id_hash'] = notice.id_hash
data['is_read'] = notice.is_read
data['location_tags'] = notice.location_tags
if "est_reg_date" in notice.location_tags:
est_reg = get_est_reg_info(request, notice)
data["is_my_1st_reg_day"] =\
est_reg["is_my_1st_reg_day"]
data["my_reg_has_opened"] =\
est_reg["my_reg_has_opened"]
notice_json.append(data)
return notice_json
|
Python
| 0.000001
|
@@ -3551,17 +3551,16 @@
ret = %7B
-
%22is_my_1
@@ -3590,17 +3590,16 @@
-
%22my_reg_
@@ -3616,28 +3616,16 @@
%22: False
-%0A
%7D%0A no
|
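The show/hide rule in apply_showhide keeps an aid notice critical only during the first 15 days after it opens and the last 15 days before it closes. A minimal sketch of that window test with plain datetimes (the dates are illustrative):

from datetime import datetime, timedelta

open_date = datetime(2017, 1, 1)
close_date = datetime(2017, 3, 1)
now = datetime(2017, 2, 1)

past_opening_window = now > open_date + timedelta(days=15)
before_closing_window = now < close_date - timedelta(days=15)

# Mid-window, so the notice would be demoted from critical
print(past_opening_window and before_closing_window)  # True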
73143ebf17e7af6503da0136fdd8c3bdf0674f06
|
fix address indexing of qld data
|
eheritage/injest/qld.py
|
eheritage/injest/qld.py
|
from lxml import etree
def parse_ahpi_xml(path):
"""
Parses the AHPI XML export format of the Queensland heritage register
and yields each heritage place as a dict.
:param path: The location of a heritage_places xml file.
"""
ns = {'hp': 'http://www.heritage.gov.au/ahpi/heritage_places'}
tree = etree.parse(path)
root = tree.getroot()
for hp_element in root.xpath('//hp:heritage_place', namespaces=ns):
hp = {
"date_created": hp_element.get('date_created'),
"date_modified": hp_element.get('date_modified'),
"id": hp_element.get('id'),
"name": hp_element.xpath('hp:name', namespaces=ns)[0].text,
"addresses": {
"address": hp_element.xpath('hp:address', namespaces=ns)[0].text,
"lga_name": hp_element.xpath('hp:lga', namespaces=ns)[0].text,
"suburb": hp_element.xpath('hp:town', namespaces=ns)[0].text,
"state": hp_element.xpath('hp:state', namespaces=ns)[0].text,
"country": hp_element.xpath('hp:country', namespaces=ns)[0].text,
},
"state": hp_element.xpath('hp:state', namespaces=ns)[0].text,
"category": hp_element.xpath('hp:category', namespaces=ns)[0].text,
"significance": hp_element.xpath('hp:sos', namespaces=ns)[0].text,
"description": hp_element.xpath('hp:description', namespaces=ns)[0].text,
"url": hp_element.xpath('hp:url', namespaces=ns)[0].text,
}
try:
# import ipdb; ipdb.set_trace()
lat = hp_element.xpath('hp:latitude', namespaces=ns)[0].text
lon = hp_element.xpath('hp:longitude', namespaces=ns)[0].text
hp['geolocation'] = {
"lat": float(lat), "lon": float(lon)
}
except TypeError:
print "error parsing lat/lon %s/%s" % (lat, lon)
pass
yield hp
if __name__ == "__main__":
def myPrint(arg): print arg
hp_filename = "/mnt/groups/maenad/activities/e-Heritage/QLD/heritage_list.xml"
for result in parse_ahpi_xml(hp_filename):
    myPrint(result)
|
Python
| 0
|
@@ -716,16 +716,17 @@
esses%22:
+%5B
%7B%0A
@@ -1131,16 +1131,17 @@
%7D
+%5D
,%0A
|
6c3929806a19fbaac0c17887e697bba7ddeaa92d
|
create cache dir if it does not exist
|
micronota/commands/database.py
|
micronota/commands/database.py
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from os import makedirs
from os.path import join
from importlib import import_module
import click
from ..cli import cmd, AliasedGroup
from .. import db
@cmd.group(cls=AliasedGroup)
@click.pass_context
def cli(ctx):
'''Database operations.'''
pass
@cli.command('prepare')
@click.argument('databases', nargs=-1)
@click.option('-d', '--cache_dir', required=True,
type=click.Path(file_okay=False),
help=('The directory to cache the downloaded files so that files '
'do not need to be downloaded again if they exist there.'))
@click.option('-f', '--force', is_flag=True,
help='Force overwrite.')
@click.pass_context
def create_db(ctx, databases, cache_dir, force):
'''Prepare database.
Download the files for the specified DATABASES and convert
them into the proper format for micronota.'''
# this cmd is 2-level nested, so double "parent"
grandparent_ctx = ctx.parent.parent
config = grandparent_ctx.config
func_name = 'prepare_db'
for d in databases:
submodule = import_module('.%s' % d, db.__name__)
f = getattr(submodule, func_name)
out_d = join(config.db_dir, d)
makedirs(out_d, exist_ok=True)
f(out_d, cache_dir, force=force)
|
Python
| 0.000001
|
@@ -1374,16 +1374,54 @@
are_db'%0A
+ makedirs(cache_dir, exist_ok=True)
%0A for
|
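The fix adds makedirs(cache_dir, exist_ok=True) before the loop. With exist_ok=True the call is idempotent: missing parent directories are created and an existing directory is left alone instead of raising FileExistsError. A minimal sketch (the path is illustrative):

import tempfile
from os import makedirs
from os.path import join

cache_dir = join(tempfile.gettempdir(), 'micronota-cache-demo')
makedirs(cache_dir, exist_ok=True)  # creates the directory
makedirs(cache_dir, exist_ok=True)  # second call is a harmless no-op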
b31a13c457b045fa8f1e6a2f99f862fe1675926c
|
Fix broken tests
|
nodes/tests/test_models.py
|
nodes/tests/test_models.py
|
import sure
from mock import MagicMock
from django.test import TestCase
from django.contrib.auth.models import User
from projects.tests.factories import ProjectFactory
from ..exceptions import TaskAlreadyPerformed
from .. import models
from . import factories
from .base import WithKeysMixin
class ProjectKeysCase(WithKeysMixin, TestCase):
"""Project keys case"""
def test_generate_keys_on_save(self):
"""Test generate keys on save"""
keys = factories.ProjectKeysFactory()
keys.private_key.should.be.ok
keys.public_key.should.be.ok
def test_register_key_on_github(self):
"""Test register key on github"""
factories.ProjectKeysFactory()
models.Project.repo.create_key.call_count.should.be.equal(1)
def test_file_paths(self):
"""Test file paths"""
keys = factories.ProjectKeysFactory()
keys.file_paths.should.be.ok
def test_create_when_project_enabled(self):
"""Test create keys when project enabled"""
project = ProjectFactory.create(run_here=True)
models.ProjectKeys.objects.filter(project=project).exists()\
.should.be.true
def test_not_create_duplicated_keys_on_save(self):
"""Test not create duplicated keys on save"""
project = ProjectFactory.create(run_here=True)
project.save()
models.ProjectKeys.objects.filter(project=project).count()\
.should.be.equal(1)
class NodeTaskCase(WithKeysMixin, TestCase):
"""Node task case"""
def setUp(self):
super(NodeTaskCase, self).setUp()
self._mock_connect_to_node()
self._mock_get_covio()
self._mock_user_github()
def _mock_connect_to_node(self):
"""Mock connect to node"""
self._orig_connect_to_node = models.connect_to_node
models.connect_to_node = MagicMock()
self.node = MagicMock(id='node_id')
self.node.execute.return_value = MagicMock(
script='in', stdout='out', stderr='err',
)
models.connect_to_node.return_value.__enter__.return_value = self.node
def _mock_get_covio(self):
"""Mock get_covio"""
self._orig_get_covio = models.Project.get_covio
models.Project.get_covio = MagicMock(return_value={'image': 'raw'})
def _mock_user_github(self):
"""Mock user github"""
self._orig_github_token = User.github_token
User.github_token = 'token'
def _create_task(self, **kwargs):
"""Create NodeTask and keys"""
task = factories.NodeTaskFactory(**kwargs)
factories.ProjectKeysFactory(project=task.project)
return task
def tearDown(self):
super(NodeTaskCase, self).tearDown()
models.connect_to_node = self._orig_connect_to_node
models.Project.get_covio = self._orig_get_covio
User.github_token = property(self._orig_github_token)
def test_perform_task(self):
"""Test perform task"""
task = self._create_task()
task.perform()
task.state.should.be.equal(models.NodeTask.STATE_FINISHED)
def test_set_input_and_outputs(self):
"""Test set inputs and outputs"""
task = self._create_task()
task.perform()
task.input.should.be.equal('in')
task.stdout.should.be.equal('out')
task.stderr.should.be.equal('err')
def test_perform_only_new(self):
"""Test perform only new tasks"""
task = self._create_task(state=models.NodeTask.STATE_ACTIVE)
task.perform.when.called_with().should.throw(TaskAlreadyPerformed)
def test_set_failed_state_when_failed(self):
"""Test set failed state when failed"""
self.node.execute.side_effect = Exception()
task = self._create_task()
task.perform()
task.state.should.be.equal(models.NodeTask.STATE_FAILED)
|
Python
| 0.000555
|
@@ -666,37 +666,23 @@
-factories.
Project
-Keys
Factory(
)%0A
@@ -669,32 +669,45 @@
ProjectFactory(
+run_here=True
)%0A models
@@ -2536,43 +2536,94 @@
-task = factories.NodeTaskFactory(**
+defaults = %7B%0A 'project__run_here': True,%0A %7D%0A defaults.update(
kwar
@@ -2633,16 +2633,23 @@
%0A
+ task =
factori
@@ -2655,47 +2655,34 @@
ies.
-ProjectKeysFactory(project=task.project
+NodeTaskFactory(**defaults
)%0A
|
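setUp above stubs module attributes by saving the original and restoring it in tearDown by hand. mock.patch performs the same bookkeeping automatically and restores the attribute even when the test raises. A minimal sketch with a stand-in target (os.getcwd is used only so the example runs; the tests would target nodes.models attributes instead):

import os
from mock import patch, MagicMock

with patch('os.getcwd', MagicMock(return_value='/fake')):
    print(os.getcwd())  # '/fake' while the patch is active
print(os.getcwd())      # original restored automatically, even on error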
76f07a6e070959e137c1f9df2717da38d91abe15
|
include font name in drops dir name
|
nototools/grab_download.py
|
nototools/grab_download.py
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base code for copying and unpacking font drops from vendors.
See grab_mt_download.py and grab_adobe_download.py"""
__author__ = "dougfelt@google.com (Doug Felt)"
import argparse
import os
import os.path
import re
import shutil
import zipfile
from fontTools import ttLib
import notoconfig
def grab_files(dst, files, src_vendor, name_date_re, extract_fn):
"""Get date from each filename in files, create a folder for it, under
dst/drops, then extract the files to it."""
# The zip indicates that the corresponding drop is good and was built from it. But
# we might have messed up along the way, so:
# - if we have a drop and a zip, assume it's already handled
# - if we have a drop but no zip, assume the drop needs to be rebuilt from the zip
# - if we have a zip and no drop
# - if we have new zip, complain
# - else rebuild the drop from the old zip
# - else build the drop, and if successful, save the zip
for f in files:
if not os.path.exists(f):
print 'file \'%s\' does not exist, skipping' % f
continue
filename = os.path.basename(f)
result = name_date_re.match(filename)
if not result:
print 'could not parse %s, skipping' % f
continue
name = result.group(1)
date = '_'.join([d for d in result.group(2,3,4)])
drop_dir = os.path.join(dst, 'drops', date)
zip_dir = os.path.join(dst, 'zips')
zip_filename = os.path.join(zip_dir, filename)
if os.path.exists(drop_dir):
if os.path.exists(zip_filename):
print 'already have a %s drop and zip for %s' % (src_vendor, filename)
continue
else:
# clean up, assume needs rebuild
shutil.rmtree(drop_dir)
else:
if os.path.exists(zip_filename):
if os.path.realpath(f) != os.path.realpath(zip_filename):
print 'already have a zip file named %s for %s' % (zip_filename, f)
continue
os.mkdir(drop_dir)
extract_fn(drop_dir, f)
if not os.path.exists(zip_filename):
print 'writing %s to %s' % (f, zip_filename)
shutil.copy2(f, zip_filename)
def matching_files_in_dir(src, namere):
"""Iterate over files in src with names matching namere, returning the list."""
filelist = []
for f in os.listdir(src):
path = os.path.join(src, f)
if not os.path.isfile(path):
continue
if not re.search(namere, f):
continue
filelist.append(path)
if not filelist:
print "no files in %s matched '%s'" % (src, namere)
return filelist
def invoke_main(src_vendor, name_date_re, extract_fn, default_params = {}):
"""Grab the files.
src_vendor is a string, currently either Adobe or Monotype.
name_date_re is a regex, it should extract name, year, month, and day fields from the filename
extract_fn is a fn to extract a file, it takes two args, a dest dir and the zip file name.
default_params are default values for argparse. They can be:
- default_srcdir
- default_dstdir
- default_regex
The default regex and the name_date_re are superficially similar, but different in
purpose. The default_regex is used to select files under the src directory. The
name_date_re is used to extract the date from the file name. Both apply to the
file name, but the default_regex can be anything, while name_date_re needs to select
four groups, where the 2nd, 3rd, and 4th are the year, month, and day (yes this is
brittle, but all of this is).
The dest directory must exist and should have 'zips' and 'drops' subdirs."""
if not src_vendor:
print 'must define src_vendor'
return
if not name_date_re:
print 'must define name_date_re'
return
if not extract_fn:
print 'must define extract_fn'
return
default_srcdir = default_params.get('default_srcdir')
default_dstdir = default_params.get('default_dstdir')
default_regex = default_params.get('default_regex')
parser = argparse.ArgumentParser(description='Copy and extract drop from %s.' %
src_vendor)
parser.add_argument('-dd', '--dstdir', help='destination directory (default %s)' %
default_dstdir, default=default_dstdir, metavar='dst')
parser.add_argument('-sd', '--srcdir', help='source directory (default %s)' %
default_srcdir, default=default_srcdir, metavar='src')
parser.add_argument('--name', help='file name regex to match (default \'%s\')' %
default_regex, default=default_regex, metavar='re')
parser.add_argument('--srcs', help='source files (if defined, use instead of srcdir+name)',
nargs="*", metavar='zip')
args = parser.parse_args()
if not os.path.exists(args.dstdir):
print '%s does not exist or is not a directory' % args.dstdir
return
if not args.srcs:
if not os.path.isdir(args.srcdir):
print '%s does not exist or is not a directory' % args.srcdir
return
filelist = matching_files_in_dir(args.srcdir, args.name)
else:
filelist = args.srcs
grab_files(args.dstdir, filelist, src_vendor, name_date_re, extract_fn)
|
Python
| 0.000001
|
@@ -1949,16 +1949,29 @@
'drops',
+ name + '_' +
date)%0A%0A
|
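invoke_main above requires name_date_re to expose four groups, name, year, month, and day, and the fixed line now folds the captured name into the drop directory alongside the date. A hedged example of a regex meeting that contract (the filename pattern is illustrative, not a vendor's real naming scheme):

import re

name_date_re = re.compile(r'([A-Za-z]+)_(\d{4})_(\d{2})_(\d{2})\.zip')
m = name_date_re.match('NotoSans_2014_08_15.zip')
name = m.group(1)                  # 'NotoSans'
date = '_'.join(m.group(2, 3, 4))  # '2014_08_15'
print(name + '_' + date)           # directory suffix: NotoSans_2014_08_15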
f4f5f91fac676b05f552f1a3d13e58dab63ec619
|
Refresh an instance's maintenance mode status after en/dis-abling it
|
stackdriver/instance.py
|
stackdriver/instance.py
|
import datetime
class Maintenance(object):
__source = None
__instance = None
def __init__(self, source, instance):
self.__source = source
self.__instance = instance
@property
def is_enabled(self):
return self.__source['maintenance']
@property
def reason(self):
return self.__source.get('reason', None)
@property
def user(self):
return self.__source.get('username', None)
@property
def expires(self):
try:
timestamp = self.__source['schedule']['expires_epoch']
return datetime.datetime.fromtimestamp(timestamp)
except KeyError:
return None
def enable(self, reason, expires=None):
body = {
'username': self.__instance._Instance__client.authorization[0],
'reason': reason,
'maintenance': True
}
if expires:
delta = expires - datetime.datetime(1970, 1, 1)
expires_epoch = int(delta.total_seconds())
body['schedule'] = {
'expires_epoch': expires_epoch
}
endpoint = 'instances/{id}/maintenance/'.format(id=self.__instance.id)
response = self.__instance._Instance__client.request(method='PUT',
endpoint=endpoint,
body=body)
return (response.status_code == 200)
def disable(self, reason, expires=None):
body = {
'username': self.__instance._Instance__client.authorization[0],
'reason': reason,
'maintenance': False
}
if expires:
delta = expires - datetime.datetime(1970, 1, 1)
expires_epoch = int(delta.total_seconds())
body['schedule'] = {
'expires_epoch': expires_epoch
}
endpoint = 'instances/{id}/maintenance/'.format(id=self.__instance.id)
response = self.__instance._Instance__client.request(method='PUT',
endpoint=endpoint,
body=body)
return (response.status_code == 200)
class Instance(object):
__source = None
__client = None
def __init__(self, source, client):
self.__source = source
self.__client = client
@property
def id(self):
return self.__source['id']
@property
def instance_id(self):
return self.__source['instance_id']
@property
def name(self):
return self.__source['name']
@property
def provider(self):
return self.__source['provider']
@property
def provider_region(self):
return self.__source['provider_region']
@property
def provider_zone(self):
return self.__source['provider_zone']
@property
def provider_account(self):
return self.__source['provider_account']
@property
def instance_type(self):
return self.__source['instance_type']
@property
def interfaces(self):
return self.__source['interfaces']
@property
def image(self):
return self.__source['image']
@property
def tags(self):
return {tag['name']: tag['value'] for tag in self.__source['tags']}
@property
def launched(self):
timestamp = self.__source['launch_epoch']
return datetime.datetime.fromtimestamp(timestamp)
@property
def last_monitored(self):
timestamp = self.__source['monitor_epoch']
return datetime.datetime.fromtimestamp(timestamp)
@property
def state(self):
return self.__source['state']
@property
def agent_version(self):
return self.__source['agent_version']
@property
def extractor_version(self):
return self.__source['extractor_version']
@property
def maintenance(self):
try:
return self.__source['maintenance_mode']
except KeyError:
endpoint = 'instances/{id}/maintenance/'.format(id=self.id)
response = self.__client.request(endpoint=endpoint).json()
self.__source['maintenance_mode'] = Maintenance(response['data'],
self)
return self.__source['maintenance_mode']
|
Python
| 0
|
@@ -673,24 +673,257 @@
eturn None%0A%0A
+ def _refresh(self):%0A endpoint = 'instances/%7Bid%7D/maintenance/'.format(id=self.__instance.id)%0A response = self.__instance._Instance__client.request(endpoint=endpoint)%0A%0A self.__source = response.json()%5B'data'%5D%0A%0A
def enab
@@ -946,32 +946,32 @@
expires=None):%0A
-
body = %7B
@@ -1651,32 +1651,57 @@
body=body)%0A%0A
+ self._refresh()%0A%0A
return (
@@ -2400,32 +2400,32 @@
point=endpoint,%0A
-
@@ -2473,32 +2473,57 @@
body=body)%0A%0A
+ self._refresh()%0A%0A
return (
|
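enable and disable above are identical except for the boolean they send, and both now call _refresh after the PUT. The only non-obvious step is the expiry encoding, which converts a datetime into seconds since the Unix epoch; a minimal sketch of that conversion (the date is illustrative):

import datetime

def to_epoch_seconds(expires):
    # Same conversion enable/disable perform before sending the schedule
    delta = expires - datetime.datetime(1970, 1, 1)
    return int(delta.total_seconds())

print(to_epoch_seconds(datetime.datetime(2015, 6, 1)))  # 1433116800

Since the two methods differ only in the maintenance flag, they could share a single private helper taking that boolean, which is why the same fix appears twice in the diff.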
bb531100350045ab9c9e0397d17dfb90a953261c
|
version bump
|
dynamic_rest/__init__.py
|
dynamic_rest/__init__.py
|
__version__ = "1.3.0"
|
Python
| 0.000001
|
@@ -16,7 +16,7 @@
1.3.
-0
+1
%22%0A
|
07874ee51375b7597d79288e85acc68294d4b007
|
customize the JSON dump for Event objects
|
oabutton/apps/web/views.py
|
oabutton/apps/web/views.py
|
from django.shortcuts import render_to_response
from django.conf import settings
from django.core.context_processors import csrf
from oabutton.common import SigninForm
def homepage(req):
# Need to lazy import the Event model so that tests work with
# mocks
c = {}
c.update(csrf(req))
from oabutton.apps.bookmarklet.models import Event
evt_count = Event.objects.count()
json_data = Event.objects.all().to_json()
c.update({'count': evt_count,
'events': json_data,
'hostname': settings.HOSTNAME,
'signin_form': SigninForm()})
return render_to_response('web/index.jade', c)
|
Python
| 0
|
@@ -161,16 +161,28 @@
ninForm%0A
+import json%0A
%0A%0Adef ho
@@ -410,19 +410,33 @@
-json_
data =
+ %5B%5D%0A%0A for evt in
Eve
@@ -455,17 +455,357 @@
ll()
-.to_json(
+:%0A data.append(%7B'doi': evt.doi, %0A 'coords': dict(evt.coords),%0A 'accessed': evt.accessed.strftime(%22%25b %25d, %25Y%22),%0A 'user_name': evt.user_name,%0A 'user_profession': evt.user_profession,%0A 'description': evt.description,%0A 'story': evt.story,%0A 'url': evt.url,%0A %7D
)%0A%0A
@@ -869,13 +869,20 @@
json
-_
+.dumps(
data
+)
,%0A
|
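The rewrite above serializes each Event by hand partly because json.dumps cannot encode datetime values directly, hence the per-field strftime. A minimal illustration of the failure and of the default= alternative (the event dict is illustrative):

import json
import datetime

evt = {'doi': '10.1000/xyz', 'accessed': datetime.datetime(2013, 5, 1)}

try:
    json.dumps(evt)
except TypeError as e:
    print(e)  # datetime is not JSON serializable

# Either pre-format dates, as the view does, or supply a default encoder
print(json.dumps(evt, default=lambda o: o.strftime('%b %d, %Y')))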
3e332088c25ce0515b68d17c6f38bd02756cc4a3
|
add state executive branches too
|
openstates/jurisdiction.py
|
openstates/jurisdiction.py
|
from pupa.scrape import Jurisdiction, Organization
from openstates.base import OpenstatesBaseScraper
from openstates.people import OpenstatesPersonScraper
from openstates.events import OpenstatesEventScraper
from openstates.bills import OpenstatesBillScraper
POSTS = {
'ak': {'lower': range(1, 41), 'upper': [chr(n) for n in range(65, 85)]},  # a list, so it can be iterated more than once
'al': {'lower': range(1, 106), 'upper': range(1, 36)},
'nc': {'lower': range(1, 121), 'upper': range(1, 51)},
}
def chamber_name(state, chamber):
if state in ('ne', 'dc', 'pr'):
raise ValueError(state)
if chamber == 'lower':
if state in ('ca', 'ny', 'wi'):
return 'State Assembly'
elif state in ('md', 'va', 'wv'):
return 'House of Delegates'
elif state == 'nv':
return 'Assembly'
elif state == 'nj':
return 'General Assembly'
else:
return 'House of Representatives' # 41 of these
elif chamber == 'upper':
if state in ('ca', 'ga', 'la', 'ms', 'ny', 'or', 'pa', 'wa', 'wi'):
return 'State Senate'
else:
return 'Senate'
def make_jurisdiction(a_state):
osbs = OpenstatesBaseScraper(None, None)
metadata = osbs.api('metadata/{}?'.format(a_state))
# timezone
# chambers.title
leg_sessions = []
for td in metadata['terms']:
for s in td['sessions']:
session = {'identifier': s,
'name': metadata['session_details'][s]['display_name'],
'start_date': metadata['session_details'][s].get('start_date', '')[:10],
'end_date': metadata['session_details'][s].get('end_date', '')[:10],
}
leg_sessions.append(session)
# make scrapers
class PersonScraper(OpenstatesPersonScraper):
state = a_state
class BillScraper(OpenstatesBillScraper):
state = a_state
class EventScraper(OpenstatesEventScraper):
state = a_state
class StateJuris(Jurisdiction):
division_id = 'ocd-division/country:us/state:' + a_state
classification = 'government'
name = metadata['name']
scrapers = {'people': PersonScraper,
'bills': BillScraper,
#'events': EventScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'},
{'name': 'Independent'},
]
legislative_sessions = leg_sessions
def get_organizations(self):
legislature = Organization(metadata['legislature_name'], classification='legislature')
yield legislature
for otype in ('upper', 'lower'):
if otype in metadata['chambers']:
org = Organization(metadata['name'] + ' ' + chamber_name(a_state, otype),
classification=otype, parent_id=legislature._id)
for post in POSTS[a_state][otype]:
org.add_post(str(post), metadata['chambers'][otype]['title'])
yield org
return StateJuris
|
Python
| 0.000002
|
@@ -2686,16 +2686,158 @@
islature
+%0A executive = Organization(metadata%5B'name'%5D + ' Executive Branch',%0A classification='executive')
%0A%0A
|
629f20c21aa0bdd7d28e3c0884e5c4a1e5f07893
|
Add doseq to WFS request query urlencode
|
owslib/feature/__init__.py
|
owslib/feature/__init__.py
|
# =============================================================================
# OWSLib. Copyright (C) 2012 Jachym Cepicky
#
# Contact email: jachym.cepicky@gmail.com
#
# =============================================================================
from __future__ import (absolute_import, division, print_function)
from owslib.crs import Crs
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
import logging
from owslib.util import log
from owslib.feature.schema import get_schema
class WebFeatureService_(object):
"""Base class for WebFeatureService implementations"""
def getBBOXKVP (self,bbox,typename):
"""Formate bounding box for KVP request type (HTTP GET)
@param bbox: (minx,miny,maxx,maxy[,srs])
@type bbox: List
@param typename: feature name
@type typename: String
@returns: String properly formatted according to version and
coordinate reference system
"""
srs = None
# srs of the bbox is specified in the bbox as fifth parameter
if len(bbox) == 5:
srs = self.getSRS(bbox[4],typename[0])
# take default srs
else:
srs = self.contents[typename[0]].crsOptions[0]
# 1.1.0 and 2.0.0 have same encoding
if self.version in ["1.1.0","2.0.0"]:
# format bbox parameter
if srs.encoding == "urn" :
if srs.axisorder == "yx":
return "%s,%s,%s,%s,%s" % \
(bbox[1],bbox[0],bbox[3],bbox[2],srs.getcodeurn())
else:
return "%s,%s,%s,%s,%s" % \
(bbox[0],bbox[1],bbox[2],bbox[3],srs.getcodeurn())
else:
return "%s,%s,%s,%s,%s" % \
(bbox[0],bbox[1],bbox[2],bbox[3],srs.getcode())
# 1.0.0
else:
return "%s,%s,%s,%s,%s" % \
(bbox[0],bbox[1],bbox[2],bbox[3],srs.getcode())
def getSRS(self, srsname, typename):
"""Returns None or Crs object for given name
@param typename: feature name
@type typename: String
"""
if not isinstance(srsname, Crs):
srs = Crs(srsname)
else:
srs = srsname
try:
index = self.contents[typename].crsOptions.index(srs)
# Return the Crs string that was pulled directly from the
# GetCaps document (the 'id' attribute in the Crs object).
return self.contents[typename].crsOptions[index]
except ValueError:
options = ", ".join(map(lambda x: x.id, self.contents[typename].crsOptions))
log.warning("Requested srsName '%s' not available for requested typename '%s'. \
Options are: %s. " % (srs.getcode(), typename, options))
return None
def getGETGetFeatureRequest(self, typename=None, filter=None, bbox=None, featureid=None,
featureversion=None, propertyname=None, maxfeatures=None,storedQueryID=None, storedQueryParams=None,
outputFormat=None, method='Get', startindex=None):
"""Formulate proper GetFeature request using KVP encoding
Parameters
----------
typename : list
List of typenames (string)
filter : string
XML-encoded OGC filter expression.
bbox : tuple
(left, bottom, right, top) in the feature type's coordinates == (minx, miny, maxx, maxy)
featureid : list
List of unique feature ids (string)
featureversion : string
Default is most recent feature version.
propertyname : list
List of feature property names. '*' matches all.
maxfeatures : int
Maximum number of features to be returned.
method : string
Qualified name of the HTTP DCP method to use.
outputFormat: string (optional)
Requested response format of the request.
startindex: int (optional)
Start position to return feature set (paging in combination with maxfeatures)
There are 3 different modes of use
1) typename and bbox (simple spatial query)
2) typename and filter (==query) (more expressive)
3) featureid (direct access to known features)
"""
storedQueryParams = storedQueryParams or {}
base_url = next((m.get('url') for m in self.getOperationByName('GetFeature').methods if m.get('type').lower() == method.lower()))
base_url = base_url if base_url.endswith("?") else base_url+"?"
request = {'service': 'WFS', 'version': self.version, 'request': 'GetFeature'}
# check featureid
if featureid:
request['featureid'] = ','.join(featureid)
elif bbox:
request['bbox'] = self.getBBOXKVP(bbox,typename)
elif filter:
request['query'] = str(filter)
if typename:
typename = [typename] if type(typename) == type("") else typename
if int(self.version.split('.')[0]) >= 2:
request['typenames'] = ','.join(typename)
else:
request['typename'] = ','.join(typename)
if propertyname:
request['propertyname'] = ','.join(propertyname)
if featureversion:
request['featureversion'] = str(featureversion)
if maxfeatures:
if int(self.version.split('.')[0]) >= 2:
request['count'] = str(maxfeatures)
else:
request['maxfeatures'] = str(maxfeatures)
if startindex:
request['startindex'] = str(startindex)
if storedQueryID:
request['storedQuery_id']=str(storedQueryID)
for param in storedQueryParams:
request[param]=storedQueryParams[param]
if outputFormat is not None:
request["outputFormat"] = outputFormat
data = urlencode(request)
return base_url+data
def get_schema(self, typename):
"""
Get layer schema compatible with :class:`fiona` schema object
"""
return get_schema(self.url, typename, self.version)
|
Python
| 0
|
@@ -6058,16 +6058,28 @@
(request
+, doseq=True
)%0A%0A
|
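Without doseq=True, urlencode calls str() on a sequence value and percent-encodes the literal Python list; with it, each element becomes its own key=value pair, which is what a WFS endpoint expects for repeated parameters. A minimal demonstration (Python 3 import shown; the module above falls back to urllib.urlencode on Python 2):

from urllib.parse import urlencode

params = {'typename': ['layer_a', 'layer_b']}
print(urlencode(params))              # typename=%5B%27layer_a%27%2C+%27layer_b%27%5D
print(urlencode(params, doseq=True))  # typename=layer_a&typename=layer_b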
371ddf2c4beb79b82b1154abfa1efdd6bc5e379a
|
Change version to 0.5.dev
|
elasticutils/_version.py
|
elasticutils/_version.py
|
# follow pep-386
# Examples:
# * 0.3 - released version
# * 0.3a1 - alpha version
# * 0.3.dev - version in development
__version__ = '0.4'
__releasedate__ = '20120731'
|
Python
| 0.000001
|
@@ -140,9 +140,13 @@
'0.
-4
+5.dev
'%0A__
@@ -166,14 +166,6 @@
= '
-20120731
'%0A
|
fb71dbaa34f51df1125c9d8d9e6e41cdc0260b29
|
Change default iterations
|
fuzz.py
|
fuzz.py
|
from __future__ import print_function
import argparse,binascii,os,pprint,traceback,sys
from random import randrange
from dnslib import DNSRecord,DNSQuestion,QTYPE,DNSError
def fuzz_delete(b):
""" Delete byte """
f = b[:]
del f[randrange(len(b))]
return f
def fuzz_add(b):
""" Add byte """
f = b[:]
f.insert(randrange(len(b)),randrange(256))
return f
def fuzz_change(b):
""" Change byte """
f = b[:]
f[randrange(len(b))] = randrange(256)
return f
def fname(f):
try:
return f.func_name
except AttributeError:
return f.__name__
if __name__ == '__main__':
a = argparse.ArgumentParser(description="DNS Fuzzer")
a.add_argument("--server","-s",default="8.8.8.8",
help="DNS server address[:port] (default:8.8.8.8:53)")
a.add_argument("--query","-q",default="google.com",
help="DNS query (default:google.com)")
a.add_argument("--type","-t",default="A",
help="DNS query type (default:A)")
a.add_argument("--debug","-d",action='store_true',default=False,
help="Print debug output")
a.add_argument("--number","-n",type=int,default=100,
help="Number of iterations")
a.add_argument("--tcp",action='store_true',default=False,
help="Use TCP (default: UDP)")
args = a.parse_args()
def p(*s):
if args.debug:
print(*s)
uncaught = 0
exceptions = []
address,_,port = args.server.partition(':')
port = int(port or 53)
question = DNSRecord(q=DNSQuestion(args.query,getattr(QTYPE,args.type)))
packet = bytearray(question.send(address,port,tcp=args.tcp))
original = DNSRecord.parse(packet)
p("Original:")
p(original.toZone(prefix=" | "))
for f in (fuzz_delete,fuzz_add,fuzz_change):
for i in range(args.number):
try:
fuzzed_pkt = f(packet)
fuzzed = DNSRecord.parse(fuzzed_pkt)
if original != fuzzed:
diff = original.diff(fuzzed)
p("[%s:parsed ok] >>> %d Diff Errors" % (fname(f),len(diff)))
p(pprint.pformat(diff))
except DNSError as e:
p("[%s:exception] >>> %s" % (fname(f),str(e)))
except Exception as e:
uncaught += 1
exceptions.append((binascii.hexlify(fuzzed_pkt),traceback.format_exc(limit=1)))
p(traceback.format_exc())
p("-----------------------")
print("Uncaught Exceptions: %d" % uncaught)
if exceptions:
pprint.pprint(exceptions)
|
Python
| 0
|
@@ -1196,25 +1196,19 @@
ault=100
+0
,%0A
-
@@ -1237,16 +1237,31 @@
erations
+ (default:1000)
%22)%0A a
|
fda7fa04943575c01f3c5d3d19bb07b9efddbcc8
|
remove MIDI
|
game.py
|
game.py
|
__author__ = 'Florian Tautz'
import pygame
import pygame.midi
class Game:
def __init__(self):
pygame.init()
pygame.midi.init()
midi_port = pygame.midi.get_default_output_id()
self._midi = pygame.midi.Output(midi_port)
self._midi.set_instrument(56)
size = self._width, self._height = 1920, 1200
self._speed = [2, 2]
self._screen = pygame.display.set_mode(size, pygame.FULLSCREEN|
pygame.DOUBLEBUF|
pygame.HWSURFACE)
self._ball = pygame.image.load("ball.png")
self._ball_rect = self._ball.get_rect()
self._last_update = 0
def run(self):
while True:
for event in pygame.event.get():
self.handle_event(event)
self.update(pygame.time.get_ticks())
self.draw()
def update(self, game_time):
time_passed = game_time - self._last_update
if time_passed < 10:
return
self._last_update = game_time
v = 0.2
dx = self._speed[0] * (time_passed / (1/v))
dy = self._speed[1] * (time_passed / (1/v))
self._ball_rect = self._ball_rect.move(dx, dy)
# keep ball in bounds
if self._ball_rect.left < 0 or self._ball_rect.right > self._width:
self._speed[0] *= -1
if self._ball_rect.top < 0 or self._ball_rect.bottom > self._height:
self._speed[1] *= -1
def draw(self):
self._screen.fill((255, 255, 255))
self._screen.blit(self._ball, self._ball_rect)
pygame.display.flip()
def handle_event(self, event):
if event.type == pygame.QUIT:
self.stop()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
self.stop()
if event.key == pygame.K_f:
self._midi.note_on(72, 127) # c
if event.key == pygame.K_g:
self._midi.note_on(74, 127) # d
if event.key == pygame.K_h:
self._midi.note_on(76, 127) # e
if event.key == pygame.K_j:
self._midi.note_on(77, 127) # f
if event.key == pygame.K_k:
self._midi.note_on(79, 127) # g
if event.key == pygame.K_l:
self._midi.note_on(81, 127) # a
if event.type == pygame.KEYUP:
if event.key == pygame.K_f:
self._midi.note_off(72, 127) # c
if event.key == pygame.K_g:
self._midi.note_off(74, 127) # d
if event.key == pygame.K_h:
self._midi.note_off(76, 127) # e
if event.key == pygame.K_j:
self._midi.note_off(77, 127) # f
if event.key == pygame.K_k:
self._midi.note_off(79, 127) # g
if event.key == pygame.K_l:
self._midi.note_off(81, 127) # a
def stop(self):
del self._midi
pygame.midi.quit()
exit()
|
Python
| 0.000085
|
@@ -41,27 +41,8 @@
ame%0A
-import pygame.midi%0A
%0A%0Acl
@@ -100,180 +100,8 @@
it()
-%0A pygame.midi.init()%0A midi_port = pygame.midi.get_default_output_id()%0A self._midi = pygame.midi.Output(midi_port)%0A self._midi.set_instrument(56)
%0A%0A
@@ -1690,1179 +1690,28 @@
p()%0A
- if event.key == pygame.K_f:%0A self._midi.note_on(72, 127) # c%0A if event.key == pygame.K_g:%0A self._midi.note_on(74, 127) # d%0A if event.key == pygame.K_h:%0A self._midi.note_on(76, 127) # e%0A if event.key == pygame.K_j:%0A self._midi.note_on(77, 127) # f%0A if event.key == pygame.K_k:%0A self._midi.note_on(79, 127) # g%0A if event.key == pygame.K_l:%0A self._midi.note_on(81, 127) # a%0A if event.type == pygame.KEYUP:%0A if event.key == pygame.K_f:%0A self._midi.note_off(72, 127) # c%0A if event.key == pygame.K_g:%0A self._midi.note_off(74, 127) # d%0A if event.key == pygame.K_h:%0A self._midi.note_off(76, 127) # e%0A if event.key == pygame.K_j:%0A self._midi.note_off(77, 127) # f%0A if event.key == pygame.K_k:%0A self._midi.note_off(79, 127) # g%0A if event.key == pygame.K_l:%0A self._midi.note_off(81, 127) # a%0A%0A def stop(self):%0A del self._midi%0A pygame.midi.quit()
+%0A def stop(self):
%0A
|
ba6b5c50e5ea1875e117d72675fb58092325b193
|
add moves: left, right and down
|
game.py
|
game.py
|
#using python2
import Tkinter
from visual import Visual
from relief import Relief
from figure import Figure
from random import randint
class Game:
def __init__(self):
self.root= Tkinter.Tk()
self.vis= Visual(self.root)
self.relief= Relief()
self.figure= None
self.root.after_idle(self.tick)
self.root.bind('<KeyPress>', self.press_key)
self.root.mainloop()
def tick(self):
self.root.after(200, self.tick)
if not self.figure:
self.figure= Figure()
if self.relief.have_collision(self.figure.get_all()):
print 'generate collision with relief'
self.root.quit()
self.figure.down_move()
if self.try_stand_figure():
self.figure= None
if self.relief.overload():
print 'You Fail'
self.root.quit()
self.vis.reset()
self.vis.draw(self.relief.get_all(), 'powder blue')
if self.figure:
self.vis.draw(self.figure.get_all(), 'gray')
def press_key(self, event):
print 'pressed key'
def try_stand_figure(self):
if self.relief.have_collision(self.figure.get_all()):
self.figure.rollback()
self.relief.extend(self.figure.get_all())
self.relief.remove_filled_lines()
return True
return False
Game()
|
Python
| 0.999992
|
@@ -264,16 +264,56 @@
= None%0A%0A
+%09%09self.relief.extend(%5B(0,0), (0,3)%5D)%0A%09%09%0A
%09%09self.r
@@ -446,17 +446,17 @@
t.after(
-2
+3
00, self
@@ -789,13 +789,53 @@
)%0A%09%09
-%09%0A%09%09%09
+%0A%09%09self.redraw()%09%0A%09%09%09%0A%09%09%0A%09%0A%09def redraw(self):
%0A%09%09s
@@ -894,19 +894,11 @@
), '
-powder blue
+red
')%0A%09
@@ -961,15 +961,13 @@
'gr
-ay')%0A%09%09
+een')
%0A%09%09%0A
@@ -1001,31 +1001,516 @@
:%0A%09%09
-print 'pressed key'%0A%09%0A%09
+inp= event.char.upper()%0A%09%09%0A%09%09if inp == 'D': %0A%09%09%09self.figure.right_move()%0A%09%09%09if self.relief.have_collision(self.figure.get_all()):%0A%09%09%09%09self.figure.rollback()%0A%09%09%09else:%0A%09%09%09%09self.redraw()%0A%09%09%09%0A%09%09elif inp == 'A': %0A%09%09%09self.figure.left_move()%0A%09%09%09if self.relief.have_collision(self.figure.get_all()):%0A%09%09%09%09self.figure.rollback()%0A%09%09%09else:%0A%09%09%09%09self.redraw()%0A%09%09%09%09%0A%09%09elif inp == 'S': %0A%09%09%09self.figure.down_move()%0A%09%09%09if self.relief.have_collision(self.figure.get_all()):%0A%09%09%09%09self.figure.rollback()%0A%09%09%09else:%0A%09%09%09%09self.redraw()
%0A%09%0A%09
|
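The three new branches in press_key repeat the same move/collision-check/rollback/redraw cycle. A hedged consolidation using a dict from key to move method (the method names come from the file above; the dispatch itself is an illustration, not the project's code):

def press_key(self, event):
    moves = {
        'A': self.figure.left_move,
        'S': self.figure.down_move,
        'D': self.figure.right_move,
    }
    move = moves.get(event.char.upper())
    if move is None:
        return
    move()
    if self.relief.have_collision(self.figure.get_all()):
        self.figure.rollback()  # undo the illegal move
    else:
        self.redraw()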
4aa248330d8fee41601b606db09bfe6f33547a63
|
Create a function to check which keys are being pressed and notify the server
|
game.py
|
game.py
|
import pygame
from PodSixNet.Connection import ConnectionListener, connection
from time import sleep
from pygame.locals import *
#Create a new class to hold our game object
#This extends the connection listener so that we can pump the server for messages
class OnlineGame(ConnectionListener):
#Constructor
def __init__(self):
#Initialize the game
pygame.init()
size = width, height = 600, 600
self.screen = pygame.display.set_mode(size)
pygame.display.set_caption("Online Game")
#Set the background colour
self.bg = (200, 200, 200)
#Create the players
self.p1 = pygame.image.load("player1.png")
self.p2 = pygame.image.load("player2.png")
self.p1_rect = self.p1.get_rect()
self.p2_rect = self.p2.get_rect()
self.p2_rect.x = width - self.p2_rect.width
#Initialize the gameID and player ID
self.gameID = None
self.player = None
#Create the game clock
self.clock = pygame.time.Clock()
#Fill the screen with our background colour
self.screen.fill(self.bg)
#Connect to the server
self.Connect()
#Set running to false
self.running = False
#While the game isn't running pump the server
while not self.running:
#Check if the user exited the game
self.check_exit()
self.Pump()
connection.Pump()
sleep(0.01)
#Update the caption
pygame.display.set_caption("Game ID: {} - Player: {}".format(self.gameID, self.player))
#Create the function to update the game
def update(self):
#Pump the server to check for updates
connection.Pump()
self.Pump()
#Check if the user exited
self.check_exit()
#Tick the game clock
self.clock.tick(60)
#Fill the background
self.screen.fill(self.bg)
#Draw the players
self.screen.blit(self.p1, self.p1_rect)
self.screen.blit(self.p2, self.p2_rect)
#Update the display
pygame.display.flip()
#Create a function to receive the start game signal
def Network_startgame(self, data):
#Get the game ID and player number from the data
self.gameID = data['gameID']
self.player = data['player']
#Set the game to running so that we enter the update loop
self.running = True
#Create a function that lets us check whether the user has clicked to exit (required to avoid crash)
def check_exit(self):
#Check if the user exited
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
#exit()
#If the file was run and not imported
if __name__ == "__main__":
#Create the game object
og = OnlineGame()
#Every tick update the game
while True:
og.update()
|
Python
| 0.000001
|
@@ -1596,16 +1596,777 @@
player))
+%0A %0A #Create a function to tell the server what keys are being pressed%0A def check_keys(self):%0A%09%09%0A%09%09#Get the keys that are being pressed%0A%09%09keys = pygame.key.get_pressed()%0A%09%09%0A%09%09#Check which keys were pressed%0A%09%09if keys%5BK_UP%5D:%0A%09%09%09#Send the server an update%0A%09%09%09self.Send(%7B%22action%22:%22move%22,%22key%22:%22UP%22,%22player%22:self.player,%22gameID%22:self.gameID%7D)%0A%09%09if keys%5BK_DOWN%5D:%0A%09%09%09#Send the server an update%0A%09%09%09self.Send(%7B%22action%22:%22move%22,%22key%22:%22DOWN%22,%22player%22:self.player,%22gameID%22:self.gameID%7D)%0A%09%09if keys%5BK_LEFT%5D:%0A%09%09%09#Send the server an update%0A%09%09%09self.Send(%7B%22action%22:%22move%22,%22key%22:%22LEFT%22,%22player%22:self.player,%22gameID%22:self.gameID%7D)%0A%09%09if keys%5BK_RIGHT%5D:%0A%09%09%09#Send the server an update%0A%09%09%09self.Send(%7B%22action%22:%22move%22,%22key%22:%22RIGHT%22,%22player%22:self.player,%22gameID%22:self.gameID%7D)
%0A%0A #C
|
df3b57394b387021984ee4ea5d2c3835144d5e39
|
Improve readability of class functions.
|
gate.py
|
gate.py
|
#!/usr/bin/env python3
from collections import namedtuple
import re
import xml.etree.ElementTree as ET
import skll
from collections import Counter
from pprint import pprint
class InputError(Exception):
pass
class Annotation:
def __init__(self, filename):
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
def get_annotation_set_names(self):
annotation_set_names = []
for annotation_set in self.root.findall(".//AnnotationSet"):
annotation_set_names.append(annotation_set.get("Name"))
return annotation_set_names
def get_annotations(
self,
*,
annotation_type=None,
annotation_set=None
):
if annotation_set:
return self.root.findall(
''.join(
[
".//AnnotationSet[@Name='{}']".format(annotation_set),
"/Annotation[@Type='{}']".format(annotation_type)
]
)
)
elif annotation_type:
return self.root.findall(
".//Annotation[@Type='{}']".format(annotation_type)
)
else:
return self.root.findall(
".//Annotation"
)
class Schema:
def __init__(self, filename):
self.filename = filename
self.tree = ET.parse(self.filename)
self.root = self.tree.getroot()
self.namespace = {
'schema':'http://www.w3.org/2000/10/XMLSchema'
}
def get_attributes(self, annotation_type):
attributes = self.root.findall(
".//schema:element[@name='{}']"
"//schema:attribute".format(annotation_type),
namespaces=self.namespace
)
return attributes
def pair_annotations(
annotations1,
annotations2,
*,
annotation_type=None,
schema=None
):
annotations1_list = list(annotations1)
annotations2_list = list(annotations2)
# Build list of annotation pairs
annotation_pairs = []
for annotation1 in annotations1_list:
for annotation2 in annotations2_list:
# if annotation spans overlap
if ( ( int(annotation1.get('StartNode')) >= int(annotation2.get('StartNode'))
and int(annotation1.get('StartNode')) < int(annotation2.get('EndNode')) )
or ( int(annotation1.get('EndNode')) > int(annotation2.get('StartNode'))
and int(annotation1.get('EndNode')) <= int(annotation2.get('EndNode')) ) ):
annotation_pairs.append((annotation1, annotation2))
annotations2_list.remove(annotation2)
break
annotations1_list.clear()
annotations2_list.clear()
# Unpack Names and Values of each annotation
content_pairs = []
for pair in annotation_pairs:
new_pair = []
for annotation in pair:
annotation = { feature.findtext('./Name') : feature.findtext('./Value') for feature in list(annotation) }
new_pair.append(annotation)
content_pairs.append(new_pair)
content_pairs = tuple(content_pairs)
# Compile comparison sets for each annotation attribute
ComparisonSet = namedtuple('ComparisonSet', ['attribute', 'annotator1', 'annotator2'])
attributes = [ attribute.get('name') for attribute in schema.get_attributes(annotation_type) ]
comparison_sets = []
for attribute in attributes:
annotator1 = tuple( annotation_pair[0].get(attribute) for annotation_pair in content_pairs )
annotator2 = tuple( annotation_pair[1].get(attribute) for annotation_pair in content_pairs )
attribute_annotations = ComparisonSet(attribute, annotator1, annotator2)
comparison_sets.append(attribute_annotations)
# set of annotations that fit the given attribute (attribute_annotations)
return comparison_sets
def kappa(comparison_set, weights=None):
if len(comparison_set.annotator1) == len(comparison_set.annotator2):
new_comparison_set = comparison_set
if weights == None:
# skll.kappa accepts only int-like arguments,
# so, given a set of string annotations, each will
# be assigned a unique int id.
# this is only statistically accurate when calculating an unweighted kappa
# since only then do the distances between annotations not matter.
# store a set of annotations...
annotation_dict = {}
for annotations in [
comparison_set.annotator1,
comparison_set.annotator2
]:
for annotation in annotations:
annotation_dict.update({annotation : None})
# then assign ints as ids
id = 1
for k in annotation_dict:
annotation_dict.update({k : str(id)})
id += 1
def annotation_int(annotations):
for annotation in annotations:
if annotation in annotation_dict:
yield re.sub(
annotation,
annotation_dict.get(annotation),
annotation
)
# replace the annotation strings with int labels
new_comparison_set = new_comparison_set._replace(
annotator1=tuple(
annotation_int(comparison_set.annotator1)
),
annotator2=tuple(
annotation_int(comparison_set.annotator2)
)
)
annotator1 = new_comparison_set.annotator1
annotator2 = new_comparison_set.annotator2
else:
def annotation_int(annotations):
for annotation in annotations:
if annotation:
yield re.sub(
r'(\d+).*',
r'\1',
annotation
)
else:
yield annotation
new_comparison_set = new_comparison_set._replace(
annotator1=tuple(
annotation_int(comparison_set.annotator1)
),
annotator2=tuple(
annotation_int(comparison_set.annotator2)
)
)
annotator1 = new_comparison_set.annotator1
annotator2 = new_comparison_set.annotator2
kappa_score = skll.kappa(
annotator1,
annotator2,
weights=weights
)
kappa_length = len(new_comparison_set.annotator1)
return dict(
{
'score' : kappa_score,
'length' : kappa_length
}
)
|
Python
| 0.000001
|
@@ -646,24 +646,29 @@
annotations(
+self,
%0A sel
@@ -664,30 +664,27 @@
-self,%0A
+
*,%0A
@@ -671,27 +671,48 @@
+
*,%0A
+
anno
@@ -725,24 +725,40 @@
_type=None,%0A
+
anno
@@ -772,21 +772,16 @@
set=None
-%0A
):%0A
@@ -1892,21 +1892,16 @@
tations(
-%0A
annotati
@@ -1906,16 +1906,33 @@
tions1,%0A
+
anno
@@ -1941,27 +1941,61 @@
tions2,%0A
-*,%0A
+ *,%0A
annotati
@@ -2004,24 +2004,41 @@
_type=None,%0A
+
schema=N
@@ -2040,17 +2040,16 @@
ema=None
-%0A
):%0A%0A
|
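The unweighted branch of kappa above assigns each distinct string annotation an arbitrary integer id before calling skll.kappa, which is statistically valid only because an unweighted kappa ignores distances between labels. A minimal sketch of that mapping step (the labels are illustrative):

# Map string labels to arbitrary int ids, as the unweighted branch does
annotator1 = ('pos', 'neg', 'pos', 'neu')
annotator2 = ('pos', 'neg', 'neu', 'neu')

ids = {}
for label in annotator1 + annotator2:
    ids.setdefault(label, len(ids) + 1)

a1 = [ids[label] for label in annotator1]  # [1, 2, 1, 3]
a2 = [ids[label] for label in annotator2]  # [1, 2, 3, 3]
# skll.kappa(a1, a2) would accept these; the id distances carry no
# meaning, so only an unweighted kappa is valid after this mapping.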
95eeefa9b8cf7decd51265eaf624ff4551ac6a15
|
add feature to create a new app from command line, remove commands that are not implemented
|
glim.py
|
glim.py
|
from termcolor import colored
from glim.app import start as appify
# glim with use of click
import click
@click.group()
def glim():
pass
@click.command()
@click.option('--host', default = '127.0.0.1', help = 'enter ip')
@click.option('--port', default = '8080', help = 'enter port')
@click.option('--env', default = 'development', help = 'enter environment (development)')
def start(host, port, env):
print colored('glim %s server is running on %s:%s' % (env, host, port), 'green')
appify(host, port, env)
@click.command()
@click.argument('name')
def new(name):
print colored('Created new app %s' % name, 'blue')
@click.command()
@click.argument('name')
def model(name):
print colored('Creating new model %s' % name, 'blue')
@click.command()
@click.argument('name')
def controller(name):
print colored('Creating new controller %s' % name, 'blue')
@click.command()
def routes():
print colored('Dumping all routes ..', 'blue')
glim.add_command(start)
glim.add_command(new)
glim.add_command(model)
glim.add_command(controller)
glim.add_command(routes)
if __name__ == '__main__':
glim()
|
Python
| 0
|
@@ -97,16 +97,47 @@
rt click
+%0Aimport shutil, errno%0Aimport os
%0A%0A@click
@@ -566,40 +566,16 @@
d()%0A
-@click.argument('name')%0A
def new(
name
@@ -574,129 +574,198 @@
new(
-name
):%0A
-print colored('Created new app %25s' %25 name, 'blue')%0A%0A@click.command()%0A@click.argument('name')%0Adef model(name):%0A
+# resolve prototype path and its childs%0A proto_path = 'glim/proto/project'%0A cpath = os.path.dirname(os.path.realpath(__file__))%0A try:%0A copytree(proto_path, cpath)%0A
@@ -788,110 +788,54 @@
reat
-ing
+ed
new
-model %25s' %25 name, 'blue')%0A%0A@click.command()%0A@click.argument('name')%0Adef controller(name)
+glim app', 'blue')%0A except
:%0A
+
+
prin
@@ -849,205 +849,321 @@
ed('
-Creating new controller %25s' %25 name, 'blue')%0A%0A@click.command()%0Adef routes():%0A print colored('Dumping all routes ..', 'blue')%0A%0Aglim.add_command(start)%0Aglim.add_command(new)%0Aglim.add_command(model)
+App already exists', 'red')%0A%0Adef copytree(src, dst, symlinks=False, ignore=None):%0A for item in os.listdir(src):%0A s = os.path.join(src, item)%0A d = os.path.join(dst, item)%0A if os.path.isdir(s):%0A shutil.copytree(s, d, symlinks, ignore)%0A else:%0A shutil.copy2(s, d)%0A
%0Agli
@@ -1180,18 +1180,13 @@
and(
-controller
+start
)%0Agl
@@ -1204,14 +1204,11 @@
and(
-routes
+new
)%0A%0Ai
@@ -1239,12 +1239,13 @@
:%0A glim()
+%0A
|
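The diff replaces the placeholder commands with real scaffolding: `new` resolves a prototype tree at `glim/proto/project` and copies it next to the running module. The copy helper it adds, shown on its own:

import os
import shutil

def copytree(src, dst, symlinks=False, ignore=None):
    # copy each child of src into dst; unlike shutil.copytree, dst may already exist
    for item in os.listdir(src):
        s = os.path.join(src, item)
        d = os.path.join(dst, item)
        if os.path.isdir(s):
            shutil.copytree(s, d, symlinks, ignore)
        else:
            shutil.copy2(s, d)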
c1f7a7a54c8dd6afd36bb9758cb6053aaf65674a
|
Fix missing `super` call
|
home.py
|
home.py
|
# coding: utf-8
import logging
import os
import posixpath
import sys
import zipfile

import gdata.alt.appengine
import gdata.service
from google.appengine.api import users
from google.appengine.ext import blobstore
from google.appengine.ext import webapp
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp.util import run_wsgi_app

import models
from importers import *

sys.setrecursionlimit(10000) # SDK fix

class Page(webapp.RequestHandler):
    services = {
        'Picasa': {
            'scope': 'http://picasaweb.google.com/data/',
            'purpose': 'Photos',
            'settings': PicasaSettingsProvider,
            'importer': PicasaImporter
        },
        'Calendar': {
            'scope': 'http://www.google.com/calendar/feeds/',
            'purpose': 'Events',
            #'client': gdata.calendar.service.CalendarService
        },
        'Docs': {
            'scope': 'http://docs.google.com/feeds/',
            'purpose': 'Notes',
            #'client': gdata.docs.client.DocsClient
        }
    }

    def __init__(self):
        self.user = users.get_current_user()
        if self.user:
            self.url = users.create_logout_url('/')
            self.is_logged = True
        else:
            self.url = users.create_login_url('/services')
            self.is_logged = False
        self.values = {
            'user': self.user,
            'url': self.url,
            'is_logged': self.is_logged
        }

    def get_user_info(self):
        if self.is_logged:
            return models.User.gql('WHERE name=:1', self.user).get()
        else:
            return None

    def get_user_services(self):
        user_info = self.get_user_info()
        if user_info:
            return user_info.services

    def get_user_archive(self):
        user_info = self.get_user_info()
        if not user_info:
            return None
        blob_key = user_info.blob_key
        archive_blob = blobstore.BlobReader(blob_key)
        return zipfile.ZipFile(archive_blob, 'r') if archive_blob else None #returned file should be manually "closed", might be dangerous

    def render(self, file, values = {}):
        values.update(self.values)
        path = posixpath.join(posixpath.dirname(__file__), 'templates/%s.html' % file)
        self.response.out.write(template.render(path, values))

    def write(self, string):
        self.response.out.write(string)

class MainPage(Page):
    def get(self):
        if self.is_logged:
            #TODO: Create a case block here for proper redirects: upload if blobstore is empty, services if no services activated, process if everything is OK
            #TODO: redirection to upload page should be a separate routine due to special "create_upload_url" system
            upload_url = blobstore.create_upload_url('/upload')
            self.render('upload', {'upload_url': upload_url})
        else:
            self.render('index')

class ServicesPage(Page):
    def get(self):
        values = {'services': []}
        for service in self.services:
            values['services'].append({
                'name': service,
                'purpose': self.services[service]['purpose']
            })
        self.render('services', values)

    def post(self):
        selected_services = self.request.POST.getall('services')
        user_info = self.get_user_info()
        if not user_info:
            user_info = models.User()
        gdata_client = gdata.service.GDataService()
        gdata.alt.appengine.run_on_appengine(gdata_client)
        hostname = os.environ['SERVER_NAME']
        port = os.environ['SERVER_PORT']
        if port and port != '80':
            hostname = hostname + ':' + port
        save_token_url = 'http://' + hostname + '/token'
        scopes = []
        user_info.services = []
        for service in selected_services:
            user_info.services.append(service)
            scopes.append(self.services[service]['scope'])
        if scopes:
            user_info.put()
            self.redirect(gdata_client.GenerateAuthSubURL(save_token_url, scopes, secure = False, session = True).to_string())
        else:
            self.redirect('/services')

class TokenPage(Page):
    def get(self):
        gdata_client = gdata.service.GDataService()
        gdata.alt.appengine.run_on_appengine(gdata_client)
        auth_token = gdata.auth.extract_auth_sub_token_from_url(self.request.uri)
        if not auth_token:
            self.redirect('/services')
        session_token = gdata_client.upgrade_to_session_token(auth_token)
        if not session_token:
            self.redirect('/services')
        gdata_client.token_store.add_token(session_token)
        self.redirect('/upload')

class UploadPage(blobstore_handlers.BlobstoreUploadHandler, Page):
    def get(self):
        if self.is_logged and self.get_user_services():
            upload_url = blobstore.create_upload_url('/upload')
            self.render('upload', {'upload_url': upload_url})
        else:
            self.redirect('/')

    def post(self):
        data = self.get_uploads('fbContents')
        if self.is_logged and data:
            blob_info = data[0]
            user_info = self.get_user_info()
            if user_info.blob_key:
                blobstore.delete(user_info.blob_key)
            user_info.blob_key = str(blob_info.key())
            user_info.put()
            self.redirect('/saved')
        else:
            self.redirect('/')

class SavedPage(Page): #Unnecessary?
    def get(self):
        self.render('saved')

class ProcessPage(Page):
    def get(self):
        archive = self.get_user_archive()
        if archive:
            template_values = {}
            archive_files = archive.namelist()
            for service in self.get_user_services():
                settings_provider = self.services[service]['settings'](self.user, archive)
                template_values[service] = settings_provider.template_vars
            archive.close()
            self.render('process', template_values)

    def post(self): #TODO: Separate this part for "task" section
        logging.info('Import request received for user %s', self.user)
        archive = self.get_user_archive()
        if archive:
            archive_files = archive.namelist()
            for service in self.get_user_services():
                self.response.out.write('Importing to service %s...' % (service))
                importer = self.services[service]['importer'](self.user, archive, self.request.POST)
                import_result = importer.do_import()
                if import_result == 0:
                    self.response.out.write('%s import completed successfully.' % (service))
                else:
                    self.response.out.write('%s import completed with errors. (code %d)' % (service, import_result))
            archive.close()
        else:
            self.redirect('/upload')

application = webapp.WSGIApplication(
    [
        ('/', MainPage),
        ('/upload', UploadPage),
        ('/process', ProcessPage),
        ('/services', ServicesPage),
        ('/token', TokenPage),
        ('/saved', SavedPage),
    ],
    debug = True
)

def main():
    logging.getLogger().setLevel(logging.DEBUG)
    run_wsgi_app(application)

if __name__ == "__main__":
    main()
|
Python
| 0.998949
|
@@ -1107,32 +1107,70 @@
__init__(self):%0A
+ super(Page, self).__init__()%0A%0A
self.use
@@ -1194,25 +1194,24 @@
rent_user()%0A
-%0A
if s
@@ -1414,17 +1414,16 @@
= False%0A
-%0A
|
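The fix itself is two lines at the top of `Page.__init__`, delegating to the base handler before the page sets up its own state; under Python 2 the explicit two-argument form is required:

class Page(webapp.RequestHandler):
    def __init__(self):
        # let RequestHandler initialize its request/response plumbing first
        super(Page, self).__init__()
        self.user = users.get_current_user()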
1d925809bd4b0329e137974382272c14d8021d53
|
make doctype continuous migration include attachments
|
corehq/doctypemigrations/continuous_migrate.py
|
corehq/doctypemigrations/continuous_migrate.py
|
import datetime
import logging

from corehq.util.couch import IterDB
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked

def filter_doc_ids_by_doc_type(db, doc_ids, doc_types):
    for doc_ids_chunk in chunked(doc_ids, 100):
        keys = [[doc_type, doc_id]
                for doc_id in doc_ids_chunk
                for doc_type in doc_types]
        results = db.view('all_docs/by_doc_type', keys=keys, reduce=False)
        for result in results:
            yield result['id']

def copy_docs(source_db, target_db, doc_ids):
    """
    copy docs from source_db to target_db
    by doc_id
    """
    if not doc_ids:
        return
    with IterDB(target_db, new_edits=False) as iter_db:
        for doc in iter_docs(source_db, doc_ids, attachments=True):
            iter_db.save(doc)
    if iter_db.errors_by_type:
        logging.error('errors bulk saving in copy_docs: {!r}'
                      .format(iter_db.errors_by_type))

def _bulk_get_revs(target_db, doc_ids):
    """
    return (_id, _rev) for every existing doc in doc_ids
    if a doc id is not found in target_db, it is excluded from the result
    """
    result = target_db.all_docs(keys=list(doc_ids)).all()
    return [(row['id'], row['value']['rev']) for row in result if not row.get('error')]

def delete_docs(target_db, doc_ids):
    """
    delete docs from database by doc _id and _rev
    """
    if not doc_ids:
        return
    doc_id_rev_pairs = _bulk_get_revs(target_db, doc_ids)
    with IterDB(target_db, new_edits=False) as iter_db:
        for doc_id, doc_rev in doc_id_rev_pairs:
            iter_db.delete({'_id': doc_id, '_rev': doc_rev})
    if iter_db.errors_by_type:
        logging.error('errors bulk saving in delete_docs: {!r}'
                      .format(iter_db.errors_by_type))

class ContinuousReplicator(object):
    def __init__(self, source_db, target_db, doc_types,
                 max_changes_before_commit=100,
                 max_time_before_commit=datetime.timedelta(seconds=5)):
        self.source_db = source_db
        self.target_db = target_db
        self.doc_types = doc_types
        self.max_changes_before_commit = max_changes_before_commit
        self.max_time_before_commit = max_time_before_commit
        self._ids_to_save = None
        self._ids_to_delete = None
        self._reset()

    def _reset(self):
        self._last_commit_time = datetime.datetime.utcnow()
        self._uncommitted_changes_count = 0
        self._ids_to_save = set()
        self._ids_to_delete = set()

    def replicate_change(self, change):
        if change.deleted:
            self._ids_to_delete.add(change.id)
        else:
            self._ids_to_save.add(change.id)
        self._uncommitted_changes_count += 1

    def commit(self):
        ids_to_save = filter_doc_ids_by_doc_type(
            self.source_db, self._ids_to_save, self.doc_types)
        copy_docs(self.source_db, self.target_db, ids_to_save)
        delete_docs(self.target_db, self._ids_to_delete)
        self._reset()

    def _get_time_since_last_commit(self):
        return datetime.datetime.utcnow() - self._last_commit_time

    def should_commit(self):
        return (self._uncommitted_changes_count > self.max_changes_before_commit or
                self._get_time_since_last_commit() > self.max_time_before_commit)
|
Python
| 0
|
@@ -9,16 +9,86 @@
atetime%0A
+from corehq.doctypemigrations.bulk_migrate import _insert_attachments%0A
from cor
@@ -863,24 +863,78 @@
ents=True):%0A
+ doc = _insert_attachments(source_db, doc)%0A
|
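The change imports `_insert_attachments` from `corehq.doctypemigrations.bulk_migrate` and applies it to each doc before saving, so binary attachments survive the copy. A rough sketch of what such a helper has to do (illustrative only; the real implementation lives in the bulk_migrate module):

import base64

def insert_attachments(db, doc):
    # replace attachment stubs with inline base64 bodies so a plain
    # bulk save on the target db carries the binaries across
    for name, stub in list(doc.get('_attachments', {}).items()):
        body = db.fetch_attachment(doc['_id'], name)
        doc['_attachments'][name] = {
            'content_type': stub.get('content_type'),
            'data': base64.b64encode(body),
        }
    return doc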
fc7d83eda95aa20f0782644cd4076a51e60cc46d
|
Remove unused properties from models.isolate.Isolate.
|
dashboard/dashboard/pinpoint/models/isolate.py
|
dashboard/dashboard/pinpoint/models/isolate.py
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Model for storing information to look up isolates.

An isolate is a way to describe the dependencies of a specific build.

More about isolates:
https://github.com/luci/luci-py/blob/master/appengine/isolate/doc/client/Design.md
"""

import hashlib

from google.appengine.ext import ndb

def Get(builder_name, change, target):
  """Retrieve an isolate hash from the Datastore.

  Args:
    builder_name: The name of the builder that produced the isolate.
    change: The Change the isolate was built at.
    target: The compile target the isolate is for.

  Returns:
    The isolate hash as a string.
  """
  key = ndb.Key(Isolate, _Key(builder_name, change, target))
  entity = key.get()
  if not entity:
    raise KeyError('No isolate with builder %s, change %s, and target %s.' %
                   (builder_name, change, target))
  return entity.isolate_hash

def Put(isolate_infos):
  """Add isolate hashes to the Datastore.

  This function takes multiple entries to do a batched Datastore put.

  Args:
    isolate_infos: An iterable of tuples. Each tuple is of the form
        (builder_name, change, target, isolate_hash).
  """
  entities = []
  for isolate_info in isolate_infos:
    builder_name, change, target, isolate_hash = isolate_info
    entity = Isolate(
        builder_name=builder_name,
        change=change,
        target=target,
        isolate_hash=isolate_hash,
        id=_Key(builder_name, change, target))
    entities.append(entity)
  ndb.put_multi(entities)

class Isolate(ndb.Model):
  builder_name = ndb.StringProperty(required=True)
  change = ndb.PickleProperty(required=True)
  target = ndb.StringProperty(required=True)
  isolate_hash = ndb.StringProperty(required=True)

def _Key(builder_name, change, target):
  # The key must be stable across machines, platforms,
  # Python versions, and Python invocations.
  string = '\n'.join((builder_name, change.id_string, target))
  return hashlib.sha256(string).hexdigest()
|
Python
| 0
|
@@ -1437,89 +1437,8 @@
te(%0A
- builder_name=builder_name,%0A change=change,%0A target=target,%0A
@@ -1468,16 +1468,16 @@
e_hash,%0A
+
@@ -1601,149 +1601,8 @@
l):%0A
- builder_name = ndb.StringProperty(required=True)%0A change = ndb.PickleProperty(required=True)%0A target = ndb.StringProperty(required=True)%0A
is
@@ -1633,16 +1633,31 @@
roperty(
+indexed=False,
required
|
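Besides dropping the three lookup-only properties (the key already encodes builder, change, and target), the diff marks the surviving field as unindexed, which skips unneeded Datastore index writes:

class Isolate(ndb.Model):
    # only ever fetched by key, so no query index is required
    isolate_hash = ndb.StringProperty(indexed=False, required=True)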
fa3ac19213664f7281bc7b84ddf8734f6c58c57c
|
Changing output to be scraper-specific
|
dat/RecipesScraper/RecipesScraper/pipelines.py
|
dat/RecipesScraper/RecipesScraper/pipelines.py
|
# -*- coding: utf-8 -*-
from scrapy.exporters import JsonItemExporter

class JsonPipeline(object):
    """Save Pipeline output to JSON."""

    def __init__(self):
        self.file = open("recipes.json", 'wb')
        self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
        self.exporter.start_exporting()

    def close_spider(self, spider):
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        self.exporter.export_item(item)
        return item
|
Python
| 0.999615
|
@@ -46,24 +46,29 @@
import Json
+Lines
ItemExporter
@@ -68,16 +68,16 @@
xporter%0A
-
%0A%0Aclass
@@ -159,16 +159,29 @@
t__(self
+, spider_name
):%0A
@@ -201,16 +201,26 @@
= open(%22
+output/%7B%7D_
recipes.
@@ -224,16 +224,36 @@
es.json%22
+.format(spider_name)
, 'wb')%0A
@@ -280,16 +280,21 @@
r = Json
+Lines
ItemExpo
@@ -384,24 +384,152 @@
xporting()%0A%0A
+ @classmethod%0A def from_crawler(cls, crawler):%0A return cls(%0A spider_name=crawler.spider.name%0A )%0A%0A
def clos
|
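Applied, the diff turns the pipeline into a `JsonLinesItemExporter` that writes one file per spider, with the spider's name injected through Scrapy's `from_crawler` hook; the resulting class, reconstructed from the diff:

from scrapy.exporters import JsonLinesItemExporter

class JsonPipeline(object):
    """Save Pipeline output to JSON."""

    def __init__(self, spider_name):
        self.file = open("output/{}_recipes.json".format(spider_name), 'wb')
        self.exporter = JsonLinesItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
        self.exporter.start_exporting()

    @classmethod
    def from_crawler(cls, crawler):
        return cls(spider_name=crawler.spider.name)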
85671daee9fb1ed1b9f096aa364d05da8fe55b8e
|
Add clipboard support
|
iotp.py
|
iotp.py
|
#!/usr/bin/env python

# Import system libraries
import base64
import json
import os

# Import PyPi libraries
from appdirs import AppDirs
import click
import pyotp

# Set app information
appname = 'iotp'
appauthor = 'Dan Mills'
appversion = '0.0.1'

# Setup appdirs
dirs = AppDirs(appname, appauthor)
keyFile = os.path.join(dirs.user_data_dir, 'keys.json')

def setup_keys():
    """
    Check for data file and directory and create if it doesn't exist
    Returns a dictionary of saved keys
    """
    if not os.path.isdir(dirs.user_data_dir):
        os.makedirs(dirs.user_data_dir)
    try:
        with open(keyFile, 'r') as f:
            keys = json.load(f)
    except ValueError:
        keys = {}
    except IOError:
        open(keyFile, 'w').close()
        keys = {}
    return keys

def save_keys(keys):
    """Takes a dictionary of keys and then saves them to the keys file"""
    with open(keyFile, 'w') as f:
        json.dump(keys, f, indent=True)

def get_totp(key):
    """
    Accepts a BASE32 encoded key and returns the TOTP if it is valid and None
    if it is invalid
    """
    try:
        totp = pyotp.TOTP(key)
        return totp.now()
    except TypeError:
        return None

@click.group()
@click.version_option(appversion)
def cli():
    pass

@cli.command()
@click.argument('service', required=False)
def get(service=None):
    """
    Gets TOTP codes for service specified. If no service is specified it
    prints codes for all services
    """
    keys = setup_keys()
    if not service:
        for service in keys:
            value = keys[service]
            totp = get_totp(value)
            if totp:
                click.echo('{}: {}'.format(service, totp))
    elif service in keys:
        totp = get_totp(keys[service])
        if totp:
            click.echo('{}: {}'.format(service, totp))
    else:
        click.echo('{} does not exist'.format(service))

@cli.command()
@click.argument('service')
@click.argument('key')
def set(service, key):
    """Accepts a service and a key and saves it out to the keyFile"""
    keys = setup_keys()
    try:
        base64.b32decode(key)
    except TypeError:
        click.echo('{} is not a valid key'.format(key))
    else:
        keys[service] = key
        save_keys(keys)

@cli.command()
@click.argument('service')
def rm(service):
    """Accepts a service and removes it from the list of services"""
    keys = setup_keys()
    if keys.pop(service, None):
        click.echo('Removed {}'.format(service))
        save_keys(keys)
    else:
        click.echo('{} does not exist'.format(service))

if __name__ == '__main__':
    cli()
|
Python
| 0.000001
|
@@ -156,16 +156,33 @@
rt pyotp
+%0Aimport pyperclip
%0A%0A# Set
@@ -1359,16 +1359,60 @@
se)%0A
+@click.option('--copy', '-c', is_flag=True)%0A
def get(
serv
@@ -1407,16 +1407,22 @@
def get(
+copy,
service=
@@ -1914,16 +1914,74 @@
totp))%0A
+ if copy:%0A pyperclip.copy(totp)%0A
else
|
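The diff threads a `--copy`/`-c` flag into `get` and, when set, pushes each printed code onto the system clipboard via `pyperclip`. The added pattern in isolation (the code value is a stand-in for a freshly generated TOTP):

import click
import pyperclip

@click.command()
@click.option('--copy', '-c', is_flag=True)
def get(copy):
    totp = '123456'  # stand-in for a generated code
    click.echo(totp)
    if copy:
        pyperclip.copy(totp)  # mirror the code to the clipboard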
4a25286506cc8e50b5e1225b12015f4d0da3ccfc
|
Put api token auth endpoint under v1.
|
smbackend/urls.py
|
smbackend/urls.py
|
from django.conf.urls import patterns, include, url
from services.api import all_views as services_views
from services.api import AccessibilityRuleView
from observations.api import views as observations_views
from rest_framework import routers
from observations.views import obtain_auth_token
from munigeo.api import all_views as munigeo_views

# from django.contrib import admin
# admin.autodiscover()

router = routers.DefaultRouter()
registered_api_views = set()
for view in services_views + munigeo_views + observations_views:
    kwargs = {}
    if view['name'] in registered_api_views:
        continue
    else:
        registered_api_views.add(view['name'])
    if 'base_name' in view:
        kwargs['base_name'] = view['base_name']
    router.register(view['name'], view['class'], **kwargs)

urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'smbackend.views.home', name='home'),
    # url(r'^blog/', include('blog.urls')),
    # url(r'^', include(v1_api.urls)),
    # url(r'^admin/', include(admin.site.urls)),
    url(r'^open311/', 'services.views.post_service_request', name='services'),
    url(r'^v1/', include(router.urls)),
    url(r'^api-token-auth/', obtain_auth_token)
)
|
Python
| 0
|
@@ -1159,16 +1159,19 @@
url(r'%5E
+v1/
api-toke
|
94764b8daed7ef6df8ac47462013b08d30de7e8f
|
refactor for less ugliness, resolves #7
|
source/display.py
|
source/display.py
|
class Display():
    def __init__(self):
        self.start = "Welcome"
        self.draw = "Draw"
        self.computer = "Computer Wins"
        self.human = "You Win"
        self.next_move = "What is your next move?"
        self.bad_move = "That is not a legal move."

    def show(self, text):
        print text

    def render_board(self, raw_board):
        stringified_board = self.stringify_board(raw_board)
        rendered_board = self.construct_board(stringified_board)
        return rendered_board

    def stringify_board(self, board):
        for i in range(0, len(board)):
            if board[i] == 0:
                board[i] = "   "
            elif board[i] == 1:
                board[i] = " X "
            elif board[i] == 10:
                board[i] = " O "
        return board

    def construct_board(self, board):
        board_size = self.get_board_size(board)
        rack = self.construct_rack(board_size)
        wall = '|'
        rows = []
        for i in range(0, board_size):
            rows.append([])
            for j in range(0, board_size):
                rows[i].append(board.pop(0))
            working_row = rows[i]
            rows[i] = wall.join(working_row)
        constructed_board = '\n' + rack.join(rows) + '\n'
        return constructed_board

    def get_board_size(self, board):
        from math import sqrt
        return int(sqrt(len(board)))

    def construct_rack(self, board_size):
        corner = '+'
        shelves = []
        for i in range(0, board_size):
            shelves.append('---')
        rack = '\n' + corner.join(shelves) + '\n'
        return rack
|
Python
| 0.000001
|
@@ -864,93 +864,530 @@
-board_size = self.get_board_size(board)%0A rack = self.construct_rack(board_size
+rack = self.construct_rack(board)%0A rows = self.construct_rows(board)%0A constructed_board = '%5Cn' + rack.join(rows) + '%5Cn'%0A return constructed_board%0A%0A def construct_rack(self, board):%0A board_size = self.get_board_size(board)%0A corner = '+'%0A shelves = %5B%5D%0A for i in range(0, board_size):%0A shelves.append('---')%0A rack = '%5Cn' + corner.join(shelves) + '%5Cn'%0A return rack%0A%0A def construct_rows(self, board):%0A board_size = self.get_board_size(board
)%0A
@@ -1667,90 +1667,19 @@
-constructed_board = '%5Cn' + rack.join(rows) + '%5Cn'%0A return constructed_board
+return rows
%0A%0A
@@ -1784,239 +1784,5 @@
)))
- %0A%0A def construct_rack(self, board_size):%0A corner = '+'%0A shelves = %5B%5D%0A for i in range(0, board_size):%0A shelves.append('---')%0A rack = '%5Cn' + corner.join(shelves) + '%5Cn'%0A return rack%0A
%0A
|
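For a 3x3 board, `construct_rack` yields '\n---+---+---\n' and `construct_board` joins the `|`-separated rows with it, so a rendered board looks like (illustrative cell values):

 X | O |   
---+---+---
   | X | O 
---+---+---
 O |   | X 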
a89dee8f9b20479cc3d6e82ae730628440f335cc
|
Add fibt2 and qm stats
|
src/collectors/xfs/xfs.py
|
src/collectors/xfs/xfs.py
|
# coding=utf-8

"""
The XFSCollector collects XFS metrics using /proc/fs/xfs/stat.

#### Dependencies

 * /proc/fs/xfs/stat

"""

import diamond.collector
import sys

class XFSCollector(diamond.collector.Collector):

    PROC = '/proc/fs/xfs/stat'

    def get_default_config_help(self):
        config_help = super(XFSCollector, self).get_default_config_help()
        config_help.update({
        })
        return config_help

    def get_default_config(self):
        """
        Returns the xfs collector settings
        """
        config = super(XFSCollector, self).get_default_config()
        config.update({
            'path': 'xfs'
        })
        return config

    def collect(self):
        """
        Collect xfs stats.

        For an explanation of the following metrics visit
        http://xfs.org/index.php/Runtime_Stats
        https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h
        """
        data_structure = {
            'extent_alloc': (
                'alloc_extent',
                'alloc_block',
                'free_extent',
                'free_block'
            ),
            'abt': (
                'lookup',
                'compare',
                'insrec',
                'delrec'
            ),
            'blk_map': (
                'read_ops',
                'write_ops',
                'unmap',
                'add_exlist',
                'del_exlist',
                'look_exlist',
                'cmp_exlist'
            ),
            'bmbt': (
                'lookup',
                'compare',
                'insrec',
                'delrec'
            ),
            'dir': (
                'lookup',
                'create',
                'remove',
                'getdents'
            ),
            'trans': (
                'sync',
                'async',
                'empty'
            ),
            'ig': (
                'ig_attempts',
                'ig_found',
                'ig_frecycle',
                'ig_missed',
                'ig_dup',
                'ig_reclaims',
                'ig_attrchg'
            ),
            'log': (
                'writes',
                'blocks',
                'noiclogs',
                'force',
                'force_sleep'
            ),
            'push_ail': (
                'try_logspace',
                'sleep_logspace',
                'pushes',
                'success',
                'pushbuf',
                'pinned',
                'locked',
                'flushing',
                'restarts',
                'flush'
            ),
            'xstrat': (
                'quick',
                'split'
            ),
            'rw': (
                'write_calls',
                'read_calls'
            ),
            'attr': (
                'get',
                'set',
                'remove',
                'list'
            ),
            'icluster': (
                'iflush_count',
                'icluster_flushcnt',
                'icluster_flushinode'
            ),
            'vnodes': (
                'vn_active',
                'vn_alloc',
                'vn_get',
                'vn_hold',
                'vn_rele',
                'vn_reclaim',
                'vn_remove',
                'vn_free'
            ),
            'buf': (
                'xb_get',
                'xb_create',
                'xb_get_locked',
                'xb_get_locked_waited',
                'xb_busy_locked',
                'xb_miss_locked',
                'xb_page_retries',
                'xb_page_found',
                'xb_get_read'
            ),
            'abtb2': (
                'xs_abtb_2_lookup',
                'xs_abtb_2_compare',
                'xs_abtb_2_insrec',
                'xs_abtb_2_delrec',
                'xs_abtb_2_newroot',
                'xs_abtb_2_killroot',
                'xs_abtb_2_increment',
                'xs_abtb_2_decrement',
                'xs_abtb_2_lshift',
                'xs_abtb_2_rshift',
                'xs_abtb_2_split',
                'xs_abtb_2_join',
                'xs_abtb_2_alloc',
                'xs_abtb_2_free',
                'xs_abtb_2_moves'
            ),
            'abtc2': (
                'xs_abtc_2_lookup',
                'xs_abtc_2_compare',
                'xs_abtc_2_insrec',
                'xs_abtc_2_delrec',
                'xs_abtc_2_newroot',
                'xs_abtc_2_killroot',
                'xs_abtc_2_increment',
                'xs_abtc_2_decrement',
                'xs_abtc_2_lshift',
                'xs_abtc_2_rshift',
                'xs_abtc_2_split',
                'xs_abtc_2_join',
                'xs_abtc_2_alloc',
                'xs_abtc_2_free',
                'xs_abtc_2_moves'
            ),
            'bmbt2': (
                'xs_bmbt_2_lookup',
                'xs_bmbt_2_compare',
                'xs_bmbt_2_insrec',
                'xs_bmbt_2_delrec',
                'xs_bmbt_2_newroot',
                'xs_bmbt_2_killroot',
                'xs_bmbt_2_increment',
                'xs_bmbt_2_decrement',
                'xs_bmbt_2_lshift',
                'xs_bmbt_2_rshift',
                'xs_bmbt_2_split',
                'xs_bmbt_2_join',
                'xs_bmbt_2_alloc',
                'xs_bmbt_2_free',
                'xs_bmbt_2_moves'
            ),
            'ibt2': (
                'lookup',
                'compare',
                'insrec',
                'delrec',
                'newroot',
                'killroot',
                'increment',
                'decrement',
                'lshift',
                'rshift',
                'split',
                'join',
                'alloc',
                'free',
                'moves'
            ),
            'xpc': (
                'xs_xstrat_bytes',
                'xs_write_bytes',
                'xs_read_bytes'
            ),
            'debug': (
                'debug',
            )
        }

        f = open(self.PROC)
        new_stats = f.readlines()
        f.close()

        stats = {}
        for line in new_stats:
            items = line.rstrip().split()
            stats[items[0]] = [int(a) for a in items[1:]]

        for key in stats.keys():
            for item in enumerate(data_structure[key]):
                metric_name = '.'.join([key, item[1]])
                value = stats[key][item[0]]
                self.publish_counter(metric_name, value)
|
Python
| 0.000001
|
@@ -5841,32 +5841,566 @@
%0A ),%0A
+ 'fibt2': (%0A 'lookup',%0A 'compare',%0A 'insrec',%0A 'delrec',%0A 'newroot',%0A 'killroot',%0A 'increment',%0A 'decrement',%0A 'lshift',%0A 'rshift',%0A 'split',%0A 'join',%0A 'alloc',%0A 'free',%0A 'moves'%0A ),%0A 'qm': (%0A 'xs_qm_dquot',%0A 'xs_qm_dquot_unused'%0A ),%0A%0A
'xpc
|
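Each line of /proc/fs/xfs/stat is a label followed by integer counters; `collect()` splits the line and zips the values against the tuple registered for that label. With an illustrative line:

line = 'xstrat 4372 1\n'
items = line.rstrip().split()          # ['xstrat', '4372', '1']
values = [int(a) for a in items[1:]]   # [4372, 1]
# paired with data_structure['xstrat'] == ('quick', 'split'),
# this publishes xstrat.quick = 4372 and xstrat.split = 1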
c0fc71d8197580e1c517c20f619de5f4c3185922
|
move pointer generation to the exceptions & serializer
|
drfjsonapi/renderers.py
|
drfjsonapi/renderers.py
|
"""
drfjsonapi.renderers
~~~~~~~~~~~~~~~~~~~~~
DRF renderer that is compliant with the JSON API spec
"""
from collections import OrderedDict
from rest_framework.renderers import JSONRenderer
class JsonApiRenderer(JSONRenderer):
""" JSON API compliant DRF renderer
Inherit from the DRF JSONRenderer since JSON API is
simply a structured representation of JSON.
"""
media_type = 'application/vnd.api+json'
def get_errors(self, data):
""" Return an array of "Error" objects
Set the proper RFC 6901 JSON pointer object by checking
the type of error. Global resource errors, field-level
errors, & relationship field errors will have different
error codes.
The existing pointer will already be set but not yet
fully qualified like it needs to be specifically for
JSON API.
:spec:
jsonapi.org/format/#errors
"""
for error in data['errors']:
pointer = error['source'].get('pointer', None)
if error['code'] == 'FieldError':
error['source']['pointer'] = '/data/attributes%s' % pointer
elif error['code'] == 'RelationshipError':
error['source']['pointer'] = '/data/relationships%s' % pointer
elif error['code'] == 'ResourceError':
error['source']['pointer'] = '/data'
return data
# pylint: disable=too-many-arguments
def _get_include(self, field_name, cache, context, models, ret):
""" Given a cache dict & models serialize them
This is a self-referential walking of the cache tree
that was created by the `IncludeFilter`. It ensures
no dupes exist within the compound documents array
but doesn't do anything with the primary data.
It does not return anything & instead has mutation
side-effects of the inclusion array `ret`.
WARN: this is probably computationally expensive
in several way. By dupe checking & by creating
a fresh serializer for each model.
Fresh serializers are used just in case fields
are dynamically altered on a per model basis.
"""
field = cache['field']
for model in models:
try:
related = getattr(model, field_name).all()
except AttributeError:
if not getattr(model, field_name):
continue
related = [getattr(model, field_name)]
for _model in related:
context['include'] = cache.keys()
data = field.get_serializer(_model, context=context).data
# no dupes
if data not in ret:
ret.append(data)
for key, val in cache.items():
if key != 'field':
self._get_include(key, val, context, related, ret)
def get_included(self, resources, serializer, request):
""" Return the top level "Included Resources" array
This should return a list that is compliant with the
"Resource Objects" section of the JSON API spec.
Since these are the compound documents to be "sideloaded"
there should be no duplicates within the included array
itself or the primary data.
The drfjsonapi `IncludeFilter` adds a private property
to the request object named `_includes` which greatly
reduces the complexity of this process.
TIP: read the documentation of the `IncludeFilter`
class for more information.
:spec:
jsonapi.org/format/#document-resource-objects
"""
# could be single model or many or serializer is None
# if single then coerce into list
models = getattr(serializer, 'instance', None)
if models and not isinstance(models, list):
models = [models]
if not all((resources, models, hasattr(request, '_includes'))):
return []
# could be a ReturnDict or ReturnList but coerce
# into list so simple 'in' checks can work later
elif resources and not isinstance(resources, list):
resources = [resources]
ret = []
for key, val in request._includes.items():
self._get_include(key, val, serializer.context, models, ret)
# remove dupes from primary data
return [data for data in ret if data not in resources]
def get_jsonapi(self):
""" Return the top level "JSON API" object
Only the `version` member is valid.
:spec:
jsonapi.org/format/#document-jsonapi-object
"""
return {'version': '1.0'}
def get_links(self, request, pager):
""" Return the top level "Links" object
According to the JSON API spec this should include
the required pagination links.
:spec:
jsonapi.org/format/#document-links
jsonapi.org/format/#fetching-pagination
"""
links = {'self': request.build_absolute_uri()}
if pager:
links.update(pager['links'])
return links
def get_meta(self, pager):
""" Return the top level "Meta" object
We include some helpful counters from the pagination
results otherwise it's empty.
"""
return pager.get('meta', {})
def get_top_level(self, data, request, pager, serializer=None):
""" Return the "Top Level" object of the resource(s)
This should return a dict that is compliant with the
"Top Level" section of the JSON API spec.
:spec:
jsonapi.org/format/#document-top-level
"""
return {
'data': data,
'included': self.get_included(data, serializer, request),
'jsonapi': self.get_jsonapi(),
'links': self.get_links(request, pager),
'meta': self.get_meta(pager),
}
def render(self, data, media_type=None, renderer_context=None):
""" DRF entry point
`data` can be quite a few different data formats
unforutnately. It could be a single resource dict,
None (no single resource), an array of many resource
dicts with paging info, an empty array, or an "Errors"
object.
This should be handled better somehow I'd imagine.
"""
pager = {}
request = renderer_context['request']
# list with drfjsonapi pager
if isinstance(data, OrderedDict) and 'pager' in data:
pager = data['pager']
data = data['results']
if not data:
data = self.get_top_level(data, request, pager)
elif 'errors' in data:
data = self.get_errors(data)
else:
serializer = data.serializer
if isinstance(data, list):
serializer = serializer.child
data = self.get_top_level(data, request, pager, serializer)
return super(JsonApiRenderer, self).render(
data,
media_type,
renderer_context,
)
|
Python
| 0
|
@@ -941,465 +941,8 @@
%22%22%0A%0A
- for error in data%5B'errors'%5D:%0A pointer = error%5B'source'%5D.get('pointer', None)%0A%0A if error%5B'code'%5D == 'FieldError':%0A error%5B'source'%5D%5B'pointer'%5D = '/data/attributes%25s' %25 pointer%0A elif error%5B'code'%5D == 'RelationshipError':%0A error%5B'source'%5D%5B'pointer'%5D = '/data/relationships%25s' %25 pointer%0A elif error%5B'code'%5D == 'ResourceError':%0A error%5B'source'%5D%5B'pointer'%5D = '/data'%0A
|
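Per the subject, the pointer qualification that `get_errors` performed now happens in the exceptions and serializer; the mapping itself is unchanged. For a field-level error on `name`:

error = {'code': 'FieldError', 'source': {'pointer': '/name'}}
error['source']['pointer'] = '/data/attributes%s' % error['source']['pointer']
# -> '/data/attributes/name', the RFC 6901 pointer JSON API expects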
b6b99dff989fb6662f795a95895e070424f59822
|
Add test for login button instead of edit buttons if not logged in
|
candidates/tests/test_person_view.py
|
candidates/tests/test_person_view.py
|
# Smoke tests for viewing a candidate's page

from __future__ import unicode_literals

import re

from django.test.utils import override_settings
from django_webtest import WebTest

from .dates import processors_before, processors_after
from .factories import (
    CandidacyExtraFactory, PersonExtraFactory
)
from .uk_examples import UK2015ExamplesMixin

class TestPersonView(UK2015ExamplesMixin, WebTest):

    def setUp(self):
        super(TestPersonView, self).setUp()
        person_extra = PersonExtraFactory.create(
            base__id='2009',
            base__name='Tessa Jowell'
        )
        CandidacyExtraFactory.create(
            election=self.election,
            base__person=person_extra.base,
            base__post=self.dulwich_post_extra.base,
            base__on_behalf_of=self.labour_party_extra.base
        )

    def test_get_tessa_jowell(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertTrue(
            re.search(
                r'''(?msx)
                    <h1>Tessa\s+Jowell</h1>\s*
                    <p>Candidate\s+for\s+
                    <a\s+href="/election/2015/post/65808/dulwich-and-west-norwood">Dulwich\s+
                    and\s+West\s+Norwood</a>\s+in\ <a\ href="/election/2015/constituencies">2015
                    \s+General\s+Election</a>\s*</p>''',
                response.text
            )
        )

    @override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_before)
    def test_get_tessa_jowell_before_election(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertContains(response, 'Contesting the 2015 General Election')

    @override_settings(TEMPLATE_CONTEXT_PROCESSORS=processors_after)
    def test_get_tessa_jowell_after_election(self):
        response = self.app.get('/person/2009/tessa-jowell')
        self.assertContains(response, 'Contested the 2015 General Election')

    def test_get_non_existent(self):
        response = self.app.get(
            '/person/987654/imaginary-person',
            expect_errors=True
        )
        self.assertEqual(response.status_code, 404)
|
Python
| 0
|
@@ -1,50 +1,4 @@
-# Smoke tests for viewing a candidate's page%0A%0A
from
@@ -1999,8 +1999,330 @@
e, 404)%0A
+%0A def test_shows_no_edit_buttons_if_user_not_authenticated(self):%0A response = self.app.get('/person/2009/tessa-jowell')%0A edit_buttons = response.html.find_all('a', attrs=%7B'class': 'button'%7D)%0A self.assertEqual(len(edit_buttons), 1)%0A self.assertEqual(edit_buttons%5B0%5D.string, 'Log in to edit')%0A
|
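The added test, reconstructed from the diff, asserts that an anonymous visitor sees exactly one button, the login prompt, rather than any edit controls:

def test_shows_no_edit_buttons_if_user_not_authenticated(self):
    response = self.app.get('/person/2009/tessa-jowell')
    edit_buttons = response.html.find_all('a', attrs={'class': 'button'})
    self.assertEqual(len(edit_buttons), 1)
    self.assertEqual(edit_buttons[0].string, 'Log in to edit')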
33e15e1afae139c3b55dea8f648c1913ea7d89ad
|
Correct package level for ACEFTS
|
pyatmlab/datasets/acefts.py
|
pyatmlab/datasets/acefts.py
|
import datetime
import itertools
import collections
import logging

import numpy

from . import dataset
from . import constants

class ACEFTS(dataset.SingleMeasurementPerFileDataset,
             dataset.ProfileDataset):
    """SCISAT Atmospheric Chemistry Experiment FTS
    """

    # basedir = "/home/gerrit/sshfs/glacier/data/1/gholl/data/ACE"
    subdir = "{year:04d}-{month:02d}"
    re = r"(?P<type>s[sr])(?P<orbit>\d{5})v(?P<version>\d\.\d)\.asc"
    _time_format = "%Y-%m-%d %H:%M:%S"
    aliases = {"CH4_profile": "CH4",
               "delta_CH4_profile": "CH4_err",
               "p": "P_pa",
               "S_CH4_profile": "CH4_SA_fake"}
    filename_fields = {"orbit": "u4", "version": "U3", "type": "U2"}
    unique_fields = {"orbit", "type", "time"}
    n_prof = "z"
    range = (5e3, 150e3)

    @staticmethod
    def read_header(fp):
        """Read header from open file

        Should be opened at the beginning.  Will advance location from
        start of header to end of header.

        :param fp: File open at beginning of header
        :returns: Dictionary with header information
        """
        head = collections.OrderedDict()
        isempty = lambda line: not line.isspace()
        for line in itertools.takewhile(isempty, fp):
            k, v = line.split("|")
            head[k.strip()] = v.strip()
        if head == {}:
            raise dataset.InvalidFileError(
                "Unable to extract header from {0.name}. Empty?".format(fp))
        return head

    def read_single(self, f, fields="all"):
        with open(f) as fp:
            head = self.read_header(fp)
            line = fp.readline()
            while line.isspace():
                line = fp.readline()
            names = line.replace("P (atm)", "P_atm").split()
            # numpy.ma.empty fails with datetime dtype
            # https://github.com/numpy/numpy/issues/4583
            #D = numpy.ma.empty((150,),
            D = numpy.empty((150,),
                            list(zip(names, ["f4"]*len(names)))
                            + [("P_pa", "f4"), ("CH4_SA_fake", "f4", (150,))])
            for (n, line) in enumerate(fp):
                # why does this not work?
                # http://stackoverflow.com/q/22865877/974555
                #D[names][n] = tuple(float(f) for f in line.split())
                try:
                    vals = tuple(float(f) for f in line.split())
                except ValueError:
                    # raise InvalidFileError instead so I can catch more
                    # narrowly higher up in the stack
                    raise dataset.InvalidFileError("Unable to read content")
                for (i, name) in enumerate(names):
                    D[name][n] = vals[i]

        # km -> m
        D["z"] *= 1e3
        D["P_pa"] = D["P_atm"] * constants.atm
        # assume error covariance matrix to be diagonal
        # and convert std. error to variance.  Errors on flagged values
        # are 0.
        val = D["CH4_err"]>0
        D["CH4_SA_fake"] = numpy.diag(D["CH4_err"]**2)
        D["CH4_SA_fake"][:, ~val] = 0
        D["CH4_SA_fake"][~val, :] = 0

        head["lat"] = float(head["latitude"])
        head["lon"] = float(head["longitude"])
        # make sure lons are in (-180, 180)
        if head["lon"] < -180:
            head["lon"] += 360
        if head["lon"] > 180:
            head["lon"] -= 360
        # for time, strip off both incomplete timezone designation and
        # decimal part (truncating it to the nearest second)
        head["time"] = datetime.datetime.strptime(
            head["date"].split(".")[0].split("+")[0], self._time_format)

        return (head, D if fields=="all" else D[fields])

    def get_time_from_granule_contents(self, p):
        """Get time from granule contents.

        Takes str with path, returns two datetimes
        """
        with open(p) as f:
            head = self.read_header(f)
        # cut off "+00" part, datetime defaults to UTC and having only
        # hours is contrary to any standard, so strptime cannot handle
        # it
        return tuple(datetime.datetime.strptime(
            head[m + "_time"][:-3], self._time_format)
            for m in ("start", "end"))

    def get_z(self, meas):
        try:
            return super().get_z(meas)
        except IndexError:
            pass # parent failed, continue here
        m = meas["z"]
        if m[-1] < 150: # oops, still in km
            return m * 1e3
        else:
            return m

    def flag(self, arr):
        flagged = self.combine(arr, self.related["flags"])
        flnm = self.related["flags"].aliases["flag"]
        # See e-mail Patrick 2014-06-04
        with numpy.errstate(invalid="ignore"):
            badlev = flagged[flnm]>2
            badprof = ((flagged[flnm]>=4) & (flagged[flnm]<=6)).any(1)
        logging.info("Flagging {:d}/{:d} profiles and {:d}/{:d} levels".format(
            badprof.sum(), badprof.size, badlev.sum(), badlev.size))
        arr["CH4"][badlev] = numpy.nan
        arr["CH4"][badprof, :] = numpy.nan
        arr["CH4_err"][badlev] = numpy.nan
        arr["CH4_err"][badprof, :] = numpy.nan
        arr["CH4_SA_fake"][numpy.tile(badlev[:, :, numpy.newaxis], (1,1,150))] = numpy.nan
        arr["CH4_SA_fake"][badprof, :, :] = numpy.nan
        return arr
|
Python
| 0.000001
|
@@ -78,24 +78,25 @@
umpy%0A%0Afrom .
+.
import data
@@ -105,16 +105,17 @@
t%0Afrom .
+.
import
|
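The fix is the extra dot: `datasets/acefts.py` sits one package level below `dataset` and `constants`, so the relative imports must climb to the parent package:

# pyatmlab/datasets/acefts.py
from .. import dataset    # resolves to pyatmlab.dataset
from .. import constants  # resolves to pyatmlab.constants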
45c17681bfdfc374e94b086f9cdda4f314be5045
|
Add entries and preamble arguments to BibliographyData.__init__().
|
pybtex/database/__init__.py
|
pybtex/database/__init__.py
|
# Copyright (C) 2006, 2007, 2008, 2009  Andrey Golovizin
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from pybtex.exceptions import PybtexError

class BibliographyDataError(PybtexError):
    pass

class BibliographyData(object):
    def __init__(self):
        self.entries = {}
        self._preamble = []

    def __eq__(self, other):
        if not isinstance(other, BibliographyData):
            return super(BibliographyData, self) == other
        return (
            self.entries == other.entries
            and self._preamble == other._preamble
        )

    def add_to_preamble(self, s):
        self._preamble.append(s)

    def preamble(self):
        return ''.join(self._preamble)

    def add_entry(self, key, entry):
        if key in self.entries:
            raise BibliographyDataError('repeated bibliography entry: %s' % key)
        entry.collection = self
        self.entries[key] = entry

    def add_entries(self, entries):
        for key, entry in entries:
            self.add_entry(key, entry)
|
Python
| 0
|
@@ -836,24 +836,53 @@
_init__(self
+, entries=None, preamble=None
):%0A s
@@ -925,16 +925,141 @@
ble = %5B%5D
+%0A if entries:%0A self.entries.update(entries)%0A if preamble:%0A self.preamble.extend(preamble)
%0A%0A de
|
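Applied, the constructor accepts optional initial data. A sketch of the resulting __init__ (the diff extends via `self.preamble`, but the stored list is `_preamble`, while `preamble()` returns the joined string; this sketch targets the list directly):

def __init__(self, entries=None, preamble=None):
    self.entries = {}
    self._preamble = []
    if entries:
        self.entries.update(entries)
    if preamble:
        self._preamble.extend(preamble)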
5ed01ff0ab0967e8c200e2f891a921cfe617d218
|
Add option to allow rigid registration without scaling
|
pycpd/rigid_registration.py
|
pycpd/rigid_registration.py
|
from builtins import super

import numpy as np
import numbers

from .emregistration import EMRegistration
from .utility import is_positive_semi_definite

class RigidRegistration(EMRegistration):
    """
    Rigid registration.

    Attributes
    ----------
    R: numpy array (semi-positive definite)
        DxD rotation matrix. Any well behaved matrix will do,
        since the next estimate is a rotation matrix.

    t: numpy array
        1xD initial translation vector.

    s: float (positive)
        scaling parameter.

    A: numpy array
        Utility array used to calculate the rotation matrix.
        Defined in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.
    """

    # Additional parameters used in this class, but not inputs.
    # YPY: float
    #     Denominator value used to update the scale factor.
    #     Defined in Fig. 2 and Eq. 8 of https://arxiv.org/pdf/0905.2635.pdf.
    # X_hat: numpy array
    #     Centered target point cloud.
    #     Defined in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.

    def __init__(self, R=None, t=None, s=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        if self.D != 2 and self.D != 3:
            raise ValueError(
                'Rigid registration only supports 2D or 3D point clouds. Instead got {}.'.format(self.D))

        if R is not None and (R.ndim != 2 or R.shape[0] != self.D or R.shape[1] != self.D or not is_positive_semi_definite(R)):
            raise ValueError(
                'The rotation matrix can only be initialized to {}x{} positive semi definite matrices. Instead got: {}.'.format(self.D, self.D, R))

        if t is not None and (t.ndim != 2 or t.shape[0] != 1 or t.shape[1] != self.D):
            raise ValueError(
                'The translation vector can only be initialized to 1x{} positive semi definite matrices. Instead got: {}.'.format(self.D, t))

        if s is not None and (not isinstance(s, numbers.Number) or s <= 0):
            raise ValueError(
                'The scale factor must be a positive number. Instead got: {}.'.format(s))

        self.R = np.eye(self.D) if R is None else R
        self.t = np.atleast_2d(np.zeros((1, self.D))) if t is None else t
        self.s = 1 if s is None else s

    def update_transform(self):
        """
        Calculate a new estimate of the rigid transformation.
        """
        # target point cloud mean
        muX = np.divide(np.sum(self.PX, axis=0),
                        self.Np)
        # source point cloud mean
        muY = np.divide(
            np.sum(np.dot(np.transpose(self.P), self.Y), axis=0), self.Np)

        self.X_hat = self.X - np.tile(muX, (self.N, 1))
        # centered source point cloud
        Y_hat = self.Y - np.tile(muY, (self.M, 1))
        self.YPY = np.dot(np.transpose(self.P1), np.sum(
            np.multiply(Y_hat, Y_hat), axis=1))

        self.A = np.dot(np.transpose(self.X_hat), np.transpose(self.P))
        self.A = np.dot(self.A, Y_hat)

        # Singular value decomposition as per lemma 1 of https://arxiv.org/pdf/0905.2635.pdf.
        U, _, V = np.linalg.svd(self.A, full_matrices=True)
        C = np.ones((self.D, ))
        C[self.D-1] = np.linalg.det(np.dot(U, V))

        # Calculate the rotation matrix using Eq. 9 of https://arxiv.org/pdf/0905.2635.pdf.
        self.R = np.transpose(np.dot(np.dot(U, np.diag(C)), V))
        # Update scale and translation using Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.
        self.s = np.trace(np.dot(np.transpose(self.A),
                                 np.transpose(self.R))) / self.YPY
        self.t = np.transpose(muX) - self.s * \
            np.dot(np.transpose(self.R), np.transpose(muY))

    def transform_point_cloud(self, Y=None):
        """
        Update a point cloud using the new estimate of the rigid transformation.

        Attributes
        ----------
        Y: numpy array
            Point cloud to be transformed - use to predict on new set of points.
            Best for predicting on new points not used to run initial registration.
            If None, self.Y used.

        Returns
        -------
        If Y is None, returns None.
        Otherwise, returns the transformed Y.
        """
        if Y is None:
            self.TY = self.s * np.dot(self.Y, self.R) + self.t
            return
        else:
            return self.s * np.dot(Y, self.R) + self.t

    def update_variance(self):
        """
        Update the variance of the mixture model using the new estimate of the rigid transformation.
        See the update rule for sigma2 in Fig. 2 of https://arxiv.org/pdf/0905.2635.pdf.
        """
        qprev = self.q

        trAR = np.trace(np.dot(self.A, self.R))
        xPx = np.dot(np.transpose(self.Pt1), np.sum(
            np.multiply(self.X_hat, self.X_hat), axis=1))
        self.q = (xPx - 2 * self.s * trAR + self.s * self.s * self.YPY) / \
            (2 * self.sigma2) + self.D * self.Np/2 * np.log(self.sigma2)
        self.diff = np.abs(self.q - qprev)

        self.sigma2 = (xPx - self.s * trAR) / (self.Np * self.D)
        if self.sigma2 <= 0:
            self.sigma2 = self.tolerance / 10

    def get_registration_parameters(self):
        """
        Return the current estimate of the rigid transformation parameters.

        Returns
        -------
        self.s: float
            Current estimate of the scale factor.

        self.R: numpy array
            Current estimate of the rotation matrix.

        self.t: numpy array
            Current estimate of the translation vector.
        """
        return self.s, self.R, self.t
|
Python
| 0
|
@@ -1079,16 +1079,28 @@
s=None,
+ scale=True,
*args,
@@ -1110,16 +1110,16 @@
wargs):%0A
-
@@ -2281,16 +2281,43 @@
e else s
+%0A self.scale = scale
%0A%0A de
@@ -3521,32 +3521,67 @@
/0905.2635.pdf.%0A
+ if self.scale is True:%0A
self.s =
@@ -3622,41 +3622,8 @@
.A),
-%0A
np.
@@ -3653,16 +3653,47 @@
elf.YPY%0A
+ else:%0A pass%0A
|
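With the new `scale=True` keyword the solver can hold `s` fixed: the constructor stores the flag and `update_transform` re-estimates the scale only when it is on. Condensed into a standalone function for illustration:

import numpy as np

def update_scale(A, R, YPY, s, scale=True):
    # per the diff: only re-estimate s when scaling is enabled
    if scale is True:
        return np.trace(np.dot(np.transpose(A), np.transpose(R))) / YPY
    return s  # keep the current (e.g. initial) estimate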
296d3eff909377dcd1d4d334a843c03b00cb1bbe
|
add logdir to saver
|
examples/denoise_class/stages.py
|
examples/denoise_class/stages.py
|
import tensorflow as tf
import numpy as np
import os
from datetime import datetime

import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from flowfairy.core.stage import register, Stage
from flowfairy.conf import settings

@register(500)
class SummaryStage(Stage):

    def fig2rgb_array(self, expand=True):
        self.figure.canvas.draw()
        buf = self.figure.canvas.tostring_rgb()
        ncols, nrows = self.figure.canvas.get_width_height()
        shape = (nrows, ncols, 3) if not expand else (1, nrows, ncols, 3)
        return np.fromstring(buf, dtype=np.uint8).reshape(shape)

    def reset_fig(self):
        self.figure = plt.figure(num=0, figsize=(6,4), dpi=300)
        self.figure.clf()

    def before(self, sess, net):
        tf.summary.scalar('acc', net.accuracy)
        tf.summary.scalar('cost', net.cost)

        self.pred = net.pred
        self.x = net.x
        self.y = net.y

        #save sound
        arg = tf.argmax(self.pred,2)
        tf.summary.audio('x', self.y, settings.SAMPLERATE)
        tf.summary.audio('pred',tf.cast(arg,tf.float32), settings.SAMPLERATE)

        self.chunk=net.chunk

        #save fig
        self.reset_fig()
        img = self.fig2rgb_array()
        self.image = tf.Variable(np.zeros(img.shape, dtype=np.uint8))
        tf.summary.image('graph', self.image)

        #merge tf summaries
        self.merged = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter(os.path.join(settings.LOG_DIR, str(datetime.now())), sess.graph)

    def plot(self, sess):
        self.reset_fig()
        res, x, y, c = sess.run([self.pred, self.x, self.y, self.chunk])
        res = np.argmax(res, 2)

        start = c[0]-50
        end = (start+settings.CHUNK+100)

        plt.subplot('111').plot(res[0,start:end],'r')
        plt.subplot('111').plot(y[0,start:end],'b', alpha=0.5)
        plt.subplot('111').plot(x[0,start:end],'g', alpha=0.5)

    def draw_img(self, sess):
        self.plot(sess)
        sess.run(self.image.assign(self.fig2rgb_array()))

    def run(self, sess, i):
        self.draw_img(sess)
        summary = sess.run(self.merged)
        self.writer.add_summary(summary, i)

@register()
class TrainingStage(Stage):

    def before(self, sess, net):
        self.optimizer = net.optimizer

    def run(self, sess, i):
        sess.run(self.optimizer)

@register(1000)
class SavingStage(Stage):

    def before(self, sess, net):
        self.saver = tf.train.Saver()

    def run(self, sess, i):
        self.saver.save(sess, log_dir, global_step=i)
|
Python
| 0
|
@@ -236,16 +236,76 @@
ttings%0A%0A
+log_dir = os.path.join(settings.LOG_DIR, settings.LOGNAME)%0A%0A
@registe
|
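The fix defines the `log_dir` that `SavingStage.run` was already referencing, as a module-level path derived from the settings:

log_dir = os.path.join(settings.LOG_DIR, settings.LOGNAME)

With that in place, `self.saver.save(sess, log_dir, global_step=i)` writes numbered checkpoints under the configured log directory.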
581b49ad98616b7450c12be1d86960e8f38df9ac
|
Update lca_calculations.py
|
cea/optimization/lca_calculations.py
|
cea/optimization/lca_calculations.py
|
# -*- coding: utf-8 -*-
"""
This file imports the price details from the cost database as a class. This helps in preventing multiple importing
of the corresponding values in individual files.
"""
from __future__ import division

import warnings

import pandas as pd

warnings.filterwarnings("ignore")

__author__ = "Sreepathi Bhargava Krishna"
__copyright__ = "Copyright 2017, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Sreepathi Bhargava Krishna"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "thomas@arch.ethz.ch"
__status__ = "Production"

class LcaCalculations(object):
    def __init__(self, locator):
        resources_lca = pd.read_excel(locator.get_database_supply_systems(), sheet_name="FEEDSTOCKS")
        resources_lca.set_index('code', inplace=True)

        # Natural gas
        self.NG_TO_CO2_EQ = resources_lca.loc['NATURALGAS']['CO2']
        self.NG_TO_OIL_EQ = resources_lca.loc['NATURALGAS']['PEN']

        # Drybiomass
        self.DRYBIOMASS_TO_CO2_EQ = resources_lca.loc['DRYBIOMASS']['CO2']
        self.DRYBIOMASS_TO_OIL_EQ = resources_lca.loc['DRYBIOMASS']['PEN']

        # WetBiomass
        self.WETBIOMASS_TO_CO2_EQ = resources_lca.loc['WETBIOMASS']['CO2']
        self.WETBIOMASS_TO_OIL_EQ = resources_lca.loc['WETBIOMASS']['PEN']

        # Electricity MJ/MJoil and kg/MJ
        self.EL_TO_CO2_EQ = resources_lca.loc['GRID']['CO2']
        self.EL_TO_OIL_EQ = resources_lca.loc['GRID']['CO2']
|
Python
| 0.000001
|
@@ -910,75 +910,8 @@
O2'%5D
-%0A self.NG_TO_OIL_EQ = resources_lca.loc%5B'NATURALGAS'%5D%5B'PEN'%5D
%0A%0A
@@ -1007,83 +1007,8 @@
O2'%5D
-%0A self.DRYBIOMASS_TO_OIL_EQ = resources_lca.loc%5B'DRYBIOMASS'%5D%5B'PEN'%5D
%0A%0A
@@ -1104,83 +1104,8 @@
O2'%5D
-%0A self.WETBIOMASS_TO_OIL_EQ = resources_lca.loc%5B'WETBIOMASS'%5D%5B'PEN'%5D
%0A%0A
@@ -1208,66 +1208,4 @@
2'%5D%0A
- self.EL_TO_OIL_EQ = resources_lca.loc%5B'GRID'%5D%5B'CO2'%5D%0A%0A
|
7d69bcc6474d954b311251bf077750e0418170cb
|
Fix typo and execute JS script found in local folder.
|
button.py
|
button.py
|
import RPi.GPIO as GPIO
import time
import os
from optparse import OptionParser

# Parse input arguments
parser = OptionParser()
parser.add_option("-t", "--testGPIO", action="store_true", help="Test GPIO connection, does not call the JS script.")
# The option --pin sets the input pin for your button
# It defaults to GPIO24 or HardwarePin 19
parser.add_option("-p", "--pin", dest="pin", help="GPIO pin to use. If not provided it defaults to HardwarePin 19.", default=19)
(options, args) = parser.parse_args()

testingGPIO = options.testGPIO != None
buttonPin = options.pin

# sets GPIO mode to BCM pin numbering
GPIO.setmode(GPIO.BCM)
# sets GPIO pin to INPUT mode with a pull-down resistor
GPIO.setup(buttonPin,GPIO.IN, pull_up_down=GPIO.PUD_DOWN)

if(testingGPIO):
    print "Press the connected button. If you are pressing but you do not see any further output, then there is something wrong with the connection."

while True:
    # waits for pin input and then executes the script below
    if (GPIO.input(buttonPin)):
        if (testingGPIO):
            print "PIN " + buttonPing + " works correctly."
            continue
        # the script that will be executed (as root)
        os.system("node /home/pi/guest-password-printer/index.js")
|
Python
| 0
|
@@ -1070,17 +1070,16 @@
uttonPin
-g
+ %22 wor
@@ -1180,40 +1180,8 @@
ode
-/home/pi/guest-password-printer/
inde
|
194c34f0ce3a2decff8401494e57b904032a52a8
|
update local parser
|
log_parser/local_log_parser.py
|
log_parser/local_log_parser.py
|
#!/usr/bin/env python
######################################################
# -*- coding: utf-8 -*-
# File Name: local_log_parser.py
# Author: James Hong & Qian Li
# Created Date: 2017-10-28
# Description: Parse local logs
######################################################

import argparse
import json
import os
import re
import sys
import time

# need to install python-dateutil
import dateutil.parser
import numpy as np
from collections import OrderedDict

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--logdir', '-l', type=str, required=True,
                        help='Directory where log files are stored')
    parser.add_argument('--outfile', '-o', type=str, required=True,
                        help='File to save parsed output')
    return parser.parse_args()

class StatsObject(object):

    def __init__(self):
        self.numLambdas = 0
        self.data = OrderedDict()

    def incrementNumLambdas(self):
        self.numLambdas += 1

    def record_key_value(self, k, v):
        if k not in self.data:
            self.data[k] = []
        self.data[k].append(v)

    def print_stats(self):
        print 'Parsed %d lambda logs' % self.numLambdas
        for k, v in self.data.iteritems():
            print k
            print ' mean:', np.mean(v)
            print ' stdev:', np.std(v)
            print ' median:', np.median(v)
            print ' min:', min(v)
            print ' max:', max(v)
            print ' 10th:', np.percentile(v, 10)
            print ' 25th:', np.percentile(v, 25)
            print ' 75th:', np.percentile(v, 75)
            print ' 90th:', np.percentile(v, 90)
            print ' 95th:', np.percentile(v, 95)
            print ' 99th:', np.percentile(v, 99)

    def dump_parsed_values(self, outfile):
        print >> sys.stderr, 'Writing parsed results to', outfile
        with open(outfile, 'w') as ofs:
            json.dump(self.data, ofs)

REPORT_RE = re.compile(r'Duration: ([\d.]+) ms\s+Billed Duration: (\d+) ms\s+Memory Size: (\d+) MB\s+Max Memory Used: (\d+) MB')

def parse_line(line, stats):
    if 'Timelist:' in line:
        timelistObj = None
        # back compatibility, support two types
        try:
            _, timelist = line.split('Timelist:', 1)
            timelistObj = json.loads(json.loads(timelist.strip()))
        except Exception as e:
            try:
                timelistObj = json.loads(timelist)
            except Exception as e:
                print >> sys.stderr, e, line

        for k, v in timelistObj.iteritems():
            stats.record_key_value(k, v)

    if 'START' in line:
        timeStr, _ = line.split(' ', 1)
        parsedDate = dateutil.parser.parse(timeStr)
        stats.record_key_value('start-time', time.mktime(parsedDate.timetuple()))

    if 'END' in line:
        timeStr, _ = line.split(' ', 1)
        parsedDate = dateutil.parser.parse(timeStr)
        stats.record_key_value('end-time', time.mktime(parsedDate.timetuple()))

    matchObj = REPORT_RE.search(line)
    if matchObj is not None:
        duration = float(matchObj.group(1))
        billedDuration = int(matchObj.group(2))
        memorySize = int(matchObj.group(3))
        maxMemoryUsed = int(matchObj.group(4))

        stats.record_key_value('duration', duration)
        stats.record_key_value('billed-duration', billedDuration)
        stats.record_key_value('memory-size', memorySize)
        stats.record_key_value('max-memory-used', maxMemoryUsed)
        stats.incrementNumLambdas()

def main(args):
    if not os.path.isdir(args.logdir):
        raise Exception('Log directory does not exist')

    stats = StatsObject()

    # recursively walk subdirs
    for dirpath, dirnames, filenames in os.walk(args.logdir):
        for fileName in filenames:
            filePath = os.path.join(dirpath, fileName)
            print >> sys.stderr, 'Parsing', fileName
            try:
                with open(filePath, 'r') as ifs:
                    for line in ifs:
                        parse_line(line, stats)
            except Exception as e:
                print >> sys.stderr, e

    stats.print_stats()
    if args.outfile is not None:
        stats.dump_parsed_values(args.outfile)

if __name__ == '__main__':
    main(get_args())
|
Python
| 0.000001
|
@@ -854,16 +854,39 @@
das = 0%0A
+ self.totalLogs = 0%0A
self
@@ -1139,16 +1139,31 @@
bda
+logs out of %25d
logs' %25
self
@@ -1158,16 +1158,17 @@
logs' %25
+(
self.num
@@ -1174,16 +1174,86 @@
mLambdas
+, %0A self.totalLogs)
%0A for
@@ -2499,16 +2499,41 @@
e(k, v)%0A
+ stats.totalLogs += 1%0A
%0A if
|
b4ea95dc2dc1591e96d22b5058cef440416477e0
|
Bump version to 0.10.0b (#740)
|
stellargraph/version.py
|
stellargraph/version.py
|
# -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Global version information
__version__ = "0.9.0b"
|
Python
| 0
|
@@ -650,9 +650,10 @@
%220.
-9
+10
.0b%22
|
f6e51b386fcbb608b5cc0d1c7a95df1494c8cc9b
|
fix for running unit tests in py versions < 3
|
stomp/test/testutils.py
|
stomp/test/testutils.py
|
import os
import socket
import sys
import threading

import logging
log = logging.getLogger('testutils.py')

from configparser import ConfigParser


from stomp import ConnectionListener, StatsListener, WaitingListener
from stomp.backward import *

config = ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), 'setup.ini'))

def get_environ(name):
    try:
        return os.environ[name]
    except:
        return None

def get_standard_host():
    host = config.get('default', 'host')
    port = config.get('default', 'port')
    return [(get_environ('STD_HOST') or host, int(get_environ('STD_PORT') or port))]

def get_standard_ssl_host():
    host = config.get('default', 'host')
    port = config.get('default', 'ssl_port')
    return [(get_environ('STD_HOST') or host, int(get_environ('STD_SSL_PORT') or port))]

def get_rabbitmq_host():
    host = config.get('rabbitmq', 'host')
    port = config.get('rabbitmq', 'port')
    return [(get_environ('RABBITMQ_HOST') or host, int(get_environ('RABBITMQ_PORT') or port))]

def get_stompserver_host():
    host = config.get('stompserver', 'host')
    port = config.get('stompserver', 'port')
    return [(get_environ('STOMPSERVER_HOST') or host, int(get_environ('STOMPSERVER_PORT') or port))]

class TestListener(StatsListener, WaitingListener):
    def __init__(self, receipt=None):
        StatsListener.__init__(self)
        WaitingListener.__init__(self, receipt)
        self.message_list = []
        self.message_condition = threading.Condition()
        self.message_received = False

    def on_message(self, headers, message):
        StatsListener.on_message(self, headers, message)
        self.message_list.append((headers, message))
        self.message_condition.acquire()
        self.message_received = True
        self.message_condition.notify()
        self.message_condition.release()

    def wait_for_message(self):
        self.message_condition.acquire()
        while not self.message_received:
            self.message_condition.wait()
        self.message_condition.release()
        self.message_received = False

    def get_latest_message(self):
        return self.message_list[len(self.message_list)-1]

class TestStompServer(object):
    def __init__(self, host, port):
        self.host = host
        self.port = port
        self.frames = []

    def start(self):
        log.debug('Starting stomp server')
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((self.host, self.port))
        self.s.listen(1)
        self.running = True
        thread = threading.Thread(None, self.run)
        thread.daemon = True
        thread.start()
        self.stopped = False
        log.debug('Stomp server started')

    def stop(self):
        log.debug('Stopping test server')
        if self.conn:
            try:
                self.conn.shutdown(socket.SHUT_WR)
            except Exception:
                pass
            self.conn.close()
        if self.s:
            self.s.close()
        self.running = False
        self.conn = None
        self.s = None
        self.stopped = True
        log.debug('Connection stopped')

    def get_next_frame(self):
        if len(self.frames) > 0:
            rtn = self.frames[0]
            del self.frames[0]
            return rtn
        else:
            return ''

    def add_frame(self, frame):
        self.frames.append(frame)

    def run(self):
        self.conn, addr = self.s.accept()
        while self.running:
            try:
                data = self.conn.recv(1024)
                frame = self.get_next_frame()
                if self.conn is None:
                    break
                if frame is not None:
                    self.conn.send(encode(frame))
            except Exception:
                _, e, _ = sys.exc_info()
                log.debug(e)
                break
        try:
            self.conn.close()
        except:
            pass
        self.stopped = True
        log.debug('Run loop completed')
|
Python
| 0
|
@@ -7,33 +7,8 @@
os%0A
-import socket%0Aimport sys%0A
impo
@@ -76,16 +76,25 @@
s.py')%0A%0A
+try:%0A
from con
@@ -127,47 +127,89 @@
ser%0A
-%0A%0Afrom stomp import ConnectionListener,
+except ImportError:%0A from ConfigParser import ConfigParser%0A%0A%0Afrom stomp import
Sta
|
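The core of the fix, reconstructed from the diff: the ConfigParser module was renamed configparser in Python 3, so the import is wrapped in a fallback that runs on both interpreter lines without any extra dependency.

try:
    from configparser import ConfigParser   # Python 3
except ImportError:
    from ConfigParser import ConfigParser   # Python 2

config = ConfigParser()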
221e45828b9cc33d9ae02d08d94dfaa89977d3e7
|
update import_reading for Courtney
|
vehicles/management/commands/import_reading.py
|
vehicles/management/commands/import_reading.py
|
from ciso8601 import parse_datetime
from django.utils.timezone import make_aware
from django.contrib.gis.geos import Point
from busstops.models import Service
from ...models import VehicleLocation, VehicleJourney
from ..import_live_vehicles import ImportLiveVehiclesCommand
class Command(ImportLiveVehiclesCommand):
url = 'http://rtl2.ods-live.co.uk/api/vehiclePositions'
source_name = 'Reading'
services = Service.objects.filter(operator__in=('RBUS', 'GLRB', 'KENN', 'NADS', 'THVB'), current=True)
@staticmethod
def get_datetime(item):
return make_aware(parse_datetime(item['observed']))
def get_vehicle(self, item):
vehicle = item['vehicle']
defaults = {
'source': self.source
}
if vehicle.isdigit():
defaults['fleet_number'] = vehicle
return self.vehicles.get_or_create(
defaults,
operator_id='RBUS',
code=vehicle
)
def get_journey(self, item, vehicle):
journey = VehicleJourney()
journey.route_name = item['service']
latest_journey = vehicle.latest_location and vehicle.latest_location.journey
if latest_journey and latest_journey.service and latest_journey.route_name == journey.route_name:
journey.service = latest_journey.service
elif journey.route_name:
try:
journey.service = self.get_service(
self.services.filter(line_name__iexact=journey.route_name),
Point(float(item['longitude']), float(item['latitude']))
)
except Service.DoesNotExist:
pass
if not journey.service:
print(item)
return journey
def create_vehicle_location(self, item):
return VehicleLocation(
latlong=Point(float(item['longitude']), float(item['latitude'])),
heading=item['bearing'] or None
)
|
Python
| 0
|
@@ -489,16 +489,24 @@
, 'THVB'
+, 'CTNY'
), curre
@@ -743,16 +743,51 @@
f.source
+,%0A 'operator_id': 'RBUS'
%0A
@@ -959,15 +959,29 @@
r_id
-='RBUS'
+__in=('RBUS', 'CTNY')
,%0A
|
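get_vehicle as it reads after the patch, reconstructed from the diff (it sits inside the Command class above): newly created vehicles still default to operator RBUS, while lookups now match vehicles already recorded under either RBUS or CTNY.

    def get_vehicle(self, item):
        vehicle = item['vehicle']
        defaults = {
            'source': self.source,
            'operator_id': 'RBUS'              # added: operator for newly created rows
        }
        if vehicle.isdigit():
            defaults['fleet_number'] = vehicle
        return self.vehicles.get_or_create(
            defaults,
            operator_id__in=('RBUS', 'CTNY'),  # widened from operator_id='RBUS'
            code=vehicle
        )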
08b3c7fb0577ed38dea3f427ffcc8ebc1faf2ca0
|
Update config.py
|
src/dogecoinrpc/config.py
|
src/dogecoinrpc/config.py
|
# Copyright (c) 2010 Witchspace <witchspace81@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
Utilities for reading dogecoin configuration files.
"""
def read_config_file(filename):
"""
Read a simple ``'='``-delimited config file.
Raises :const:`IOError` if unable to open file, or :const:`ValueError`
    if a parse error occurs.
"""
f = open(filename)
try:
cfg = {}
for line in f:
line = line.strip()
if line and not line.startswith("#"):
try:
(key, value) = line.split('=', 1)
cfg[key] = value
except ValueError:
pass # Happens when line has no '=', ignore
finally:
f.close()
return cfg
def read_default_config(filename=None):
"""
Read dogecoin default configuration from the current user's home directory.
Arguments:
- `filename`: Path to a configuration file in a non-standard location (optional)
"""
if filename is None:
import os
import platform
home = os.getenv("HOME")
if not home:
raise IOError("Home directory not defined, don't know where to look for config file")
if platform.system() == "Darwin":
location = 'Library/Application Support/Bitcoin/dogecoin.conf'
elif platform.system() in ('Windows', 'Microsoft'):
location = 'AppData\\Roaming\\DogeCoin\\dogecoin.conf'
else:
location = '.dogecoin/dogecoin.conf'
filename = os.path.join(home, location)
elif filename.startswith("~"):
import os
filename = os.path.expanduser(filename)
try:
return read_config_file(filename)
except (IOError, ValueError):
pass # Cannot read config file, ignore
|
Python
| 0
|
@@ -2341,11 +2341,12 @@
ort/
-Bit
+Doge
coin
|
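The patched path logic, pulled out into a standalone sketch; the function name default_config_path is illustrative, since in the source this lives inside read_default_config.

import os
import platform

def default_config_path():
    home = os.getenv("HOME")
    if not home:
        raise IOError("Home directory not defined, don't know where to look for config file")
    if platform.system() == "Darwin":
        # fixed: previously pointed at 'Library/Application Support/Bitcoin/...'
        location = 'Library/Application Support/Dogecoin/dogecoin.conf'
    elif platform.system() in ('Windows', 'Microsoft'):
        location = 'AppData\\Roaming\\DogeCoin\\dogecoin.conf'
    else:
        location = '.dogecoin/dogecoin.conf'
    return os.path.join(home, location)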
63c8f8702e042d9cb3358ba96bf453e6971ad0b2
|
update basemap for changes in error map from seqspace
|
epistasis/models/base.py
|
epistasis/models/base.py
|
__doc__ = """
Base class for epistasis models. This is meant to be called in a subclass.
"""
import numpy as np
import itertools as it
from collections import OrderedDict
# imports from seqspace dependency
from seqspace.utils import farthest_genotype, binary_mutations_map
# Local imports
from epistasis.utils import epistatic_order_indices
from epistasis.mapping.epistasis import EpistasisMap
class BaseModel(EpistasisMap):
def __init__(self, wildtype, genotypes, phenotypes, errors=None, log_transform=False, mutations=None):
""" Populate an Epistasis mapping object.
__Arguments__:
`wildtype` [str] : Wildtype genotype. Wildtype phenotype will be used as reference state.
`genotypes` [array-like, dtype=str] : Genotypes in map. Can be binary strings, or not.
`phenotypes` [array-like] : Quantitative phenotype values
`errors` [array-like] : List of phenotype errors.
`log_transform` [bool] : If True, log transform the phenotypes.
`mutations` [dict]: Mapping dictionary for mutations at each site.
"""
        # Defaults to binary mapping if no specific mutations are named
if mutations is None:
mutant = farthest_genotype(wildtype, genotypes)
mutations = binary_mutations_map(wildtype, mutant)
super(BaseModel, self).__init__(wildtype, genotypes, phenotypes, errors=errors, log_transform=log_transform, mutations=mutations)
# Construct a binary representation of the map (method inherited from parent class)
# and make it a subclass of the model.
self._construct_binary()
# Model error if given.
if errors is not None:
self.errors = errors
# ---------------------------------------------------------------------------------
# Loading method
# ---------------------------------------------------------------------------------
@classmethod
def from_gpm(cls, gpm, **kwargs):
""" Initialize an epistasis model from a Genotype-phenotype map object """
# Grab each property from map
model = cls(gpm.wildtype,
gpm.genotypes,
gpm.phenotypes,
mutations = gpm.mutations,
log_transform= False,
**kwargs)
# Set errors outside init to handle log transform
model._errors = gpm.errors
# Might need to change this later (kind of a hack)
model._log_transform = gpm.log_transform
return model
# ---------------------------------------------------------------------------------
# Other methods
# ---------------------------------------------------------------------------------
def get_order(self, order, errors=False, label="genotype"):
""" Return a dict of interactions to values of a given order. """
# get starting index of interactions
if order > self.order:
raise Exception("Order argument is higher than model's order")
# Determine the indices of this order of interactions.
start, stop = epistatic_order_indices(self.length,order)
# Label type.
if label == "genotype":
keys = self.Interactions.genotypes
elif label == "keys":
keys = self.Interactions.keys
else:
raise Exception("Unknown keyword argument for label.")
# Build dictionary of interactions
stuff = OrderedDict(zip(keys[start:stop], self.Interactions.values[start:stop]))
if errors:
errors = OrderedDict(zip(keys[start:stop], self.Interactions.errors[start:stop]))
return stuff, errors
else:
return stuff
def fit(self):
""" Fitting methods for epistasis models. """
raise Exception("""Must be implemented in a subclass.""")
def fit_error(self):
""" Fitting method for errors in the epistatic parameters. """
raise Exception("""Must be implemented in a subclass.""")
|
Python
| 0
|
@@ -2367,16 +2367,20 @@
gpm.
+Raw.
phenotyp
@@ -2376,32 +2376,77 @@
aw.phenotypes, %0A
+ errors = gpm.Raw.errors,%0A
@@ -2515,146 +2515,26 @@
rm=
-False,%0A **kwargs)%0A %0A # Set errors outside init to handle log transform%0A model._errors = gpm.errors
+gpm.log_transform,
%0A
@@ -2538,25 +2538,24 @@
-%0A
# Might
@@ -2550,107 +2550,21 @@
-# Might need to change this later (kind of a hack)%0A model._log_transform = gpm.log_transform
+ **kwargs)
%0A
|
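from_gpm as it reads after the patch, reconstructed from the diff: raw phenotypes and errors now come from gpm.Raw, and the log transform is passed through to __init__ instead of being patched onto the model afterwards.

    @classmethod
    def from_gpm(cls, gpm, **kwargs):
        """ Initialize an epistasis model from a Genotype-phenotype map object """
        model = cls(gpm.wildtype,
                    gpm.genotypes,
                    gpm.Raw.phenotypes,               # raw, untransformed values
                    errors=gpm.Raw.errors,            # added: raw errors
                    mutations=gpm.mutations,
                    log_transform=gpm.log_transform,  # was hard-coded to False
                    **kwargs)
        return model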
13489726a9b3f9ce9dcd2ff9c3086279db7704fe
|
increment build id
|
esp32/modules/version.py
|
esp32/modules/version.py
|
build = 7
name = "Maffe Maniak"
|
Python
| 0.000001
|
@@ -5,9 +5,9 @@
d =
-7
+8
%0Anam
|
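After the patch the module reads:

build = 8
name = "Maffe Maniak"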
e4c4d78b5f6df13b5dc59114ce78db9725caaa1a
|
Use email.message.Message.get_content_charset (which I just discovered) instead of own implementation
|
mailshareapp/process_emails.py
|
mailshareapp/process_emails.py
|
# License: https://github.com/RobFisher/mailshare/blob/master/LICENSE
import email
import datetime
import warnings
import poll_imap_email
from mailshare.mailshareapp.models import Mail, Addressee
def get_charset(message):
"""
Gets the charset from the Content-Type header from the specified email.message.Message
    object. The class has its own get_charset method, but it returns None.
"""
result = None
t = message.get('Content-Type')
if t:
offset = t.find('charset')
if offset != -1:
offset = offset + len('charset') + 2
charset_len = t[offset:].find('"')
if charset_len != -1:
result = t[offset:offset+charset_len]
return result
def get_plain_body(message):
"""Search all the MIME parts of the email.message.Message and return the plain text body."""
plain_part = None
for part in message.walk():
# for now we assume the first "text/plain" part is what we want
if part.get_content_type() == 'text/plain':
plain_part = part
# we've found it. Get out of here!
break
# settle for the first non-multipart payload in case there is no text/plain,
# but keep looking
        if plain_part is None and not part.is_multipart():
plain_part = part
# decode any Base64 and convert to utf-8 if needed
plain_part_payload = None
if plain_part:
plain_part_payload = plain_part.get_payload(decode=True)
charset = get_charset(plain_part)
if charset != None and charset != 'utf-8':
plain_part_payload = plain_part_payload.decode(charset).encode('utf-8')
return plain_part_payload
def get_or_add_addressee(address):
"""
Accepts address fields from an email header of the form 'Name <email@address>'. Looks
up the email address in the Addressee table. If the address does not exist in the table
    it is added. In both cases the matching Addressee object is returned.
Email addresses are considered to be case insensitive for now. While not strictly true,
this seems more useful than the alternative.
"""
(address_name, address_address) = email.utils.parseaddr(address)
addressee_list = Addressee.objects.filter(address__iexact=address_address)
result = None
if len(addressee_list) == 0:
addressee = Addressee(name=address_name, address=address_address)
addressee.save()
result = addressee
else:
result = addressee_list[0]
return result
def datetime_from_email_date(email_date):
"""
Returns a Python datetime object suitable for storing in the database given
the email Date header string. These should comply with this specification:
http://cr.yp.to/immhf/date.html
"""
d = email.utils.parsedate_tz(email_date)
dt = datetime.datetime(*d[0:6])
# now we need to subtract the time zone offset to get a UTC time
tz_offset = datetime.timedelta(seconds=d[9])
dt = dt - tz_offset
return dt
def add_message_to_database(message):
"""Add the message to the database if it is unique according to its Message-ID field."""
message_id = message.get('Message-ID')
matching_messages = Mail.objects.filter(message_id__exact=message_id)
if len(matching_messages) == 0:
m = Mail()
m.sender = get_or_add_addressee(message.get('From'))
m.subject = message.get('Subject')
m.date = datetime_from_email_date(message.get('Date'))
m.message_id = message.get('Message-ID')
m.thread_index = message.get('Thread-Index')
if m.thread_index == None:
m.thread_index = ''
m.body = get_plain_body(message)
with warnings.catch_warnings():
warnings.simplefilter('ignore')
m.save()
addresses = message.get_all('to')
if addresses != None:
for address in message.get_all('to'):
m.to.add(get_or_add_addressee(address))
addresses = message.get_all('cc')
if addresses != None:
for address in message.get_all('cc'):
m.cc.add(get_or_add_addressee(address))
def print_message_headers(message):
"""Given an email.message.Message object, print out some interesting headers."""
print "To: " + message.get('To')
print "From: " + message.get('From')
print "Subject: " + message.get('Subject')
print "Date: " + message.get('Date')
def quick_test():
messages = poll_imap_email.fetch_messages()
for message in messages:
add_message_to_database(message)
if __name__ == '__main__':
messages = poll_imap_email.fetch_messages()
for message in messages:
print_message_headers(message)
plain_body = get_plain_body(message)
print("-----")
print(plain_body)
print("-----")
print("")
|
Python
| 0
|
@@ -195,542 +195,8 @@
ee%0A%0A
-def get_charset(message):%0A %22%22%22%0A Gets the charset from the Content-Type header from the specified email.message.Message%0A object. This class has the method get_charset but this returns None.%0A %22%22%22%0A result = None%0A t = message.get('Content-Type')%0A if t:%0A offset = t.find('charset')%0A if offset != -1:%0A offset = offset + len('charset') + 2%0A charset_len = t%5Boffset:%5D.find('%22')%0A if charset_len != -1:%0A result = t%5Boffset:offset+charset_len%5D%0A return result%0A%0A%0A
def
@@ -978,30 +978,39 @@
t =
-get_charset(plain_part
+plain_part.get_content_charset(
)%0A
|
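A minimal, runnable sketch of the replacement API: email.message.Message.get_content_charset reads the charset parameter straight out of the Content-Type header (returned lower-cased), which is exactly what the deleted hand-rolled get_charset was scanning for.

import email

raw = 'Content-Type: text/plain; charset="ISO-8859-1"\n\nhola'
msg = email.message_from_string(raw)
print(msg.get_content_charset())  # -> iso-8859-1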