| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|

Column stats from the source dump: `commit` is a 40-character hash; `subject` runs 1 to 3.25k characters; `old_file` and `new_file` are paths of 4 to 311 characters; `old_contents` runs 0 to 26.3k characters; `lang` takes one of 3 classes; `proba` is a float64 in [0, 1]; `diff` runs 0 to 7.82k characters.
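Each row pairs a commit with the file contents before the change (`old_contents`) and a character-offset `diff`. In the raw dump the diff cells arrive percent-encoded (`%5B` for `[`, `%0A` for newlines); the listings below have been decoded for readability. A minimal sketch for doing the same programmatically, assuming a row is available as a plain Python dict:

```python
from urllib.parse import unquote

def decode_diff(row):
    """Percent-decode the diff cell of one dataset row (row layout assumed)."""
    return unquote(row["diff"])
```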
7f77f822dcf05152634cda771cce934320d23ca1
|
use image data
|
experiments/01-single-dimer/plot_velocity.py
|
experiments/01-single-dimer/plot_velocity.py
|
#!/usr/bin/env python3
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('file', type=str, help='H5MD datafile')
parser.add_argument('--directed', action='store_true')
args = parser.parse_args()
import numpy as np
import h5py
import matplotlib.pyplot as plt
with h5py.File(args.file, 'r') as f:
r = f['particles/dimer/position/value'][...]
r_dt = f['particles/dimer/position/time'][()]
v = f['particles/dimer/velocity/value'][...]
v_dt = f['particles/dimer/velocity/time'][()]
assert abs(r_dt-v_dt) < 1e-12
assert r.shape[1]==2
assert r.shape[2]==3
assert v.shape[1]==2
assert v.shape[2]==3
time = np.arange(r.shape[0])*r_dt
v_com = v.mean(axis=1)
if args.directed:
unit_z = r[:,1,:]-r[:,0,:]
unit_z /= np.sqrt(np.sum(unit_z**2, axis=1)).reshape((-1,1))
vz = np.sum(v_com*unit_z, axis=1)
plt.plot(time, vz)
else:
plt.plot(time, v_com)
plt.show()
|
Python
| 0.000019
|
@@ -412,16 +412,63 @@
e'][()]
+ im = f['particles/dimer/image/value'][...]
v =
@@ -557,16 +557,93 @@
me'][()]
+
    edges = f['particles/dimer/box/edges'][:].reshape((1,-1))

r += edges*im

assert
|
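Decoded, the hunks splice periodic-image unwrapping into the script above; a sketch of the patched read section (indentation reconstructed, names taken from the diff):

```python
with h5py.File(args.file, 'r') as f:
    r = f['particles/dimer/position/value'][...]
    r_dt = f['particles/dimer/position/time'][()]
    im = f['particles/dimer/image/value'][...]
    v = f['particles/dimer/velocity/value'][...]
    v_dt = f['particles/dimer/velocity/time'][()]
    edges = f['particles/dimer/box/edges'][:].reshape((1, -1))

r += edges*im  # unwrap positions using the stored periodic-image counts
```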
8fcca42b02653085ae955482d96d68aaac3aa5a6
|
Handle fp.o addresses.
|
fedmsg_meta_fedora_infrastructure/fasshim.py
|
fedmsg_meta_fedora_infrastructure/fasshim.py
|
import collections
import threading
import socket
from hashlib import sha256, md5
_fas_cache = {}
_fas_cache_lock = threading.Lock()
import logging
log = logging.getLogger("moksha.hub")
try:
from six.moves.urllib import parse
except ImportError:
# Really really old 'six' doesn't have this move.. so we fall back to
# python-2 only usage. If we're on an old 'six', then we can assume that
# we must also be on an old Python.
import urllib as parse
def _ordered_query_params(params):
    # if OrderedDict is available, preserve order of params
# to make this easily testable on PY3
if hasattr(collections, 'OrderedDict'):
retval = collections.OrderedDict(params)
else:
retval = dict(params)
return retval
def avatar_url(username, size=64, default='retro'):
openid = "http://%s.id.fedoraproject.org/" % username
return avatar_url_from_openid(openid, size, default)
def avatar_url_from_openid(openid, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
openid=openid,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = sha256(openid.encode('utf-8')).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
def avatar_url_from_email(email, size=64, default='retro', dns=False):
"""
Our own implementation since fas doesn't support this nicely yet.
"""
if dns:
# This makes an extra DNS SRV query, which can slow down our webapps.
# It is necessary for libravatar federation, though.
import libravatar
return libravatar.libravatar_url(
email=email,
size=size,
default=default,
)
else:
params = _ordered_query_params([('s', size), ('d', default)])
query = parse.urlencode(params)
hash = md5(email.encode('utf-8')).hexdigest()
return "https://seccdn.libravatar.org/avatar/%s?%s" % (hash, query)
def make_fas_cache(**config):
global _fas_cache
if _fas_cache:
return _fas_cache
log.warn("No previous fas cache found. Looking to rebuild.")
try:
import fedora.client
import fedora.client.fas2
except ImportError:
log.warn("No python-fedora installed. Not caching fas.")
return {}
    if 'fas_credentials' not in config:
log.warn("No fas_credentials found. Not caching fas.")
return {}
creds = config['fas_credentials']
default_url = 'https://admin.fedoraproject.org/accounts/'
fasclient = fedora.client.fas2.AccountSystem(
base_url=creds.get('base_url', default_url),
username=creds['username'],
password=creds['password'],
)
timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(600)
try:
log.info("Downloading FAS cache")
request = fasclient.send_request('/user/list',
req_params={'search': '*'},
auth=True)
except fedora.client.ServerError as e:
log.warning("Failed to download fas cache %r" % e)
return {}
finally:
socket.setdefaulttimeout(timeout)
log.info("Caching necessary user data")
for user in request['people']:
nick = user['ircnick']
if nick:
_fas_cache[nick] = user['username']
email = user['email']
if email:
_fas_cache[email] = user['username']
del request
del fasclient
del fedora.client.fas2
return _fas_cache
def nick2fas(nickname, **config):
log.debug("Acquiring _fas_cache_lock for nicknames.")
with _fas_cache_lock:
log.debug("Got _fas_cache_lock for nicknames.")
fas_cache = make_fas_cache(**config)
result = fas_cache.get(nickname, nickname)
log.debug("Released _fas_cache_lock for nicknames.")
return result
def email2fas(email, **config):
log.debug("Acquiring _fas_cache_lock for emails.")
with _fas_cache_lock:
log.debug("Got _fas_cache_lock for emails.")
fas_cache = make_fas_cache(**config)
result = fas_cache.get(email, email)
log.debug("Released _fas_cache_lock for emails.")
return result
|
Python
| 0
|
@@ -4328,32 +4328,117 @@
ail, **config):
+ if email.endswith('@fedoraproject.org'):
        return email.rsplit('@', 1)[0]

log.debug("A
|
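Decoded, the hunk makes `email2fas` short-circuit for fedoraproject.org (fp.o) aliases before the cache is consulted; the patched function would look roughly like this (the inline comment is mine):

```python
def email2fas(email, **config):
    # fp.o addresses map straight to FAS usernames; no cache lookup needed.
    if email.endswith('@fedoraproject.org'):
        return email.rsplit('@', 1)[0]

    log.debug("Acquiring _fas_cache_lock for emails.")
    with _fas_cache_lock:
        log.debug("Got _fas_cache_lock for emails.")
        fas_cache = make_fas_cache(**config)
        result = fas_cache.get(email, email)
    log.debug("Released _fas_cache_lock for emails.")
    return result
```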
26802dad0bafe8ac67d96b3c30e414af66cf0997
|
Order by the balance column
|
l10n_br_financial/report/financial_cashflow.py
|
l10n_br_financial/report/financial_cashflow.py
|
# -*- coding: utf-8 -*-
# Copyright 2017 KMEE
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, fields, models, _, tools
from ..models.financial_move_model import (
FINANCIAL_MOVE,
FINANCIAL_STATE,
FINANCIAL_TYPE
)
class FinancialCashflow(models.Model):
_name = 'financial.cashflow'
_auto = False
amount_cumulative_balance = fields.Monetary(
string=u"Balance",
)
amount_debit = fields.Monetary(
string=u"Debit",
)
amount_credit = fields.Monetary(
string=u"Credit",
)
state = fields.Selection(
selection=FINANCIAL_STATE,
string='Status',
)
company_id = fields.Many2one(
comodel_name='res.company',
string=u'Company',
)
currency_id = fields.Many2one(
comodel_name='res.currency',
string='Currency',
)
partner_id = fields.Many2one(
comodel_name='res.partner',
)
document_number = fields.Char(
string=u"Document Nº",
)
document_item = fields.Char(
string=u"Document item",
)
document_date = fields.Date(
string=u"Document date",
)
amount_document = fields.Monetary(
string=u"Document amount",
)
due_date = fields.Date(
string=u"Due date",
)
move_type = fields.Selection(
selection=FINANCIAL_TYPE,
)
business_due_date = fields.Date(
string='Business due date',
)
payment_mode = fields.Many2one(
comodel_name='payment.mode', # FIXME:
)
payment_term = fields.Many2one(
comodel_name='payment.term', # FIXME:
)
@api.model_cr
def init(self):
tools.drop_view_if_exists(self.env.cr, self._table)
self.env.cr.execute("""
CREATE OR REPLACE VIEW financial_cashflow_credit AS
SELECT
financial_move.create_date,
financial_move.id,
financial_move.document_number,
financial_move.document_item,
financial_move.move_type,
financial_move.state,
financial_move.business_due_date,
financial_move.document_date,
financial_move.payment_mode,
financial_move.payment_term,
financial_move.due_date,
financial_move.partner_id,
financial_move.currency_id,
coalesce(financial_move.amount_document, 0)
AS amount_document,
coalesce(financial_move.amount_document, 0)
AS amount_credit,
0 AS amount_debit,
coalesce(financial_move.amount_document, 0)
AS amount_balance
FROM
public.financial_move
WHERE
financial_move.move_type = 'r';
""")
self.env.cr.execute("""
CREATE OR REPLACE VIEW financial_cashflow_debit AS
SELECT
financial_move.create_date,
financial_move.id,
financial_move.document_number,
financial_move.document_item,
financial_move.move_type,
financial_move.state,
financial_move.business_due_date,
financial_move.document_date,
financial_move.payment_mode,
financial_move.payment_term,
financial_move.due_date,
financial_move.partner_id,
financial_move.currency_id,
coalesce(financial_move.amount_document, 0)
AS amount_document,
0 AS amount_credit,
(-1) * coalesce(financial_move.amount_document, 0)
AS amount_debit,
(-1) * coalesce(financial_move.amount_document, 0)
AS amount_balance
FROM
public.financial_move
WHERE
financial_move.move_type = 'p';
""")
self.env.cr.execute("""
CREATE OR REPLACE VIEW financial_cashflow_base AS
SELECT
c.create_date,
c.id,
c.document_number,
c.move_type,
c.state,
c.business_due_date,
c.document_date,
c.payment_mode,
c.payment_term,
c.due_date,
c.partner_id,
c.currency_id,
c.amount_document,
c.amount_credit,
c.amount_debit,
c.amount_balance
FROM
financial_cashflow_credit c
UNION ALL
SELECT
d.create_date,
d.id,
d.document_number,
d.move_type,
d.state,
d.business_due_date,
d.document_date,
d.payment_mode,
d.payment_term,
d.due_date,
d.partner_id,
d.currency_id,
d.amount_document,
d.amount_credit,
d.amount_debit,
d.amount_balance
FROM
financial_cashflow_debit d;
""")
self.env.cr.execute("""
CREATE OR REPLACE VIEW financial_cashflow AS
SELECT
b.create_date,
b.id,
b.document_number,
b.move_type,
b.state,
b.business_due_date,
b.document_date,
b.payment_mode,
b.payment_term,
b.due_date,
b.partner_id,
b.currency_id,
b.amount_document,
b.amount_credit,
b.amount_debit,
b.amount_balance,
SUM(b.amount_balance)
OVER (order by b.due_date, b.id)
AS amount_cumulative_balance
                -- there should be a balance_date column or something similar here,
                -- holding the date the credit/debit actually hits the account,
                -- since boletos and cheques are credited d+1 or d+2
                -- after the deposit/payment. Example:
                -- over(order by b.data_credito_debito)
FROM
financial_cashflow_base b;
""")
|
Python
| 0
|
@@ -348,16 +348,96 @@
= False
+
    # _order = 'amount_cumulative_balance'
    _order = 'business_due_date, id'

am
@@ -6527,16 +6527,25 @@
er by b.
+business_
due_date
|
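Decoded, the first hunk pins the model's default ordering and the second re-keys the running-balance window on the same column; the touched model lines would read roughly:

```python
class FinancialCashflow(models.Model):
    _name = 'financial.cashflow'
    _auto = False
    # _order = 'amount_cumulative_balance'
    _order = 'business_due_date, id'
```

with the cumulative sum in the final view becoming `SUM(b.amount_balance) OVER (order by b.business_due_date, b.id)`.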
8ac7a7924277ef660df92f5f221dc678e4a14d9c
|
allow non-cell regions in ConstantFunctionByRegion.get_constants()
|
sfepy/discrete/functions.py
|
sfepy/discrete/functions.py
|
import numpy as nm
from sfepy.base.base import assert_, OneTypeList, Container, Struct
class Functions(Container):
"""Container to hold all user-defined functions."""
def from_conf(conf):
objs = OneTypeList(Function)
for key, fc in conf.iteritems():
fun = Function(name = fc.name,
function = fc.function,
is_constant = False,
extra_args = {})
objs.append(fun)
obj = Functions(objs)
return obj
from_conf = staticmethod(from_conf)
class Function(Struct):
"""Base class for user-defined functions."""
def __init__(self, name, function, is_constant=False, extra_args=None):
Struct.__init__(self, name = name, function = function,
is_constant = is_constant)
if extra_args is None:
extra_args = {}
self.extra_args = extra_args
def __call__(self, *args, **kwargs):
_kwargs = dict(kwargs)
_kwargs.update(self.extra_args)
return self.function(*args, **_kwargs)
def set_function(self, function, is_constant=False):
self.function = function
self.is_constant = is_constant
def set_extra_args(self, **extra_args):
self.extra_args = extra_args
class ConstantFunction(Function):
"""Function with constant values."""
def __init__(self, values):
"""Make a function out of a dictionary of constant values. When
called with coors argument, the values are repeated for each
coordinate."""
name = '_'.join(['get_constants'] + values.keys())
def get_constants(ts=None, coors=None, mode=None, **kwargs):
out = {}
if mode == 'special':
for key, val in values.iteritems():
if '.' in key:
vkey = key.split('.')[1]
out[vkey] = val
elif (mode == 'qp'):
for key, val in values.iteritems():
if '.' in key: continue
val = nm.array(val, dtype=nm.float64, ndmin=3)
out[key] = nm.tile(val, (coors.shape[0], 1, 1))
elif (mode == 'special_constant') or (mode is None):
for key, val in values.iteritems():
if '.' in key: continue
out[key] = val
else:
raise ValueError('unknown function mode! (%s)' % mode)
return out
Function.__init__(self, name = name, function = get_constants,
is_constant = True)
class ConstantFunctionByRegion(Function):
"""
Function with constant values in regions.
"""
def __init__(self, values):
"""
Make a function out of a dictionary of constant values per region. When
called with coors argument, the values are repeated for each
coordinate in each of the given regions.
"""
name = '_'.join(['get_constants_by_region'] + values.keys())
def get_constants(ts=None, coors=None, mode=None,
term=None, problem=None, **kwargs):
out = {}
if mode == 'qp':
qps = term.get_physical_qps()
assert_(qps.num == coors.shape[0])
for key, val in values.iteritems():
if '.' in key: continue
rval = nm.array(val[val.keys()[0]], dtype=nm.float64,
ndmin=3)
s0 = rval.shape[1:]
matdata = nm.zeros(qps.shape[:2] + s0, dtype=nm.float64)
for rkey, rval in val.iteritems():
region = problem.domain.regions[rkey]
rval = nm.array(rval, dtype=nm.float64, ndmin=3)
ii = term.region.get_cell_indices(region.cells)
matdata[ii] = rval
out[key] = matdata.reshape((-1,) + s0)
return out
Function.__init__(self, name=name, function=get_constants,
is_constant=True)
|
Python
| 0.000004
|
@@ -3889,54 +3889,200 @@
-ii = term.region.get_cell_indices(region.cel
+cells = region.get_cells(true_cells_only=False)%0A ii = term.region.get_cell_indices(cells,%0A true_cells_only=Fa
ls
+e
)%0A
|
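Decoded, the hunk swaps the direct `region.cells` access for `get_cells(true_cells_only=False)`, which is what lets non-cell regions through; inside the `mode == 'qp'` branch the patched lines would be roughly:

```python
cells = region.get_cells(true_cells_only=False)
ii = term.region.get_cell_indices(cells,
                                  true_cells_only=False)
matdata[ii] = rval
```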
5a3a92c67e63ce7196e51555d2e683d746dd81b7
|
Add settings get_defaults() utility
|
virtool/settings.py
|
virtool/settings.py
|
import logging
logger = logging.getLogger(__name__)
SCHEMA = {
# Samples
"sample_group": {
"type": "string",
"default": "none"
},
"sample_group_read": {
"type": "boolean",
"default": True
},
"sample_group_write": {
"type": "boolean",
"default": False
},
"sample_all_read": {
"type": "boolean",
"default": True
},
"sample_all_write": {
"type": "boolean",
"default": False
},
"sample_unique_names": {
"type": "boolean",
"default": True
},
# HMM
"hmm_slug": {
"type": "string",
"default": "virtool/virtool-hmm"
},
"enable_api": {
"type": "boolean",
"default": False
},
# External Services
"enable_sentry": {
"type": "boolean",
"default": True
},
# Software Updates
"software_channel": {
"type": "string",
"default": "stable",
"allowed": [
"stable",
"alpha",
"beta"
]
},
# Accounts
"minimum_password_length": {
"type": "integer",
"default": 8
},
# Reference settings
"default_source_types": {
"type": "list",
"default": [
"isolate",
"strain"
]
}
}
LEGACY_SCHEMA = {
# HTTP Server
"server_host": {
"type": "string",
"default": "localhost"
},
"server_port": {
"type": "integer",
"default": 9950
},
"enable_api": {
"type": "boolean",
"default": False
},
# File paths
"data_path": {
"type": "string",
"default": "data"
},
"watch_path": {
"type": "string",
"default": "watch"
},
# Host resource limits
"proc": {
"type": "integer",
"default": 8
},
"mem": {
"type": "integer",
"default": 16
},
# MongoDB
"db_name": {
"type": "string",
"default": "virtool"
},
"db_host": {
"type": "string",
"default": "localhost"
},
"db_port": {
"type": "integer",
"default": 27017
},
"db_username": {
"type": "string",
"default": ""
},
"db_password": {
"type": "string",
"default": ""
},
"db_use_auth": {
"type": "boolean",
"default": False
},
"db_use_ssl": {
"type": "boolean",
"default": True
},
# HMM
"hmm_slug": {
"type": "string",
"default": "virtool/virtool-hmm"
},
# Jobs
"pathoscope_bowtie_proc": {
"type": "integer",
"default": 8
},
"pathoscope_bowtie_mem": {
"type": "integer",
"default": 16
},
"nuvs_proc": {
"type": "integer",
"default": 8
},
"nuvs_mem": {
"type": "integer",
"default": 16
},
"create_subtraction_proc": {
"type": "integer",
"default": 2
},
"create_subtraction_mem": {
"type": "integer",
"default": 4
},
"build_index_proc": {
"type": "integer",
"default": 2
},
"build_index_mem": {
"type": "integer",
"default": 4
},
# Samples
"sample_group": {
"type": "string",
"default": "none"
},
"sample_group_read": {
"type": "boolean",
"default": True
},
"sample_group_write": {
"type": "boolean",
"default": False
},
"sample_all_read": {
"type": "boolean",
"default": True
},
"sample_all_write": {
"type": "boolean",
"default": False
},
"sample_unique_names": {
"type": "boolean",
"default": True
},
# Proxy
"proxy_address": {
"type": "string",
"default": ""
},
"proxy_enable": {
"type": "boolean",
"default": False
},
"proxy_password": {
"type": "string",
"default": ""
},
"proxy_username": {
"type": "string",
"default": ""
},
"proxy_trust": {
"type": "string",
"default": False
},
# External Services
"enable_sentry": {
"type": "boolean",
"default": True
},
"software_channel": {
"type": "string",
"default": "stable",
"allowed": [
"stable",
"alpha",
"beta"
]
},
# Accounts
"minimum_password_length": {
"type": "integer",
"default": 8
},
# Reference settings
"default_source_types": {
"type": "list",
"default": [
"isolate",
"strain"
]
}
}
|
Python
| 0.000001
|
@@ -4764,28 +4764,109 @@
            "strain"
        ]
    }
}
+

def get_defaults():
    return {key: SCHEMA[key]["default"] for key in SCHEMA}
|
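Decoded, the hunk appends the utility named in the commit subject, a one-line dict comprehension over `SCHEMA`:

```python
def get_defaults():
    return {key: SCHEMA[key]["default"] for key in SCHEMA}
```

Calling `get_defaults()` yields a flat mapping of every setting to its schema default, handy for seeding a fresh settings document.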
02ae6cadac686c9c601bcad42c6b95b8811c3cfb
|
Add --ip and --port flag to example.
|
examples/commandline.py
|
examples/commandline.py
|
"""commandline.
Usage:
commandline.py lookup --lang=<lang> --word=<preword>
commandline.py -h | --help
Options:
-h --help Show this screen.
--lang=<lang> Language of suggested word.
--word=<preword> Pre-word of suggested word.
"""
import redis
import py_word_suggest
# from config import REDIS_IP
from docopt import docopt
# r = redis.StrictRedis(host='py-word-suggest-redis', port=6379, db=0)
# r = redis.StrictRedis(host=REDIS_IP, port=6379, db=0)
rs = '172.17.0.3'
r = redis.StrictRedis(host=rs, port=6379, db=0)
def main():
try:
obj = py_word_suggest.Selector_redis(r)
except Exception as e:
print("{e} Fail to connect to: {ip}".format(e=e,ip=rs))
exit(1)
arguments = docopt(__doc__, version='commandline 0.0.1')
if arguments['lookup']:
key = 'lang:{l}:gram:2:{w}'.format(l=arguments['--lang'],w=arguments['--word'])
try:
fetch = obj.gen_fetchWords(key)
except Exception as e:
print("{e}".format(e=e))
exit(1)
print("'{w}' has the following suggested words:\n".format(w=arguments['--word']))
print(list(obj.gen_suggestWord(*fetch)))
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -71,16 +71,56 @@
preword>
+ [--ip=<redis-ip>] [--port=<redis-port>]
comma
@@ -167,16 +167,24 @@
elp
+
Show thi
@@ -208,16 +208,24 @@
g=<lang>
+
Languag
@@ -267,16 +267,18 @@
ord>
+
Pre-word
@@ -301,232 +301,470 @@
rd.
-
"""
import redis
import py_word_suggest
# from config import REDIS_IP
from docopt import docopt
# r = redis.StrictRedis(host='py-word-suggest-redis', port=6379, db=0)
# r = redis.StrictRedis(host=REDIS_IP, port=6379, db=0)
+ --ip=<rediis-ip>  Ip of redis server (Default: 172.17.0.3)
  --port=<rediis-port>  Port of redis server (Default: 6379)

"""
import redis
import py_word_suggest
# from config import REDIS_IP
from docopt import docopt


def main():
    arguments = docopt(__doc__, version='commandline 0.0.1')
    if arguments['--port']:
        rp = arguments['--port']
    else:
        rp = 6379
    if arguments['--ip']:
        rs = arguments['--ip']
    else:
rs =
@@ -777,16 +777,20 @@
17.0.3'
+
r = redi
@@ -821,20 +821,18 @@
ort=
-6379
+rp
, db=0)
def
@@ -827,28 +827,16 @@
, db=0)
-def main():
try:
@@ -883,20 +883,16 @@
redis(r)
-
exc
@@ -954,16 +954,23 @@
to: {ip}
+:{port}
".format
@@ -978,13 +978,23 @@
e=e,
+
ip=rs
+, port=rp
))
@@ -1013,69 +1013,8 @@
1)

- arguments = docopt(__doc__, version='commandline 0.0.1')
@@ -1101,16 +1101,17 @@
-lang'],
+
w=argume
@@ -1172,20 +1172,16 @@
rds(key)
-
exc
|
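Decoded and reassembled, the rewritten `main()` reads the optional flags first and only then opens the Redis connection; roughly (whitespace reconstructed from the diff):

```python
def main():
    arguments = docopt(__doc__, version='commandline 0.0.1')
    if arguments['--port']:
        rp = arguments['--port']
    else:
        rp = 6379
    if arguments['--ip']:
        rs = arguments['--ip']
    else:
        rs = '172.17.0.3'
    r = redis.StrictRedis(host=rs, port=rp, db=0)
    try:
        obj = py_word_suggest.Selector_redis(r)
    except Exception as e:
        print("{e} Fail to connect to: {ip}:{port}".format(e=e, ip=rs, port=rp))
        exit(1)
```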
e9905f9929819ea6bb206a969747435a9c6737d7
|
Return used text and Image object as tuple
|
claptcha/claptcha.py
|
claptcha/claptcha.py
|
# -*- coding: utf-8 -*-
import sys
import os
import random
from functools import wraps
from io import BytesIO
from PIL import Image, ImageDraw, ImageFont
class ClaptchaError(Exception):
"""Exception class for Claptcha errors"""
pass
class Claptcha(object):
def __init__(self, source, font, size=(200,80), margin=(20,20), **kwargs):
self.source = source
self.size = size
self.margin = margin
self.font = font
self.format = kwargs.get('format', 'PNG')
self.resample = kwargs.get('resample', Image.BILINEAR)
@property
def image(self):
text = self.text
w, h = self.font.getsize(text)
margin_x = round(self.margin_x * w / self.w)
margin_y = round(self.margin_y * h / self.h)
image = Image.new('RGB',
(w + 2*margin_x, h + 2*margin_y),
(255, 255, 255))
# Text
self._writeText(image, text, pos=(margin_x, margin_y))
# Line
self._drawLine(image)
return image
@property
def bytes(self):
bytes = BytesIO()
self.image.save(bytes, format = self.format)
bytes.seek(0)
return bytes
def write(self, file):
self.image.save(file, format = self.format)
@property
def source(self):
return self.__source
@source.setter
def source(self, source):
if not (isinstance(source, str) or callable(source)):
raise ClaptchaError("source has to be either a string or be callable")
self.__source = source
@property
def text(self):
if isinstance(self.source, str):
return self.source
else:
return self.source()
def _with_pair_validator(func):
@wraps(func)
def wrapper(inst, pair):
if not (hasattr(pair, '__len__') and hasattr(pair, '__getitem__')):
raise ClaptchaError("Sequence not provided")
if len(pair) != 2:
raise ClaptchaError("Sequence has to have exactly 2 elements")
return func(inst, pair)
return wrapper
@property
def size(self):
return self.__size
@size.setter
@_with_pair_validator
def size(self, size):
self.__size = (int(size[0]), int(size[1]))
@property
def w(self):
return self.size[0]
@property
def h(self):
return self.size[1]
@property
def margin(self):
return self.__margin
@margin.setter
@_with_pair_validator
def margin(self, margin):
self.__margin = (int(margin[0]), int(margin[1]))
@property
def margin_x(self):
return self.__margin[0]
@property
def margin_y(self):
return self.__margin[1]
def _with_file_validator(func):
@wraps(func)
def wrapper(inst, file):
if not isinstance(file, ImageFont.ImageFont):
if not os.path.exists(file):
raise ClaptchaError("%s doesn't exist" % (file,))
if not os.path.isfile(file):
raise ClaptchaError("%s is not a file" % (file,))
return func(inst, file)
return wrapper
@property
def font(self):
return self.__font
@font.setter
@_with_file_validator
def font(self, font):
if isinstance(font, ImageFont.ImageFont):
self.__font = font
else:
fontsize = self.h - 2 * self.margin_x
self.__font = ImageFont.truetype(font, fontsize)
def _writeText(self, image, text, pos):
offset = 0
x,y = pos
for c in text:
# Write letter
c_size = self.font.getsize(c)
c_image = Image.new('RGBA', c_size, (0,0,0,0))
c_draw = ImageDraw.Draw(c_image)
c_draw.text((0, 0), c, font=self.font, fill=(0,0,0,255))
# Transform
c_image = self._rndLetterTransform(c_image)
# Paste onto image
image.paste(c_image, (x+offset, y), c_image)
offset += c_size[0]
def _drawLine(self, image):
w,h = image.size
w *= 5
h *= 5
l_image = Image.new('RGBA', (w,h), (0,0,0,0))
l_draw = ImageDraw.Draw(l_image)
x1 = int(w * random.uniform(0, 0.1))
y1 = int(h * random.uniform(0, 1))
x2 = int(w * random.uniform(0.9, 1))
y2 = int(h * random.uniform(0, 1))
# Draw
l_draw.line(((x1, y1), (x2, y2)), fill=(0, 0, 0, 255), width=12)
# Transform
l_image = self._rndLineTransform(l_image)
l_image = l_image.resize(image.size, resample=self.resample)
# Paste onto image
image.paste(l_image, (0,0), l_image)
def _rndLetterTransform(self, image):
w,h = image.size
dx = w * random.uniform(0.2, 0.7)
dy = h * random.uniform(0.2, 0.7)
x1, y1 = self.__class__._rndPointDisposition(dx, dy)
x2, y2 = self.__class__._rndPointDisposition(dx, dy)
w += abs(x1) + abs(x2)
h += abs(x1) + abs(x2)
quad = self.__class__._quadPoints((w,h), (x1,y1), (x2,y2))
return image.transform(image.size, Image.QUAD,
data=quad, resample=self.resample)
def _rndLineTransform(self, image):
w,h = image.size
dx = w * random.uniform(0.2, 0.5)
dy = h * random.uniform(0.2, 0.5)
x1, y1 = [abs(z) for z in self.__class__._rndPointDisposition(dx, dy)]
x2, y2 = [abs(z) for z in self.__class__._rndPointDisposition(dx, dy)]
quad = self.__class__._quadPoints((w,h), (x1,y1), (x2,y2))
return image.transform(image.size, Image.QUAD,
data=quad, resample=self.resample)
@staticmethod
def _rndPointDisposition(dx, dy):
x = int(random.uniform(-dx, dx))
y = int(random.uniform(-dy, dy))
return (x,y)
@staticmethod
def _quadPoints(size, disp1, disp2):
w,h = size
x1,y1 = disp1
x2,y2 = disp2
return (
x1, -y1,
-x1, h + y2,
w + x2, h - y2,
w - x2, y1
)
|
Python
| 0.999863
|
@@ -1047,22 +1047,30 @@
return
+ (text,
image
+)

@p
|
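Decoded, the hunk widens what the `image` property hands back, matching the commit subject; the property now ends with:

```python
        # Line
        self._drawLine(image)

        return (text, image)
```

Callers would then unpack both values, e.g. `text, image = c.image` for a hypothetical `c = Claptcha(...)` instance.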
ca1fa126611f958ccd893f034df74e8821f41771
|
update prototype
|
hardware/prototype/read_serial.py
|
hardware/prototype/read_serial.py
|
__author__ = 'sachinpatney'
import serial
import time
import binascii
ser = serial.Serial('/dev/ttyUSB0', baudrate=9600, timeout=1.0)
s = b''
def do(cmd):
if cmd == 'play':
print('Playing music ...')
while True:
bytesToRead = ser.inWaiting()
if bytesToRead > 0:
s += ser.read()
if binascii.hexlify(s) == b'7e':
w = ser.read(2)
s += w
l = int(binascii.hexlify(w), 16)
s += ser.read(l + 2)
s = binascii.hexlify(s)
if s[6:8] == b'90':
data = s[32:-2]
data = binascii.unhexlify(data).decode('utf-8')
print(data)
do(data.strip())
else:
s = b''
else:
s = b''
time.sleep(0.3)
|
Python
| 0
|
@@ -716,16 +716,53 @@
t(data)
+ print(len(data))
|
8f01b5901034cbfa5ff5170a64e6d19e1f49512e
|
fix AutoLSTM test after merge (#3988)
|
pyzoo/test/zoo/zouwu/autots/model/test_auto_lstm.py
|
pyzoo/test/zoo/zouwu/autots/model/test_auto_lstm.py
|
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from torch.utils.data import Dataset, DataLoader
import torch
import numpy as np
from unittest import TestCase
import pytest
from zoo.zouwu.autots.model.auto_lstm import AutoLSTM
from zoo.orca.automl import hp
def get_x_y(size):
input_feature_dim = 10
output_feature_dim = 2
past_seq_len = 5
future_seq_len = 1
x = np.random.randn(size, past_seq_len, input_feature_dim)
y = np.random.randn(size, future_seq_len, output_feature_dim)
return x, y
class RandomDataset(Dataset):
def __init__(self, size=1000):
x, y = get_x_y(size)
self.x = torch.from_numpy(x).float()
self.y = torch.from_numpy(y).float()
def __len__(self):
return self.x.shape[0]
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
def train_dataloader_creator(config):
return DataLoader(RandomDataset(size=1000),
batch_size=config["batch_size"],
shuffle=True)
def valid_dataloader_creator(config):
return DataLoader(RandomDataset(size=400),
batch_size=config["batch_size"],
shuffle=True)
class TestAutoLSTM(TestCase):
def setUp(self) -> None:
from zoo.orca import init_orca_context
init_orca_context(cores=8, init_ray_on_spark=True)
def tearDown(self) -> None:
from zoo.orca import stop_orca_context
stop_orca_context()
def test_fit_np(self):
auto_lstm = AutoLSTM(input_feature_num=10,
output_target_num=2,
optimizer=torch.optim.Adam,
loss=torch.nn.MSELoss(),
metric="mse",
hidden_dim=hp.grid_search([32, 64]),
layer_num=hp.randint(1, 3),
lr=hp.choice([0.001, 0.003, 0.01]),
dropout=hp.uniform(0.1, 0.2),
logs_dir="/tmp/auto_lstm",
cpus_per_trial=2,
name="auto_lstm")
auto_lstm.fit(data=get_x_y(size=1000),
epochs=1,
batch_size=hp.choice([32, 64]),
validation_data=get_x_y(size=400),
n_sampling=1,
)
best_model = auto_lstm.get_best_model()
assert 0.1 <= best_model.config['dropout'] <= 0.2
assert best_model.config['batch_size'] in (32, 64)
assert 1 <= best_model.config['layer_num'] < 3
def test_fit_data_creator(self):
auto_lstm = AutoLSTM(input_feature_num=10,
output_target_num=2,
optimizer=torch.optim.Adam,
loss=torch.nn.MSELoss(),
metric="mse",
hidden_dim=hp.grid_search([32, 64]),
layer_num=hp.randint(1, 3),
lr=hp.choice([0.001, 0.003, 0.01]),
dropout=hp.uniform(0.1, 0.2),
logs_dir="/tmp/auto_lstm",
cpus_per_trial=2,
name="auto_lstm")
auto_lstm.fit(data=train_dataloader_creator,
epochs=1,
batch_size=hp.choice([32, 64]),
validation_data=valid_dataloader_creator,
n_sampling=1,
)
best_model = auto_lstm.get_best_model()
assert 0.1 <= best_model.config['dropout'] <= 0.2
assert best_model.config['batch_size'] in (32, 64)
assert 1 <= best_model.config['layer_num'] < 3
if __name__ == "__main__":
pytest.main([__file__])
|
Python
| 0
|
@@ -798,32 +798,8 @@
hp

-
def get_x_y(size):
inpu
@@ -817,20 +817,16 @@
im = 10
-
output_f
@@ -840,20 +840,16 @@
dim = 2
-
past_seq
@@ -857,20 +857,16 @@
len = 5
-
future_s
@@ -876,16 +876,37 @@
len = 1
+

def get_x_y(size):
x =
@@ -2055,34 +2055,49 @@
put_feature_num=
-10
+input_feature_dim
,
@@ -2121,33 +2121,50 @@
tput_target_num=
-2
+output_feature_dim
,
@@ -2188,32 +2188,22 @@
timizer=
-torch.optim.
+'
Adam
+'
,
@@ -3245,18 +3245,33 @@
ure_num=
-10
+input_feature_dim
,
@@ -3311,17 +3311,34 @@
get_num=
-2
+output_feature_dim
,
@@ -3374,24 +3374,14 @@
zer=
-torch.optim.
+'
Adam
+'
,
|
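Decoded, the hunks hoist the dimensions out of `get_x_y` to module scope and switch the optimizer argument to its string name; both test constructors then become roughly:

```python
# Dimensions now live at module scope so the test bodies can reference them.
input_feature_dim = 10
output_feature_dim = 2
past_seq_len = 5
future_seq_len = 1

auto_lstm = AutoLSTM(input_feature_num=input_feature_dim,
                     output_target_num=output_feature_dim,
                     optimizer='Adam',  # was torch.optim.Adam
                     loss=torch.nn.MSELoss(),
                     metric="mse",
                     hidden_dim=hp.grid_search([32, 64]),
                     layer_num=hp.randint(1, 3),
                     lr=hp.choice([0.001, 0.003, 0.01]),
                     dropout=hp.uniform(0.1, 0.2),
                     logs_dir="/tmp/auto_lstm",
                     cpus_per_trial=2,
                     name="auto_lstm")
```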
2846b996783b896f69ec0870569c7e442ddcc652
|
fix imports, arguments
|
sfepy/terms/terms_fibres.py
|
sfepy/terms/terms_fibres.py
|
from sfepy.terms.terms import *
from sfepy.terms.terms_hyperelastic_tl import HyperElasticTLBase
from sfepy.homogenization.utils import iter_sym
class FibresActiveTLTerm(HyperElasticTLBase):
r""":description: Hyperelastic active fibres term. Effective stress $S_{ij} =
A f_{\rm max} \exp{-(\frac{\epsilon - \varepsilon_{\rm opt}}{s})^2}$, where
$\epsilon = E_{ij} d_i d_j$ is the Green strain $\ull{E}$ projected to the
fibre direction $\ul{d}$.
:definition:
$\int_{\Omega} S_{ij}(\ul{u}) \delta E_{ij}(\ul{u};\ul{v})$ """
name = 'dw_tl_fib_a'
arg_types = ('material_1', 'material_2', 'material_3',
'material_4', 'material_5', 'virtual', 'state')
geometry = [(Volume, 'virtual')]
family_data_names = ['E']
def compute_crt_data( self, family_data, ap, vg, mode, **kwargs ):
pars = self.get_args(['material_1', 'material_2', 'material_3',
'material_4', 'material_5'], **kwargs)
fmax, eps_opt, s, fdir, act = pars
strainE = family_data[0]
eps = nm.zeros_like(fmax)
omega = nm.empty_like(strainE)
for ii, (ir, ic) in enumerate(iter_sym(fdir.shape[2])):
omega[...,ii,0] = fdir[...,ir,0] * fdir[...,ic,0]
eps[...,0,0] += omega[...,ii,0] * strainE[...,ii,0]
tau = act * fmax * nm.exp(-((eps - eps_opt) / s)**2.0)
if mode == 0:
out = omega * tau
else:
shape = list(strainE.shape)
shape[-1] = shape[-2]
out = nm.empty(shape, dtype=nm.float64)
for ir in range(omega.shape[2]):
for ic in range(omega.shape[2]):
out[...,ir,ic] = omega[...,ir,0] * omega[...,ic,0]
out[:] *= -2.0 * ((eps - eps_opt) / (s**2.0)) * tau
return out
|
Python
| 0.000001
|
@@ -25,16 +25,64 @@
mport *
+from sfepy.terms.terms_base import VectorVector
from sfe
@@ -212,16 +212,30 @@
eTLTerm(
+VectorVector,
HyperEla
@@ -872,16 +872,8 @@
ata,
- ap, vg,
mod
|
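Decoded, the hunks add the `VectorVector` mixin import, put it first among the bases, and drop the unused `ap, vg` parameters; the affected lines would read roughly:

```python
from sfepy.terms.terms import *
from sfepy.terms.terms_base import VectorVector
from sfepy.terms.terms_hyperelastic_tl import HyperElasticTLBase


class FibresActiveTLTerm(VectorVector, HyperElasticTLBase):

    def compute_crt_data(self, family_data, mode, **kwargs):
        ...
```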
75dd4dece168fbe38609fc0ec4843f7563075a80
|
Bump release
|
doc/source/conf.py
|
doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# ipcalc documentation build configuration file, created by
# sphinx-quickstart on Sat Apr 11 00:06:38 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ipcalc'
copyright = u'2009, Wijnand Modderman'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.5'
# The full version, including alpha/beta/rc tags.
release = '0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ipcalcdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ipcalc.tex', u'ipcalc Documentation',
u'Wijnand Modderman', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
Python
| 0
|
@@ -1698,17 +1698,17 @@
se = '0.
-4
+5
'

# The
|
37e549e34a20eb6dd14be305a7efff5d2a7832f5
|
Create SetupDSRCMigrate Step
|
dbaas/workflow/steps/util/foreman.py
|
dbaas/workflow/steps/util/foreman.py
|
from dbaas_credentials.models import CredentialType
from dbaas_foreman import get_foreman_provider
from physical.models import Vip
from workflow.steps.util.base import VipProviderClient
from util import exec_remote_command_host, get_or_none_credentials_for
from base import BaseInstanceStep
class Foreman(BaseInstanceStep):
def __init__(self, instance):
super(Foreman, self).__init__(instance)
self.credentials = get_or_none_credentials_for(
self.environment, CredentialType.FOREMAN
)
self._provider = None
@property
def provider(self):
if self._provider is None:
self._provider = get_foreman_provider(self.infra, self.credentials)
return self._provider
@property
def fqdn(self):
output = {}
script = 'hostname -f'
exec_remote_command_host(self.host, script, output)
return output['stdout'][0].strip()
@property
def reverse_ip(self):
output = {}
script = 'nslookup {}'.format(self.host.address)
exec_remote_command_host(self.host, script, output)
ret = ''.join(output['stdout'])
if 'name = ' not in ret:
return None
return ret.split('name = ')[1].split('.\n')[0]
def is_valid(self):
return self.credentials is not None
def do(self):
raise NotImplementedError
def undo(self):
pass
class SetupDSRC(Foreman):
def __unicode__(self):
return "Foreman registering DSRC class..."
def do(self):
if not self.is_valid:
return
vip_identifier = Vip.objects.get(infra=self.infra).identifier
client = VipProviderClient(self.infra.environment)
vip = client.get_vip(vip_identifier)
self.provider.setup_database_dscp(
self.fqdn, vip.vip_ip, vip.dscp, self.instance.port
)
class DeleteHost(Foreman):
def __unicode__(self):
return "Foreman removing host..."
def do(self):
if not self.is_valid:
return
fqdn = self.fqdn
hostname = self.host.hostname
reverse_ip = self.reverse_ip
self.provider.delete_host(fqdn)
self.provider.delete_host(hostname)
if reverse_ip:
if reverse_ip.split('.')[0] == hostname.split('.')[0]:
self.provider.delete_host(reverse_ip)
|
Python
| 0
|
@@ -1599,290 +1599,258 @@
-vip_identifier = Vip.objects.get(infra=self.infra).identifier
        client = VipProviderClient(self.infra.environment)
        vip = client.get_vip(vip_identifier)

        self.provider.setup_database_dscp(
            self.fqdn, vip.vip_ip, vip.dscp, self.instance.port
+self.provider.setup_database_dscp(
            self.fqdn, self.vip.vip_ip, self.vip.dscp, self.instance.port
        )


class SetupDSRCMigrate(SetupDSRC):
    def do(self):
        self.vip = self.future_vip
        super(SetupDSRCMigrate, self).do(
)
|
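Decoded, `SetupDSRC.do()` now reads the VIP from an attribute instead of resolving it inline, and the new `SetupDSRCMigrate` step simply points that attribute at the future VIP first (assuming `vip` and `future_vip` are supplied by the surrounding step machinery):

```python
class SetupDSRC(Foreman):

    def __unicode__(self):
        return "Foreman registering DSRC class..."

    def do(self):
        if not self.is_valid:
            return
        self.provider.setup_database_dscp(
            self.fqdn, self.vip.vip_ip, self.vip.dscp, self.instance.port
        )


class SetupDSRCMigrate(SetupDSRC):

    def do(self):
        self.vip = self.future_vip
        super(SetupDSRCMigrate, self).do()
```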
59d4ed667259607a9e6e8bdfe66388a511422379
|
update iuwandbox test
|
tools/wandbox/tests/test_iuwandbox.py
|
tools/wandbox/tests/test_iuwandbox.py
|
#!/usr/bin/env python
#
# test_iuwandbox.py
#
from __future__ import print_function
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../fused')
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../../wandbox')
try:
import unittest2 as unittest
except:
import unittest
import iuwandbox
import fused_iutest_files
import shutil
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
root = os.path.normpath(os.path.dirname(os.path.abspath(__file__)) + '/../../../')
fused_src = root + '/fused-src'
test_src = root + '/test/syntax_tests.cpp'
test_opt_nomain = [ '--encoding', 'utf-8-sig' ]
test_opt = [ '--encoding', 'utf-8-sig', '-f"-DIUTEST_USE_MAIN"' ]
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
class iuwandbox_test_base(unittest.TestCase):
dir = None
def setUp(self):
self.capture = StringIO()
sys.stdout = self.capture
self.dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
return super(iuwandbox_test_base, self).setUp()
def tearDown(self):
sys.stdout = sys.__stdout__
os.chdir(self.dir)
self.capture.close()
return super(iuwandbox_test_base, self).tearDown()
def dump(self):
value = self.capture.getvalue()
eprint(value)
class nofused_iuwandbox_test(iuwandbox_test_base):
def setUp(self):
if os.path.exists(fused_src):
try:
shutil.rmtree(fused_src)
except:
pass
if os.path.exists(fused_src):
self.skipTest('fused-src is exists')
return super(nofused_iuwandbox_test, self).setUp()
def test_nofused(self):
sys.argv[1:] = [ test_src ]
sys.argv.extend(test_opt)
with self.assertRaises(SystemExit) as cm:
iuwandbox.main()
self.dump()
self.assertEqual(cm.exception.code, 1, self.capture.getvalue())
self.assertRegex(self.capture.getvalue(), '.*please try \"make fused\".*')
class iuwandbox_test(iuwandbox_test_base):
def setUp(self):
if not os.path.exists(fused_src):
try:
fused_iutest_files.FusedAll(fused_iutest_files.IUTEST_INCLUDE_DIR, fused_src)
# os.system('python ' + root + '/tools/fused/fused_iutest_files.py ' + fused_src)
except:
pass
if not os.path.exists(fused_src):
self.skipTest('fused-src is not exists')
return super(iuwandbox_test, self).setUp()
def test_nomain(self):
sys.argv[1:] = [ test_src ]
sys.argv.extend(test_opt_nomain)
with self.assertRaises(SystemExit) as cm:
iuwandbox.main()
self.dump()
self.assertEqual(cm.exception.code, 1, self.capture.getvalue())
self.assertRegex(self.capture.getvalue(), '.*hint:.*')
self.assertRegex(self.capture.getvalue(), '.*If you do not use boost test, please specify the file with the main function first..*')
def test_run(self):
sys.argv[1:] = [ test_src ]
sys.argv.extend(test_opt)
with self.assertRaises(SystemExit) as cm:
iuwandbox.main()
self.dump()
self.assertEqual(cm.exception.code, 0, self.capture.getvalue())
self.assertRegex(self.capture.getvalue(), '.*OK.*')
def test_same_filename(self):
sys.argv[1:] = [ 'src/main.cpp', 'src/A/sample.cpp', 'src/B/sample.cpp' ]
sys.argv.extend(test_opt_nomain)
with self.assertRaises(SystemExit) as cm:
iuwandbox.main()
self.dump()
self.assertEqual(cm.exception.code, 0, self.capture.getvalue())
self.assertRegex(self.capture.getvalue(), '.*OK.*')
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -1609,33 +1609,65 @@
path.exists(
-fused_src
+os.path.join(fused_src, 'iutest.min.hpp')
):
@@ -2499,33 +2499,65 @@
path.exists(
-fused_src
+os.path.join(fused_src, 'iutest.min.hpp')
):
|
a901ebf403566bdb1ef468b2d528d80f6533ead1
|
Update dragging example for new plugin architecture
|
examples/drag_points.py
|
examples/drag_points.py
|
"""
Draggable Points Example
========================
This example shows how a D3 plugin can be created to make plot elements
draggable. A stopPropagation command is used to allow the drag behavior
and pan/zoom behavior to work in tandem.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import mpld3
from mpld3 import plugins, utils
class DragPlugin(plugins.PluginBase):
JAVASCRIPT = r"""
var DragPlugin = function(fig, prop){
this.fig = fig;
this.prop = mpld3.process_props(this, prop, {}, ["id"]);
mpld3.insert_css("#" + fig.figid + " path.dragging",
{"fill-opacity": "1.0 !important",
"stroke-opacity": "1.0 !important"});
}
DragPlugin.prototype.draw = function(){
var obj = mpld3.get_element(this.prop.id);
var drag = d3.behavior.drag()
.origin(function(d) { return {x:obj.ax.x(d[0]),
y:obj.ax.y(d[1])}; })
.on("dragstart", dragstarted)
.on("drag", dragged)
.on("dragend", dragended);
obj.elements()
.data(obj.data)
.style("cursor", "default")
.call(drag);
function dragstarted(d) {
d3.event.sourceEvent.stopPropagation();
d3.select(this).classed("dragging", true);
}
function dragged(d, i) {
d[0] = obj.ax.x.invert(d3.event.x);
d[1] = obj.ax.y.invert(d3.event.y);
d3.select(this)
.attr("transform", "translate(" + [d3.event.x,d3.event.y] + ")");
}
function dragended(d) {
d3.select(this).classed("dragging", false);
}
}
mpld3.register_plugin("drag", DragPlugin);
"""
def __init__(self, points):
if isinstance(points, mpl.lines.Line2D):
suffix = "pts"
else:
suffix = None
self.dict_ = {"type": "drag",
"id": utils.get_id(points, suffix)}
fig, ax = plt.subplots()
np.random.seed(0)
points = ax.plot(np.random.normal(size=20),
np.random.normal(size=20), 'or', alpha=0.5,
markersize=50, markeredgewidth=1)
ax.set_title("Click and Drag", fontsize=18)
plugins.connect(fig, DragPlugin(points[0]))
mpld3.show()
|
Python
| 0
|
@@ -430,132 +430,344 @@
-var DragPlugin = function(fig, prop){
        this.fig = fig;
        this.p
+mpld3.register_plugin("drag", DragPlugin);
    DragPlugin.prototype = Object.create(mpld3.Plugin.prototype);
    DragPlugin.prototype.constructor = DragPlugin;
    DragPlugin.prototype.requiredP
rop
+s
=
-mpld3.process_props(this, prop, {}, ["id"]);

+["id"];
    DragPlugin.prototype.defaultProps = {}
    function DragPlugin(fig, props){
        mpld3.Plugin.call(this, fig, props);
@@ -817,24 +817,26 @@
.dragging",
+
@@ -905,16 +905,18 @@
+
"stroke-
@@ -946,24 +946,25 @@
nt"});
    }
+;

    DragPl
@@ -1042,16 +1042,17 @@
his.prop
+s
.id);

@@ -1371,20 +1371,23 @@
ata(obj.
-data
+offsets
)
@@ -1937,56 +1937,8 @@
}
-

    mpld3.register_plugin("drag", DragPlugin);
|
207dd3b5f59ecc66c896cc0f3ad90e283fca5145
|
refactor the registration process
|
src/app.py
|
src/app.py
|
"""The netify application object."""
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from flask import Flask
class NetifyApp(object):
"""The Netify Application object."""
flask_app = None
def __init__(self, config=None):
if self.flask_app is None:
self.__class__.flask_app = Flask(__name__)
if config and self.flask_app:
config.update_flask(self.flask_app)
def register_views(self, views):
"""Register the view classes against the flask app."""
for view in views:
view.register(self.flask_app)
def run(self, host=None, port=None, debug=None):
"""Run the Flask Server."""
self.flask_app.run(host, port, debug)
|
Python
| 0
|
@@ -790,16 +790,35 @@
is None:
+ # First time init

@@ -877,37 +877,98 @@
-if config and self.flask_app:
+ self.registered_views = []
        if config:
            self.config = config

@@ -976,16 +976,25 @@
+ self.
config.u
@@ -1122,79 +1122,793 @@
app.
-"""
        for view in views:
            view.register(self.flask_app
+

    The "Method" name registered in the Flask app is the "name" field for
    each View class.
    """
        view_config = self.config.netify_views
        enabled = [name.strip() for name in view_config['enabled'].split(',')]
        for view in views:
            view_cls = view.value
            if view.name in enabled:
                if view_cls.name in self.registered_views:
                    self.flask_app.logger.warning(
                        'Not Registering view %s. A view has already '
                        'been registered for %s.' % (view.name, view_cls.name))
                view_opts = self.config.get_page_options(view_cls.name)
                view_cls.register(self, **view_opts)
                self.registered_views.append(view_cls.name
)
|
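Decoded and reassembled, the refactored `register_views` filters views against the config's enabled list and guards against double registration; the whole method, as the diff reconstructs it, is roughly:

```python
def register_views(self, views):
    """Register the view classes against the flask app.

    The "Method" name registered in the Flask app is the "name" field for
    each View class.
    """
    view_config = self.config.netify_views
    enabled = [name.strip() for name in view_config['enabled'].split(',')]
    for view in views:
        view_cls = view.value
        if view.name in enabled:
            if view_cls.name in self.registered_views:
                self.flask_app.logger.warning(
                    'Not Registering view %s. A view has already '
                    'been registered for %s.' % (view.name, view_cls.name))
            view_opts = self.config.get_page_options(view_cls.name)
            view_cls.register(self, **view_opts)
            self.registered_views.append(view_cls.name)
```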
67f56e977b3f94e8b1c22ea2070b417a359cd5bf
|
remove misleading comments about non-existent features of C*
|
feedly/storage/cassandra/timeline_storage.py
|
feedly/storage/cassandra/timeline_storage.py
|
from cqlengine import BatchQuery
from feedly.storage.base import BaseTimelineStorage
from feedly.storage.cassandra import models
from feedly.serializers.cassandra.activity_serializer import CassandraActivitySerializer
import logging
logger = logging.getLogger(__name__)
class CassandraTimelineStorage(BaseTimelineStorage):
"""
A feed timeline implementation that uses Apache Cassandra as
backend storage.
CQL is used to access the data stored on cassandra via the ORM
library cqlengine.
"""
from feedly.storage.cassandra.connection import setup_connection
setup_connection()
default_serializer_class = CassandraActivitySerializer
base_model = models.Activity
insert_batch_size = 100
def __init__(self, serializer_class=None, **options):
self.column_family_name = options.pop('column_family_name')
super(CassandraTimelineStorage, self).__init__(
serializer_class, **options)
self.model = self.get_model(self.base_model, self.column_family_name)
@classmethod
def get_model(cls, base_model, column_family_name):
'''
Creates an instance of the base model with the table_name (column family name)
set to column family name
:param base_model: the model to extend from
:param column_family_name: the name of the column family
'''
camel_case = ''.join([s.capitalize()
for s in column_family_name.split('_')])
class_name = '%sFeedModel' % camel_case
return type(class_name, (base_model,), {'__table_name__': column_family_name})
@property
def serializer(self):
'''
Returns an instance of the serializer class
'''
return self.serializer_class(self.model)
def get_batch_interface(self):
return BatchQuery()
def contains(self, key, activity_id):
return self.model.objects.filter(feed_id=key, activity_id=activity_id).count() > 0
def index_of(self, key, activity_id):
if not self.contains(key, activity_id):
raise ValueError
return len(self.model.objects.filter(feed_id=key, activity_id__gt=activity_id).values_list('feed_id'))
def get_nth_item(self, key, index):
return self.model.objects.filter(feed_id=key).order_by('-activity_id')[index]
def get_slice_from_storage(self, key, start, stop, filter_kwargs=None):
'''
:returns list: Returns a list with tuples of key,value pairs
'''
results = []
limit = 10 ** 6
query = self.model.objects.filter(feed_id=key)
if filter_kwargs:
query = query.filter(**filter_kwargs)
if start not in (0, None):
offset_activity_id = self.get_nth_item(key, start)
query = query.filter(
activity_id__lte=offset_activity_id.activity_id)
if stop is not None:
limit = (stop - (start or 0))
for activity in query.order_by('-activity_id')[:limit]:
results.append([activity.activity_id, activity])
return results
def add_to_storage(self, key, activities, batch_interface=None, *args, **kwargs):
'''
Adds the activities to the feed on the given key
(The serialization is done by the serializer class)
:param key: the key at which the feed is stored
:param activities: the activities which to store
To keep inserts fast we use cqlengine's batch_insert which uses
prepared batches and ignore the passed batch_interface
'''
if batch_interface is not None:
logger.info(
'%r.add_to_storage batch_interface was ignored' % self.__class__)
for model_instance in activities.values():
model_instance.feed_id = str(key)
self.model.objects.batch_insert(
activities.values(), batch_size=self.insert_batch_size, atomic=False)
def remove_from_storage(self, key, activities, batch_interface=None, *args, **kwargs):
'''
Deletes multiple activities from storage
Unfortunately CQL 3.0 does not support the IN operator inside DELETE query's where-clause
for that reason we are going to create 1 query per activity
With cassandra >= 2.0 is possible to do this in one single query
example:
self.model.objects.filter(feed_id=key, activity_id__in=[a.id for a in activities]).delete()
'''
batch = batch_interface or BatchQuery()
for activity_id in activities.keys():
self.model(feed_id=key, activity_id=activity_id).batch(
batch).delete()
if batch_interface is None:
batch.execute()
def count(self, key, *args, **kwargs):
return self.model.objects.filter(feed_id=key).count()
def delete(self, key, *args, **kwargs):
self.model.objects.filter(feed_id=key).delete()
def trim(self, key, length, batch_interface=None):
batch = batch_interface or BatchQuery()
last_activity = self.get_slice_from_storage(key, 0, length)[-1]
if last_activity:
for activity in self.model.filter(feed_id=key, activity_id__lt=last_activity[0]):
activity.batch(batch).delete()
if batch_interface is None:
batch.execute()
|
Python
| 0
|
@@ -4048,440 +4048,8 @@
s):
- '''
        Deletes multiple activities from storage
        Unfortunately CQL 3.0 does not support the IN operator inside DELETE query's where-clause
        for that reason we are going to create 1 query per activity

        With cassandra >= 2.0 is possible to do this in one single query

        example:
        self.model.objects.filter(feed_id=key, activity_id__in=[a.id for a in activities]).delete()

        '''
|
12720a225210afe73f301c2112f0a7ddcdc41bcb
|
Fix CSR loading in ACME example client script.
|
examples/acme_client.py
|
examples/acme_client.py
|
"""Example script showing how to use acme client API."""
import logging
import os
import pkg_resources
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
import OpenSSL
from acme import client
from acme import messages
from acme import jose
logging.basicConfig(level=logging.DEBUG)
NEW_REG_URL = 'https://www.letsencrypt-demo.org/acme/new-reg'
BITS = 2048 # minimum for Boulder
DOMAIN = 'example1.com' # example.com is ignored by Boulder
# generate_private_key requires cryptography>=0.5
key = jose.JWKRSA(key=rsa.generate_private_key(
public_exponent=65537,
key_size=2048,
backend=default_backend()))
acme = client.Client(NEW_REG_URL, key)
regr = acme.register()
logging.info('Auto-accepting TOS: %s', regr.terms_of_service)
acme.update_registration(regr.update(
body=regr.body.update(agreement=regr.terms_of_service)))
logging.debug(regr)
authzr = acme.request_challenges(
identifier=messages.Identifier(typ=messages.IDENTIFIER_FQDN, value=DOMAIN),
new_authzr_uri=regr.new_authzr_uri)
logging.debug(authzr)
authzr, authzr_response = acme.poll(authzr)
csr = OpenSSL.crypto.load_certificate_request(
OpenSSL.crypto.FILETYPE_ASN1, pkg_resources.resource_string(
'acme.jose', os.path.join('testdata', 'csr.der')))
try:
acme.request_issuance(csr, (authzr,))
except messages.Error as error:
print ("This script is doomed to fail as no authorization "
"challenges are ever solved. Error from server: {0}".format(error))
|
Python
| 0
|
@@ -1276,13 +1276,8 @@
acme
-.jose
', o
|
763ca6b8c359b28f562dcddfd39904d1e59c3a7d
|
update unit test
|
_unittests/ut_files/test_files.py
|
_unittests/ut_files/test_files.py
|
"""
@brief test log(time=12s)
You should indicate a time in seconds. The program ``run_unittests.py``
will sort all test files by increasing time and run them.
"""
import sys
import os
import unittest
import shlex
try:
import src
import pyquickhelper as skip_
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..",
"..",
"pyquickhelper",
"src")))
if path not in sys.path:
sys.path.append(path)
import src
import pyquickhelper as skip_
from pyquickhelper.loghelper import fLOG
from pyquickhelper.helpgen import docstring2html
from src.pyensae.file_helper.magic_file import MagicFile
from src.pyensae.file_helper import file_tail, file_encoding
from src.pyensae.sql import Database
class TestFiles (unittest.TestCase):
def test_shlex(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
this = r"c:\rep1\rep2\frep\_urep.py rrr c:\rr\i.py"
r = shlex.split(this, posix=False)
fLOG(r)
assert r[0] == r"c:\rep1\rep2\frep\_urep.py"
def test_files(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
path = os.path.abspath(os.path.dirname(__file__))
mg = MagicFile()
cmd = path + " -f .*[.]py"
fLOG("**", cmd)
res = mg.lsr(cmd)
fLOG(res)
if len(res) == 0:
raise FileNotFoundError("cmd: " + cmd)
res = mg.lsr("")
fLOG(res)
assert len(res) > 0
def test_head(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fp = os.path.abspath(__file__)
mg = MagicFile()
fLOG("--", fp)
res = mg.head("{0} -n 3".format(fp))
fLOG("*****", res)
assert "test log" in res.data
res = mg.head("{0} --n 3 -e ascii".format(fp))
resr = mg.head("{0} --n 3 -e utf8 --raw".format(fp))
fLOG(resr)
assert resr != res
assert "<" not in resr
assert "@brief" in resr
assert "usage" not in resr
assert not isinstance(res, str)
def test_head2(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fp = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
"data",
"Exportutf8.txt")
mg = MagicFile()
fLOG("--", fp)
res = mg.head("{0} -n 3".format(fp))
fLOG("*****", res)
res = mg.head("{0} -n=3".format(fp))
fLOG("*****", res)
res = mg.head("{0}".format(fp))
fLOG("*****", res)
assert "9.0" in res.data
res = mg.head("{0} --n 3 -e utf8".format(fp))
try:
res = mg.head("Exportutf8.txt")
except FileNotFoundError:
pass
def test_grep(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fp = os.path.join(
os.path.dirname(
os.path.abspath(__file__)),
"data",
"Exportutf8.txt")
mg = MagicFile()
fLOG("--", fp)
res = mg.grep("{0} .*6.* -n 3 -r".format(fp))
fLOG("*****", res)
self.assertEqual(res.strip("\n"), "1.2 3.4 5.6".strip("\n"))
def test_tail(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fp = os.path.abspath(__file__)
mg = MagicFile()
fLOG("--", fp)
res = mg.tail("{0} -n 3".format(fp))
fLOG("*****", res)
assert "unittest.main" in res.data
res = mg.tail("{0} --n 3 -e ascii".format(fp))
res = mg.tail("{0} --n 3 -e utf8".format(fp))
res = file_tail(fp, threshold=300, nbline=3)
res = [_ for _ in res if len(_) > 0]
fLOG("#####", res)
if "unittest.main" not in res[-2]:
raise Exception("unittest.main not in " + str(res[-2]))
def test_tail_utf8(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
fp = os.path.join(os.path.dirname(__file__), "data", "lines_utf8.txt")
lines = file_tail(fp, nbline=3, threshold=20)
fLOG(lines)
def test_files_repo(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
path = os.path.abspath(os.path.dirname(__file__))
mg = MagicFile()
cmd = path
fLOG("**", cmd)
res = mg.lsrepo(cmd)
fLOG(res)
if len(res) == 0:
raise FileNotFoundError("cmd: " + cmd)
res = mg.lsrepo("")
fLOG(res)
assert len(res) > 0
def test_htmlhelp(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
mg = MagicFile()
mg.add_context(
{"file_tail": file_tail, "Database": Database, "text": 3})
cmd = "-np -f rawhtml file_tail"
res = mg.hhelp(cmd)
assert "<p>extracts the first nbline of a file " in res
res = mg.hhelp("-np -f rst file_tail")
assert ":param threshold:" in res
res = mg.hhelp("-np -f rawhtml Database")
assert "SQL file which can be empty or not," in res
doc = docstring2html(Database.__init__, format="rawhtml")
assert "it can also contain several files separated by" in doc
fLOG("----------")
res = mg.hhelp("-np -f rst Database.__init__")
assert "it can also contain several files separated by" in res
res = mg.hhelp("Database.__init__")
assert res is not None
res = mg.hhelp("-np -f text Database.__init__")
assert "it can also contain several files separated by" in res
assert "@param" in res
def test_encoding(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
this = os.path.abspath(__file__).replace(".pyc", ".py")
res = file_encoding(this)
self.assertEqual(
res, {'encoding': 'ascii', 'confidence': 1.0, 'language': ''})
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -5768,22 +5768,18 @@
-assert
+if
%22:param
@@ -5778,32 +5778,50 @@
aram
- threshold:%22 in
+%22 in res:%0A raise Exception(
res
+)
%0A
|
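The diff above replaces a bare assert with an explicit if/raise. One plausible motivation (an assumption, not stated in the commit): assert statements are stripped when Python runs with -O, while an explicit raise keeps the check active and reports the offending value. A minimal sketch of the pattern:

res = "rendered help text for file_tail"   # stands in for mg.hhelp(...) output
if ":param" in res:
    # unlike a bare assert, this check survives `python -O`
    # and puts the offending value in the traceback
    raise Exception(res)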
8b67820a60d89304710d568e19191f3ca32e0fb8
|
Add tests for BibsAndAttachedToSolr exporter
|
django/sierra/export/tests/test_exporters.py
|
django/sierra/export/tests/test_exporters.py
|
"""
Tests classes derived from `export.exporter.Exporter`.
"""
import pytest
# FIXTURES AND TEST DATA
# Fixtures used in the below tests can be found in
# django/sierra/base/tests/conftest.py:
# sierra_records_by_recnum_range, sierra_full_object_set,
# new_exporter, get_records, export_records, delete_records,
# solr_conn, solr_search
pytestmark = pytest.mark.django_db
@pytest.fixture
def solr_exporter_test_params(sierra_records_by_recnum_range,
sierra_full_object_set):
bib_set = sierra_records_by_recnum_range('b4371446')
eres_set = sierra_records_by_recnum_range('e1001249')
item_set = sierra_records_by_recnum_range('i4264281')
itype_set = sierra_full_object_set('ItypeProperty')
istatus_set = sierra_full_object_set('ItemStatusProperty')
location_set = sierra_full_object_set('Location')
return {
'BibsToSolr': {
'record_set': bib_set,
'cores': ['bibdata', 'marc'],
'try_delete': True
},
'EResourcesToSolr': {
'record_set': eres_set,
'cores': ['haystack'],
'try_delete': True
},
'ItemsToSolr': {
'record_set': item_set,
'cores': ['haystack'],
'try_delete': True
},
'ItemStatusesToSolr': {
'record_set': istatus_set,
'cores': ['haystack'],
'try_delete': False
},
'ItypesToSolr': {
'record_set': itype_set,
'cores': ['haystack'],
'try_delete': False
},
'LocationsToSolr': {
'record_set': location_set,
'cores': ['haystack'],
'try_delete': False
},
}
# TESTS
@pytest.mark.parametrize('etype_code', [
'BibsToSolr',
'EResourcesToSolr',
'ItemsToSolr',
'ItemStatusesToSolr',
'ItypesToSolr',
'LocationsToSolr'])
def test_export_get_records(etype_code, solr_exporter_test_params,
new_exporter, get_records):
"""
For Exporter classes that get data from Sierra: get_records should return all of the expected records.
"""
exporter = new_exporter(etype_code, 'full_export', 'waiting')
db_records = get_records(exporter)
expected_records = solr_exporter_test_params[etype_code]['record_set']
assert len(db_records) > 0
assert all([rec in db_records for rec in expected_records])
@pytest.mark.parametrize('etype_code', [
'BibsToSolr',
'EResourcesToSolr',
'ItemsToSolr',
'ItemStatusesToSolr',
'ItypesToSolr',
'LocationsToSolr'])
def test_exports_to_solr(etype_code, solr_exporter_test_params, new_exporter,
export_records, delete_records, solr_conn,
solr_search):
"""
For Exporter classes that load data into Solr: loading should populate the relevant cores, and deletion (where attempted) should empty them.
"""
record_set = solr_exporter_test_params[etype_code]['record_set']
cores = solr_exporter_test_params[etype_code]['cores']
try_delete = solr_exporter_test_params[etype_code]['try_delete']
load_exporter = new_exporter(etype_code, 'full_export', 'waiting')
conns = {c: solr_conn(c) for c in cores}
pre_results = {c: solr_search(conns[c], {'q': '*'}) for c in cores}
export_records(load_exporter, record_set)
load_results = {c: solr_search(conns[c], {'q': '*'}) for c in cores}
del_results = {}
if try_delete:
del_exporter = new_exporter(etype_code, 'full_export', 'waiting')
delete_records(del_exporter, record_set)
del_results = {c: solr_search(conns[c], {'q': '*'}) for c in cores}
for core in cores:
assert len(pre_results[core]) == 0
assert len(load_results[core]) > 0
if try_delete:
assert len(del_results[core]) == 0
|
Python
| 0
|
@@ -992,36 +992,51 @@
'try_delete':
-True
+%5B'bibdata', 'marc'%5D
%0A %7D,%0A
@@ -1150,36 +1150,44 @@
'try_delete':
-True
+%5B'haystack'%5D
%0A %7D,%0A
@@ -1308,12 +1308,20 @@
e':
-True
+%5B'haystack'%5D
%0A
@@ -1452,37 +1452,34 @@
'try_delete':
-False
+%5B%5D
%0A %7D,%0A
@@ -1590,37 +1590,34 @@
'try_delete':
-False
+%5B%5D
%0A %7D,%0A
@@ -1746,24 +1746,201 @@
e':
-False%0A %7D,
+%5B%5D%0A %7D,%0A 'BibsAndAttachedToSolr': %7B%0A 'record_set': bib_set,%0A 'cores': %5B'bibdata', 'haystack', 'marc'%5D,%0A 'try_delete': %5B'bibdata', 'marc'%5D%0A %7D
%0A
@@ -2114,32 +2114,61 @@
LocationsToSolr'
+,%0A 'BibsAndAttachedToSolr'
%5D)%0Adef test_expo
@@ -2797,24 +2797,53 @@
tionsToSolr'
+,%0A 'BibsAndAttachedToSolr'
%5D)%0Adef test_
@@ -3816,31 +3816,59 @@
': '*'%7D)
- for c in cores
+%0A for c in try_delete
%7D%0A%0A f
@@ -3978,24 +3978,32 @@
%0A if
+core in
try_delete:%0A
|
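Note what the change above does to the fixture: 'try_delete' goes from a boolean to a list of core names, so one value both gates whether deletion is attempted at all (an empty list is falsy) and selects which Solr cores the post-delete assertions run against. A small sketch of the pattern, with stand-in values:

cores = ['bibdata', 'haystack', 'marc']
try_delete = ['bibdata', 'marc']   # an empty list would skip deletion entirely

if try_delete:                      # truthiness gates the delete step
    del_results = {c: [] for c in try_delete}   # stand-in for solr_search
    for core in cores:
        if core in try_delete:      # membership picks the cores to assert on
            assert len(del_results[core]) == 0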
643c8bf95bd5ac0df32eed39beb7124badd723ed
|
allow extra args in subprocess dispatcher
|
lib/amqp_service/dispatcher/subprocess_dispatcher.py
|
lib/amqp_service/dispatcher/subprocess_dispatcher.py
|
import logging
import subprocess
from amqp_service.dispatcher import util
LOG = logging.getLogger(__name__)
class SubprocessDispatcher(object):
def launch_job(self, command, arguments=[],
wrapper=None, wrapper_arguments=[], environment={},
stdout=None, stderr=None):
command_list = []
if wrapper:
command_list.append(wrapper)
command_list.extend(wrapper_arguments)
command_list.append(command)
command_list.extend(arguments)
with util.environment(environment):
LOG.debug('executing subprocess using command_list: %s',
command_list)
exit_code = subprocess.call(command_list,
stdout=stdout, stderr=stderr)
if exit_code > 0:
# XXX get error message
LOG.debug('failed to execute subprocess job, exit_code = %d',
exit_code)
return False, exit_code
else:
LOG.debug('successfully executed subprocess job')
return True, exit_code
|
Python
| 0
|
@@ -288,16 +288,26 @@
err=None
+, **kwargs
):%0A%0A
|
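The one-line change above widens launch_job's signature with **kwargs so callers may pass extra, unrecognized keyword arguments without triggering a TypeError; the extras are simply ignored. A minimal sketch of the idea:

def launch_job(command, arguments=(), **kwargs):
    # unexpected keyword arguments are accepted and ignored
    return [command] + list(arguments)

print(launch_job('echo', ['hi'], priority=5, retries=2))  # fails without **kwargs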
2dd00f27e0134b04db66a31ef83d34417ce39c46
|
fix bug where attempted to pickle a placeholder
|
cleverhans/serial.py
|
cleverhans/serial.py
|
"""Serialization functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import joblib
import tensorflow as tf
from cleverhans.model import Model
from cleverhans.utils import safe_zip
class PicklableVariable(object):
"""
A wrapper around a Variable that makes it picklable.
The name of the Variable will not be reliable, only the value. Models
intended to be picklable should identify variables by referencing
Python objects rather than by using TensorFlow's names.
TensorFlow Variables have different values associated with each Session.
For this class, the value associated with the default Session will be used
for both saving and loading, so both operations require that a default
Session has been selected.
Pickle is not secure. Unpickle only files you made yourself.
See cleverhans_tutorials/mnist_tutorial_picklable.py for examples of a
complete model training, pickling, and unpickling process using
PicklableVariable.
See cleverhans.picklable_model for models built using PicklableVariable.
"""
def __init__(self, *args, **kwargs):
self.var = tf.Variable(*args, **kwargs)
def __getstate__(self):
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
return {'var': sess.run(self.var)}
def __setstate__(self, d):
self.var = tf.Variable(d['var'])
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("PicklableVariable requires a default "
"TensorFlow session")
sess.run(self.var.initializer)
class NoRefModel(Model):
"""
A Model that can be pickled because it contains no references to any
Variables (e.g. it identifies Variables only by name).
The Model must be able to find all of its Variables via get_params
for them to be pickled.
Note that NoRefModel may have different Variable names after it is
restored, e.g. if the unpickling is run with a different enclosing
scope. NoRefModel will still work in these circumstances as long
as get_params returns the same order of Variables after unpickling
as it did before pickling.
See also cleverhans.picklable_model for a different, complementary
pickling strategy: models that can be pickled because they use *only*
references to Variables and work regardless of Variable names.
"""
def __getstate__(self):
# Serialize everything except the Variables
out = self.__dict__.copy()
# Add the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
out["_tf_variables"] = sess.run(self.get_params())
return out
def __setstate__(self, d):
tf_variables = d["_tf_variables"]
del d["_tf_variables"]
# Deserialize everything except the Variables
self.__dict__ = d
# Deserialize the Variables
sess = tf.get_default_session()
if sess is None:
raise RuntimeError("NoRefModel requires a default "
"TensorFlow session")
for var, value in safe_zip(self.get_params(), tf_variables):
var.load(value, sess)
def save(filepath, obj):
"""Saves an object to the specified filepath using joblib.
joblib is like pickle but will save NumPy arrays as separate files for
greater efficiency.
:param filepath: str, path to save to
:param obj: object to save
"""
joblib.dump(obj, filepath)
def load(filepath):
"""Returns an object stored via `save`
"""
obj = joblib.load(filepath)
return obj
|
Python
| 0
|
@@ -2707,16 +2707,263 @@
.copy()%0A
+%0A # The base Model class adds this tf reference to self%0A # We mustn't pickle anything tf, this will need to be%0A # regenerated after the model is reloaded.%0A if %22_dummy_input%22 in out:%0A del out%5B%22_dummy_input%22%5D%0A%0A
|
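The fix above drops _dummy_input (a TensorFlow reference added to self by the base Model class, per the patch comment) from the state dict before pickling, since such objects cannot be serialized and must be regenerated after reload. A framework-free sketch of the same __getstate__ pattern, with hypothetical attribute names:

import pickle

class Holder:
    def __init__(self):
        self.value = 42
        self._unpicklable = lambda x: x   # lambdas cannot be pickled

    def __getstate__(self):
        out = self.__dict__.copy()
        # drop anything that cannot survive pickling; it has to be
        # regenerated after the object is reloaded
        out.pop('_unpicklable', None)
        return out

restored = pickle.loads(pickle.dumps(Holder()))
print(restored.value)                      # 42
print(hasattr(restored, '_unpicklable'))   # False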
054dc32d30ca9175a6c8b40af52491b8e3a98978
|
Debug the URL that's being requested
|
heufybot/modules/util/webutils.py
|
heufybot/modules/util/webutils.py
|
from twisted.plugin import IPlugin
from heufybot.moduleinterface import BotModule, IBotModule
from heufybot.utils.logutils import logExceptionTrace
from zope.interface import implements
import re, requests
class WebUtils(BotModule):
implements(IPlugin, IBotModule)
name = "WebUtils"
canDisable = False
def hookBot(self, bot):
self.bot = bot
def actions(self):
return [ ("fetch-url", 1, self.fetchURL) ]
def fetchURL(self, url, params = None, extraHeaders = None):
headers = { "user-agent": "Mozilla/5.0" }
if extraHeaders:
headers.update(extraHeaders)
try:
request = requests.get(url, params=params, headers=headers)
pageType = request.headers["content-type"]
if not re.match("^(text/.*|application/((rss|atom|rdf)\+)?xml(;.*)?|application/(.*)json(;.*)?)$", pageType):
# Make sure we don't download any unwanted things
return None
return request
except requests.RequestException as ex:
logExceptionTrace("Error while fetching from {}: {}".format(url, ex))
return None
webutils = WebUtils()
|
Python
| 0.000001
|
@@ -28,16 +28,47 @@
IPlugin%0A
+from twisted.python import log%0A
from heu
@@ -216,16 +216,25 @@
s%0Aimport
+ logging,
re, req
@@ -1015,16 +1015,70 @@
rn None%0A
+ log.msg(request.url, level=logging.DEBUG)%0A
|
87f7d677cfd0a9947f661d88eb482a253c416f27
|
Remove mentions of has_appcontent from ct_tracker
|
feincms/module/page/extensions/ct_tracker.py
|
feincms/module/page/extensions/ct_tracker.py
|
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
#
# ct_tracker.py
# FeinCMS
#
# Created by Martin J. Laubach on 02.10.09.
# Copyright (c) 2009 Martin J. Laubach. All rights reserved.
#
# ------------------------------------------------------------------------
"""
Track the content types for pages. Instead of gathering the content
types present in each page at run time, save the current state at
saving time, thus saving a db query on page delivery.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models.signals import pre_save
from django.utils.translation import ugettext_lazy as _
# ------------------------------------------------------------------------
HAS_APPCONTENT_KEY = '_has_appcontent'
def page_count_content_types(self):
"""
Returns a representation of all the content types present on a page.
Note that the content types are stored as the id of the django_content_type
so that it does not depend on the order/number of registered CTs.
"""
ct_inventory = {}
if self.id is not None:
# find all concrete content type tables which have at least one entry for
# the current CMS object
sql = ' UNION '.join([
'SELECT %d AS ct_idx, region, COUNT(*) FROM %s WHERE parent_id=%s GROUP BY region' % (
idx,
cls._meta.db_table,
self.pk) for idx, cls in enumerate(self._feincms_content_types)])
from django.db import connection
cursor = connection.cursor()
cursor.execute(sql)
row = cursor.fetchall()
# Now convert the content types to django ContentType.id, so the result
# set is stable wrt. registered feincms content types.
has_appcontent = False
for ct_idx, region, count in row:
from django.contrib.contenttypes.models import ContentType
from feincms.content.application.models import ApplicationContent
if count:
if not ct_inventory.has_key(region):
ct_inventory[region] = list()
feincms_ct = self._feincms_content_types[ct_idx]
django_ct = ContentType.objects.get_for_model(feincms_ct)
ct_inventory[region].append(django_ct.id)
if issubclass(feincms_ct, ApplicationContent):
has_appcontent = True
ct_inventory[HAS_APPCONTENT_KEY] = has_appcontent
return ct_inventory
# ------------------------------------------------------------------------
def get_tr_map(self):
"""
Build the translation map for django ct to feincms ct
"""
# Prime translation map and cache it in the class. This needs to be
# done late as opposed to at class definition time as not all information
# is ready, especially when we are doing a "syncdb", when the ContentType table
# does not yet exist
tr_map = getattr(self.__class__, '_django_ct_to_feincms_ct_map', None)
if tr_map is None:
tr_map = { }
for idx, ct in enumerate(self._feincms_content_types):
tr_map[ContentType.objects.get_for_model(ct).id] = idx
setattr(self.__class__, '_django_ct_to_feincms_ct_map', tr_map)
return tr_map
# ------------------------------------------------------------------------
def page_get_content_types_for_region(self, region):
"""
Overrides Page.get_content_types_for_region.
If a page with an empty _ct_inventory is encountered, compute all the
content types currently used on that page and save the list in the page
object itself. Further requests for that page can then access that
information and find out which content types are used without resorting
to multiple selects on different ct tables.
It is therefore important that even an "empty" page does not have an
empty _ct_inventory. Luckily, this is ensured with the HAS_APPCONTENT_KEY
entry.
"""
inv = self._ct_inventory
if self.id and len(inv) == 0:
self._ct_inventory = inv = self.count_content_types()
self._delayed_save = True # Mark instance so pre_save_handler doesn't null out _ct_inventory
self.save()
retval = [0] * len(self._feincms_content_types)
region_ct_inventory = inv.get(region.key, ())
tr_map = get_tr_map(self)
for django_ct in region_ct_inventory:
retval[tr_map[django_ct]] = 1
return retval
# ------------------------------------------------------------------------
def has_appcontent(self):
inv = self._ct_inventory
return inv.get(HAS_APPCONTENT_KEY, False)
# ------------------------------------------------------------------------
def pre_save_handler(sender, instance, **kwargs):
"""
Intercept save and null out the content type list in the page itself.
"""
# The _delayed_save attribute is only present if we are currently updating
# the _ct_inventory itself (see page_get_content_types_for_region above).
# If we are, don't zero out the computed result.
if not getattr(instance, '_delayed_save', False):
instance._ct_inventory = None
# ------------------------------------------------------------------------
def register(cls, admin_cls):
from feincms.contrib.fields import JSONField
cls.add_to_class('_ct_inventory', JSONField(_('content types'), editable=False, blank=True, null=True))
cls.add_to_class('count_content_types', page_count_content_types)
cls._get_content_types_for_region = page_get_content_types_for_region
pre_save.connect(pre_save_handler, sender=cls)
# Optimize views.applicationcontent since we know what ct are in this page
import feincms.views.applicationcontent
feincms.views.applicationcontent.page_has_appcontent = has_appcontent
# ------------------------------------------------------------------------
|
Python
| 0.000001
|
@@ -838,47 +838,8 @@
----
-%0AHAS_APPCONTENT_KEY = '_has_appcontent'
%0A%0Ade
@@ -1834,39 +1834,8 @@
es.%0A
- has_appcontent = False%0A
@@ -2351,173 +2351,8 @@
d)%0A%0A
- if issubclass(feincms_ct, ApplicationContent):%0A has_appcontent = True%0A%0A ct_inventory%5BHAS_APPCONTENT_KEY%5D = has_appcontent%0A%0A
@@ -4325,185 +4325,8 @@
al%0A%0A
-# ------------------------------------------------------------------------%0Adef has_appcontent(self):%0A inv = self._ct_inventory%0A return inv.get(HAS_APPCONTENT_KEY, False)%0A%0A
# --
@@ -5249,16 +5249,16 @@
region%0A%0A
+
pre_
@@ -5304,207 +5304,8 @@
ls)%0A
-%0A # Optimize views.applicationcontent since we know what ct are in this page%0A import feincms.views.applicationcontent%0A feincms.views.applicationcontent.page_has_appcontent = has_appcontent%0A%0A
# --
|
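Beyond removing the appcontent tracking, the record above shows a compute-once cache pattern worth noting: _ct_inventory is filled lazily on first access, a _delayed_save flag shields that one save from the pre_save handler, and any ordinary save nulls the cache so it gets recomputed. A stripped-down sketch of the invalidate-unless-flagged idea, with hypothetical names:

class Page:
    def __init__(self):
        self.inventory = None   # cached, expensive to compute

    def save(self):
        pre_save_handler(self)

    def get_inventory(self):
        if self.inventory is None:
            self.inventory = {'main': [1, 2]}   # stand-in for the real query
            self._delayed_save = True           # shield the cache...
            self.save()                         # ...while persisting it
        return self.inventory

def pre_save_handler(instance):
    # any normal save invalidates the cache so it is recomputed
    if not getattr(instance, '_delayed_save', False):
        instance.inventory = None

p = Page()
print(p.get_inventory())   # computed once, kept through the shielded save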
ab51dd3c6c649e582deeb2309a88738b45bccba8
|
clean up formatting of logger
|
src/logger.py
|
src/logger.py
|
"""
create a simple logger class that outputs results both to a text file and to the display
"""
import os.path,csv,time
class Logger(object):
def __init__(self,OUTPUT_LOCATION):
self.filename = '__ogp-mdt-log-' + str(time.time()).replace('.','') + '.csv'
self.csvfile = open(os.path.join(OUTPUT_LOCATION, self.filename), mode='a')
self.log = csv.writer(self.csvfile)
def write(self, filename,message):
s = os.path.split(filename)
self.log.writerow([s[0],s[1],message])
def close(self):
self.csvfile.close()
|
Python
| 0.99835
|
@@ -94,19 +94,34 @@
path
-,csv,
+%0Aimport csv%0Aimport
time%0A%0A
+%0A
clas
@@ -138,16 +138,17 @@
bject):%0A
+%0A
def
@@ -161,31 +161,32 @@
__(self,
-OUTPUT_LOCATION
+ output_location
):%0A
@@ -252,16 +252,17 @@
ace('.',
+
'') + '.
@@ -311,23 +311,23 @@
oin(
-OUTPUT_LOCATION
+output_location
, se
@@ -424,16 +424,17 @@
ilename,
+
message)
@@ -507,13 +507,15 @@
%5B0%5D,
+
s%5B1%5D,
+
mess
@@ -571,9 +571,8 @@
close()%0A
-%0A
|
ebdb3a510718288f5db14539d7261f10abb59c96
|
Fix a small typo error in clusterdemo.py (#945)
|
examples/clusterdemo.py
|
examples/clusterdemo.py
|
#!/usr/bin/python
"clusterdemo.py: demo of Mininet Cluster Edition prototype"
from mininet.examples.cluster import ( MininetCluster, SwitchBinPlacer,
RemoteLink )
# ^ Could also use: RemoteSSHLink, RemoteGRELink
from mininet.topolib import TreeTopo
from mininet.log import setLogLevel
from mininet.examples.clustercli import ClusterCLI as CLI
def demo():
"Simple Demo of Cluster Mode"
servers = [ 'localhost', 'ubuntu2', 'ubuntu3' ]
topo = TreeTopo( depth=3, fanout=3 )
net = MininetCluster( topo=topo, servers=servers, Link=RemoteLink,
placement=SwitchBinPlacer )
net.start()
CLI( net )
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
demo()
|
Python
| 0.000027
|
@@ -571,17 +571,17 @@
ervers,
-L
+l
ink=Remo
|
1e873bcbbadd45f950521e7ff4f1057187aaa3d7
|
Fix SQL selected / SQL explain for gis queries
|
debug_toolbar/panels/sql/tracking.py
|
debug_toolbar/panels/sql/tracking.py
|
import datetime
import json
from threading import local
from time import time
from django.utils.encoding import force_str
from debug_toolbar import settings as dt_settings
from debug_toolbar.utils import get_stack, get_template_info, tidy_stacktrace
try:
from psycopg2._json import Json as PostgresJson
except ImportError:
PostgresJson = None
class SQLQueryTriggered(Exception):
"""Thrown when template panel triggers a query"""
pass
class ThreadLocalState(local):
def __init__(self):
self.enabled = True
@property
def Wrapper(self):
if self.enabled:
return NormalCursorWrapper
return ExceptionCursorWrapper
def recording(self, v):
self.enabled = v
state = ThreadLocalState()
recording = state.recording # export function
def wrap_cursor(connection, panel):
if not hasattr(connection, "_djdt_cursor"):
connection._djdt_cursor = connection.cursor
connection._djdt_chunked_cursor = connection.chunked_cursor
def cursor(*args, **kwargs):
# Per the DB API cursor() does not accept any arguments. There's
# some code in the wild which does not follow that convention,
# so we pass on the arguments even though it's not clean.
# See:
# https://github.com/jazzband/django-debug-toolbar/pull/615
# https://github.com/jazzband/django-debug-toolbar/pull/896
return state.Wrapper(
connection._djdt_cursor(*args, **kwargs), connection, panel
)
def chunked_cursor(*args, **kwargs):
# prevent double wrapping
# solves https://github.com/jazzband/django-debug-toolbar/issues/1239
cursor = connection._djdt_chunked_cursor(*args, **kwargs)
if not isinstance(cursor, BaseCursorWrapper):
return state.Wrapper(cursor, connection, panel)
return cursor
connection.cursor = cursor
connection.chunked_cursor = chunked_cursor
return cursor
def unwrap_cursor(connection):
if hasattr(connection, "_djdt_cursor"):
del connection._djdt_cursor
del connection.cursor
del connection.chunked_cursor
class BaseCursorWrapper:
pass
class ExceptionCursorWrapper(BaseCursorWrapper):
"""
Wraps a cursor and raises an exception on any operation.
Used in Templates panel.
"""
def __init__(self, cursor, db, logger):
pass
def __getattr__(self, attr):
raise SQLQueryTriggered()
class NormalCursorWrapper(BaseCursorWrapper):
"""
Wraps a cursor and logs queries.
"""
def __init__(self, cursor, db, logger):
self.cursor = cursor
# Instance of a BaseDatabaseWrapper subclass
self.db = db
# logger must implement a ``record`` method
self.logger = logger
def _quote_expr(self, element):
if isinstance(element, str):
return "'%s'" % element.replace("'", "''")
else:
return repr(element)
def _quote_params(self, params):
if not params:
return params
if isinstance(params, dict):
return {key: self._quote_expr(value) for key, value in params.items()}
return [self._quote_expr(p) for p in params]
def _decode(self, param):
if PostgresJson and isinstance(param, PostgresJson):
return param.dumps(param.adapted)
# If a sequence type, decode each element separately
if isinstance(param, (tuple, list)):
return [self._decode(element) for element in param]
# If a dictionary type, decode each value separately
if isinstance(param, dict):
return {key: self._decode(value) for key, value in param.items()}
# make sure datetime, date and time are converted to string by force_str
CONVERT_TYPES = (datetime.datetime, datetime.date, datetime.time)
try:
return force_str(param, strings_only=not isinstance(param, CONVERT_TYPES))
except UnicodeDecodeError:
return "(encoded string)"
def _record(self, method, sql, params):
start_time = time()
try:
return method(sql, params)
finally:
stop_time = time()
duration = (stop_time - start_time) * 1000
if dt_settings.get_config()["ENABLE_STACKTRACES"]:
stacktrace = tidy_stacktrace(reversed(get_stack()))
else:
stacktrace = []
_params = ""
try:
_params = json.dumps(self._decode(params))
except TypeError:
pass # object not JSON serializable
template_info = get_template_info()
alias = getattr(self.db, "alias", "default")
conn = self.db.connection
vendor = getattr(conn, "vendor", "unknown")
# Sql might be an object (such as psycopg Composed).
# For logging purposes, make sure it's str.
sql = str(sql)
params = {
"vendor": vendor,
"alias": alias,
"sql": self.db.ops.last_executed_query(
self.cursor, sql, self._quote_params(params)
),
"duration": duration,
"raw_sql": sql,
"params": _params,
"raw_params": params,
"stacktrace": stacktrace,
"start_time": start_time,
"stop_time": stop_time,
"is_slow": duration > dt_settings.get_config()["SQL_WARNING_THRESHOLD"],
"is_select": sql.lower().strip().startswith("select"),
"template_info": template_info,
}
if vendor == "postgresql":
# If an erroneous query was run on the connection, it might
# be in a state where checking isolation_level raises an
# exception.
try:
iso_level = conn.isolation_level
except conn.InternalError:
iso_level = "unknown"
params.update(
{
"trans_id": self.logger.get_transaction_id(alias),
"trans_status": conn.get_transaction_status(),
"iso_level": iso_level,
"encoding": conn.encoding,
}
)
# We keep `sql` to maintain backwards compatibility
self.logger.record(**params)
def callproc(self, procname, params=None):
return self._record(self.cursor.callproc, procname, params)
def execute(self, sql, params=None):
return self._record(self.cursor.execute, sql, params)
def executemany(self, sql, param_list):
return self._record(self.cursor.executemany, sql, param_list)
def __getattr__(self, attr):
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
|
Python
| 0.998978
|
@@ -4207,32 +4207,363 @@
()%0A try:%0A
+ if isinstance(params, list):%0A%0A def strip_GeomFromEWKB(param):%0A if isinstance(param, str):%0A return param.lstrip(%22ST_GeomFromEWKB('%5C%5Cx%22).rstrip(%22'::bytea)%22)%0A return param%0A%0A params = %5Bstrip_GeomFromEWKB(param) for param in params%5D%0A
retu
|
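The fix above strips PostGIS wrappers such as ST_GeomFromEWKB('\x...'::bytea) from query parameters so that json.dumps can serialize them. One observation about the patch (not about its intent): str.lstrip and str.rstrip remove a *set* of characters, not a literal prefix or suffix, so hex digits that fall in the set ('a', 'b', 'e', ...) can be stripped from either end too; on Python 3.9+ str.removeprefix/removesuffix match literally. A small sketch:

def strip_geom_wrapper(param):
    # mirrors the patch: works in common cases, but the arguments are
    # treated as character sets, not literal prefixes/suffixes
    if isinstance(param, str):
        return param.lstrip("ST_GeomFromEWKB('\\x").rstrip("'::bytea)")
    return param

print(strip_geom_wrapper("ST_GeomFromEWKB('\\x0101000020E6'::bytea)"))
# 0101000020E6
print(strip_geom_wrapper("ST_GeomFromEWKB('\\x00ab'::bytea)"))
# 00  -- the trailing 'ab' was eaten because those chars are in the rstrip set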
028d4643f728c2618a4660b986694f807a955024
|
Add some necessary decodes() to neovim_mod
|
vroom/neovim_mod.py
|
vroom/neovim_mod.py
|
from vroom.vim import CONFIGFILE, VimscriptString
from vroom.vim import Communicator as VimCommunicator
import subprocess
import time
import neovim
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Communicator(VimCommunicator):
"""Object to communicate with a Neovim server."""
def __init__(self, args, env, writer):
self.writer = writer.commands
self.args = args
self.start_command = [
'nvim',
'-u', args.vimrc,
'-c', 'set shell=' + args.shell,
'-c', 'source %s' % CONFIGFILE]
env['NVIM_LISTEN_ADDRESS'] = args.servername
self.env = env
self._cache = {}
def Quit(self):
if not hasattr(self, 'conn'):
# Never started
return
try:
self.conn.command('qa!')
except IOError:
pass
def Start(self):
"""Starts Neovim"""
self.process = subprocess.Popen(self.start_command, env=self.env)
start_time = time.time()
# Wait at most 5s for the Neovim socket
while not os.path.exists(self.args.servername) \
and time.time() - start_time < 5:
time.sleep(0.01)
self.conn = neovim.connect(self.args.servername)
def Communicate(self, command, extra_delay=0):
"""Sends a command to Neovim
Args:
command: The command to send.
extra_delay: Delay in excess of --delay
Raises:
Quit: If vim quit unexpectedly.
"""
self.writer.Log(command)
parsed_command = self.conn.replace_termcodes(command, True, True, True)
self.conn.feedkeys(parsed_command, '')
self._cache = {}
time.sleep(self.args.delay + extra_delay)
def Ask(self, expression):
"""Asks vim for the result of an expression.
Args:
expression: The expression to ask for.
Returns:
Vim's output (as a string).
Raises:
Quit: If vim quit unexpectedly.
"""
return self.conn.eval(expression)
def GetBufferLines(self, number):
"""Gets the lines in the requested buffer.
Args:
number: The buffer number to load. SHOULD NOT be a member of
SpecialBuffer, use GetMessages if you want messages. Only works on
real buffers.
Returns:
The buffer lines.
"""
if number not in self._cache:
if number is None:
buf = self.conn.get_current_buffer()
else:
for i in range(len(self.conn.get_buffers())):
b = self.conn.buffers[i]
if b.get_number() == number:
buf = b
break
linecount = buf.get_length()
lines = []
for i in range(linecount):
lines.append(buf.get_line(i))
self._cache[number] = lines
return self._cache[number]
def GetCurrentLine(self):
"""Figures out what line the cursor is on.
Returns:
The cursor's line.
"""
if 'line' not in self._cache:
lineno = self.conn.get_current_window().cursor[0]
self._cache['line'] = int(lineno)
return self._cache['line']
def Kill(self):
"""Kills the Neovim process and removes the socket"""
VimCommunicator.Kill(self)
if os.path.exists(self.args.servername):
os.remove(self.args.servername)
|
Python
| 0
|
@@ -1910,16 +1910,32 @@
ression)
+.decode('utf-8')
%0A%0A def
@@ -2639,16 +2639,32 @@
_line(i)
+.decode('utf-8')
)%0A
|
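The two .decode('utf-8') calls added above handle the fact that the RPC layer used by the neovim client can return bytes where the caller expects str on Python 3. A defensive helper for the same situation, as a sketch (the blanket utf-8 assumption mirrors the patch):

def to_text(value):
    """Decode bytes to str; pass anything else through unchanged."""
    if isinstance(value, bytes):
        return value.decode('utf-8')
    return value

print(to_text(b'buffer line'))    # 'buffer line'
print(to_text('already text'))    # 'already text'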
509cedcddd409af001ce98c61513a5321f9dc00a
|
call update cart on cancel (google)
|
hiicart/gateway/google/gateway.py
|
hiicart/gateway/google/gateway.py
|
import base64
import httplib2
import xml.etree.cElementTree as ET
from decimal import Decimal
from django.template import Context, loader
from hiicart.gateway.base import PaymentGatewayBase, SubmitResult
from hiicart.gateway.google.settings import SETTINGS as default_settings
from hiicart.lib.unicodeconverter import convertToUTF8
class GoogleGateway(PaymentGatewayBase):
"""Payment Gateway for Google Checkout."""
def __init__(self, cart):
super(GoogleGateway, self).__init__("google", cart, default_settings)
self._require_settings(["MERCHANT_ID", "MERCHANT_KEY"])
@property
def _cart_url(self):
"""URL to post the Checkout cart to."""
if self.settings["LIVE"]:
base = "https://checkout.google.com/api/checkout/v2/merchantCheckout/Merchant/%s"
else:
base = "https://sandbox.google.com/checkout/api/checkout/v2/merchantCheckout/Merchant/%s"
return base % self.settings["MERCHANT_ID"]
@property
def _order_url(self):
"""URL for the Order Processing API."""
if self.settings["LIVE"]:
base = "https://checkout.google.com/api/checkout/v2/request/Merchant/%s"
else:
base = "https://sandbox.google.com/checkout/api/checkout/v2/request/Merchant/%s"
return base % self.settings["MERCHANT_ID"]
def _is_valid(self):
"""Return True if gateway is valid."""
# TODO: Query Google to validate credentials
return True
def _send_xml(self, url, xml):
"""Send a command to the Checkout Order Processing API."""
http = httplib2.Http()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Authorization": "Basic %s" % self.get_basic_auth()}
return http.request(url, "POST", xml, headers=headers)
def cancel_items(self, payment, items=None, reason=None):
self._update_with_cart_settings({'request': None})
transaction_id = payment.transaction_id
template = loader.get_template("gateway/google/cancel-items.xml")
ctx = Context({"transaction_id": transaction_id,
"reason": reason,
"comment": None,
"items": items})
cancel_xml = convertToUTF8(template.render(ctx))
response, content = self._send_xml(self._order_url, cancel_xml)
return SubmitResult(None)
def charge_recurring(self, grace_period=None):
"""HiiCart doesn't currently support manually charging subscriptions with Google Checkout"""
pass
def get_basic_auth(self):
"""Get the base64 encoded string for Basic auth"""
return base64.b64encode("%s:%s" % (self.settings["MERCHANT_ID"],
self.settings["MERCHANT_KEY"]))
def sanitize_clone(self):
"""Remove any gateway-specific changes to a cloned cart."""
pass
def submit(self, collect_address=False, cart_settings_kwargs=None):
"""Submit a cart to Google Checkout.
Google Checkout's submission process is:
* Construct an xml representation of the cart
* Post the xml to Checkout, using HTTP Basic Auth
* Checkout returns a url to redirect the user to"""
# Construct cart xml
self._update_with_cart_settings(cart_settings_kwargs)
template = loader.get_template("gateway/google/cart.xml")
ctx = Context({"cart": self.cart,
"continue_shopping_url": self.settings.get("SHOPPING_URL", None),
"edit_cart_url": self.settings.get("EDIT_URL", None),
"currency": self.settings["CURRENCY"]})
cart_xml = convertToUTF8(template.render(ctx))
response, content = self._send_xml(self._cart_url, cart_xml)
xml = ET.XML(content)
url = xml.find("{http://checkout.google.com/schema/2}redirect-url").text
return SubmitResult("url", url)
def refund_payment(self, payment, reason=None):
"""
Refund the full amount of this payment
"""
self.refund(payment.transaction_id, payment.amount, reason)
payment.state = 'REFUND'
payment.save()
def refund(self, transaction_id, amount, reason=None):
"""Refund a payment."""
self._update_with_cart_settings({'request': None})
template = loader.get_template("gateway/google/refund.xml")
ctx = Context({"transaction_id": transaction_id,
"reason": reason,
"comment": None,
"currency": self.settings["CURRENCY"],
"amount": Decimal(amount).quantize(Decimal('.01'))})
refund_xml = convertToUTF8(template.render(ctx))
response, content = self._send_xml(self._order_url, refund_xml)
return SubmitResult(None)
|
Python
| 0
|
@@ -2373,24 +2373,57 @@
cancel_xml)%0A
+ self.cart.update_state()%0A
retu
|
5dbaf2f519c573dbeb239be0d21282ad432339e8
|
Fix order in base
|
project_euler/library/base.py
|
project_euler/library/base.py
|
from typing import List
def number_to_list(number: int, base: int = 10) -> List[int]:
if number < 0:
raise ValueError(f'Cannot convert {number} to list, must be non-negative.')
if base <= 0:
raise ValueError(f'Cannot convert to base {base}.')
digits = []
while number > 0:
digits.append(number % base)
number //= base
return digits
def list_to_number(representation: List[int], base: int = 10) -> int:
accumulate = 0
for digit in representation:
accumulate = accumulate * base + digit
return accumulate
def is_permutation(self: int, other: int, base: int = 10) -> bool:
if self // other >= base or other // self >= base:
return False
else:
return sorted(number_to_list(self, base)) == \
sorted(number_to_list(other, base))
|
Python
| 0.000079
|
@@ -372,22 +372,38 @@
return
+list(reversed(
digits
+))
%0A%0A%0Adef l
|
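The one-line fix above matters because number_to_list builds its digit list least-significant digit first, while list_to_number folds left assuming the most significant digit comes first; without reversed() the pair does not round-trip (number_to_list(123) was [3, 2, 1], which list_to_number turns into 321). A worked example with the fixed function:

def number_to_list(number, base=10):
    digits = []
    while number > 0:
        digits.append(number % base)
        number //= base
    return list(reversed(digits))   # the fix: most significant digit first

def list_to_number(rep, base=10):
    acc = 0
    for digit in rep:               # assumes most significant digit first
        acc = acc * base + digit
    return acc

assert number_to_list(123) == [1, 2, 3]
assert list_to_number(number_to_list(123)) == 123   # round-trips after the fix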
24b868f99d40e5309fc4a8f8e1ca9d9ca00524ea
|
move init code into its own function
|
src/app.py
|
src/app.py
|
# Copyright 2015 Curtis Sand
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The netify application object."""
import os
from flask import Flask
DEFAULT_SECRET_KEY_SIZE = 64 # bytes -> 64 * 8 = 512bits
APP = Flask(__name__)
APP.config.from_object(__name__)
APP.config.update(dict(SECRET_KEY=os.urandom(DEFAULT_SECRET_KEY_SIZE)))
|
Python
| 0.000006
|
@@ -702,16 +702,83 @@
12bits%0A%0A
+%0AAPP = None # Singleton Flask App%0A%0Adef init():%0A global APP%0A
APP = Fl
@@ -783,32 +783,36 @@
Flask(__name__)%0A
+
APP.config.from_
@@ -828,16 +828,20 @@
name__)%0A
+
APP.conf
@@ -904,8 +904,98 @@
SIZE)))%0A
+ return APP%0A%0Adef run(host=None, port=None, debug=None):%0A APP.run(host, port, debug)%0A
|
ddaa70187eb36678b366560f20d53dd75443921c
|
fix test code
|
test/test_case/service.py
|
test/test_case/service.py
|
import unittest
import sys
sys.path.append("../../")
import time
import re
from service_test_utils import ServiceTestUtils
from dockerEE.remote import RemoteInterfaceImpl
from docker_container_test_utils import DockerContainerTestUtils
## TestEnvironmentEmulationServer
#
# The test case for EnvironmentEmulationService
class TestEnvironmentEmulationService(unittest.TestCase):
## execute stub
# @param self The object pointer
# @param action The action
# @return CommandResult
def __execStub(self, action):
return self.__service_interface.sudo("python " + self.__stub + " " + action, True)
## init test case
# @param self The object pointer
def setUp(self):
host = "localhost"
user = "vagrant"
password = "vagrant"
## stub file
self.__stub = "/tmp/service_stub.py"
## service interface
self.__service = ServiceTestUtils("python " + self.__stub, host, user, password)
## remote interface
self.__interface = RemoteInterfaceImpl(host, user, password)
## env.yml parameter
self.__parameter = {"servers":[{"name": "c1", "image": "centos", "IPs": [{"dev": "eth0", "IP": "192.168.0.1/24", "gw": "192.168.0.254/24"}]}, {"name": "c2", "image": "centos", "IPs": [{"dev": "eth0", "IP": "192.168.0.2/24"}, {"dev": "eth1", "IP": "192.168.1.2/24", "gw": "192.168.1.254/24"}]}]}
## test utils
self.__utils = DockerContainerTestUtils(host, user, password)
## environment definition file
self.__filename = "/tmp/env.yml"
# make stub script
f = open(self.__stub, "w")
f.write("import sys\n")
f.write("sys.path.append('/vagrant')\n")
f.write("from dockerEE.service import EnvironmentEmulationService\n")
f.write("service = EnvironmentEmulationService('localhost', 'vagrant', 'vagrant')\n")
f.write("service.action()")
f.close()
# make env.yml
f = open(self.__filename, "w")
f.write("---\n")
f.write("servers:\n")
for p in self.__parameter["servers"]:
f.write("- name: " + p["name"] + "\n")
f.write(" image: " + p["image"] + "\n")
f.write(" IPs:\n")
for n in p["IPs"]:
f.write(" - dev: " + n["dev"] + "\n")
f.write(" IP: " + n["IP"] + "\n")
if "gw" in n:
f.write(" gw: " + n["gw"] + "\n")
f.close()
## test "python service.py start/stop"
# @param self The object pointer
def testStartStop(self):
servers = [x["name"] for x in self.__parameter["servers"]]
ret = self.__service.start(self.__filename)
time.sleep(10)
self.assertTrue(self.__utils.checkContainerExist(servers))
for p in self.__parameter["servers"]:
for n in p["IPs"]:
ret = self.__interface.sudo("docker exec -it " + p["name"] + " ip addr show")
self.assertTrue(re.search(r"inet " + n["IP"] + ".*" + n["dev"], ret.stdout))
if "gw" in n:
ret = self.__interface.sudo("docker exec -it " + p["name"] + " ip route show")
self.assertIn("default via " + n["gw"] + " dev " + n["dev"], ret.stdout)
self.__service.stop()
time.sleep(10)
self.assertTrue(self.__utils.checkContainerNotExist(servers))
## test "python service.py status"
# @param self The object pointer
def testStatus(self):
servers = [x["name"] for x in self.__parameter["servers"]]
self.__service.start(self.__filename)
time.sleep(10)
ret = self.__service.status()
status = "servers\n"
for p in self.__parameter["servers"]:
status += "\t" + p["name"] + "\n"
for n in p["IPs"]:
status += "\t\t" + n["dev"] + " : " + n["IP"]
if "gw" in n:
status += " via " + n["gw"] + "\n"
else:
status += "\n"
ret.stdout += "\n"
self.assertIn(status, ret.stdout)
self.__service.stop()
time.sleep(10)
ret = self.__service.status()
self.assertEqual(ret.rc, 0)
## test "python service.py reload"
# @param self The object pointer
def testReload(self):
servers = [x["name"] for x in self.__parameter["servers"]]
self.__service.start(self.__filename, 10)
for s in servers:
self.__interface.sudo("docker exec -it " + s + " touch /tmp/hello_dockerEE")
for s in servers:
ret = self.__interface.sudo("docker exec -it " + s + " test -f /tmp/hello_dockerEE", True)
self.assertEqual(ret.rc, 0)
self.__service.reload(servers[1:])
ret = self.__interface.sudo("docker exec -it " + servers[0] + " test -f /tmp/hello_dockerEE", True)
self.assertEqual(ret.rc, 0)
ret = self.__interface.sudo("docker exec -it " + servers[1] + " test -f /tmp/hello_dockerEE", True)
self.assertEqual(ret.rc, 1)
for p in self.__parameter["servers"]:
for n in p["IPs"]:
ret = self.__interface.sudo("docker exec -it " + p["name"] + " ip addr show")
self.assertTrue(re.search(r"inet " + n["IP"] + ".*" + n["dev"], ret.stdout))
if "gw" in n:
ret = self.__interface.sudo("docker exec -it " + p["name"] + " ip route show")
self.assertIn("default via " + n["gw"] + " dev " + n["dev"], ret.stdout)
self.__service.stop(10)
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000024
|
@@ -3232,32 +3232,46 @@
via %22 + n%5B%22gw%22%5D
+.split(%22/%22)%5B0%5D
+ %22 dev %22 + n%5B%22
@@ -5522,24 +5522,38 @@
%22 + n%5B%22gw%22%5D
+.split(%22/%22)%5B0%5D
+ %22 dev %22 +
|
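The fix above accounts for the gateway being written in CIDR form in env.yml (e.g. 192.168.1.254/24), while ip route show prints a bare address; split("/")[0] drops the prefix length before the assertion. Worked example:

gw = "192.168.1.254/24"
print(gw.split("/")[0])   # 192.168.1.254 -- matches "default via 192.168.1.254 dev eth1"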
fd6a590a715e857c3e258b60daa4906a35a6cd37
|
update lava-job-runner
|
lava-job-runner.py
|
lava-job-runner.py
|
#!/usr/bin/python
import xmlrpclib
import sys
import subprocess
import fnmatch
import os
import json
import time
server = None
online_device_types = []
offline_device_types = []
online_devices = []
offline_devices = []
job_map = {}
def poll_jobs():
run = True
submitted_jobs = {}
for job in job_map:
if job_map[job] is not None:
submitted_jobs[job_map[job]] = job
while run:
try:
for job in submitted_jobs:
status = server.scheduler.job_status(job)
if status['job_status'] == 'Complete':
print os.path.basename(submitted_jobs[job]) + ': pass'
submitted_jobs.pop(job, None)
elif status['job_status'] == 'Incomplete':
print os.path.basename(submitted_jobs[job]) + ': fail'
submitted_jobs.pop(job, None)
else:
print str(job) + ' - ' + str(status['job_status'])
if not submitted_jobs:
run = False
break
else:
time.sleep(10)
except (xmlrpclib.ProtocolError, xmlrpclib.Fault, IOError) as e:
print e
continue
def submit_jobs():
global online_devices
global offline_devices
global online_device_types
global offline_device_types
try:
print "Submitting Jobs to Server..."
for job in job_map:
with open(job, 'rb') as stream:
job_data = stream.read()
job_info = json.loads(job_data)
# Check if target is online
if 'target' in job_info:
if job_info['target'] in offline_devices:
print "%s is OFFLINE skipping submission" % job_info['target']
print job + ': skip'
else:
job_map[job] = server.scheduler.submit_job(job_data)
elif 'device_type' in job_info:
if job_info['device_type'] in offline_device_types:
print "All device types: %s are OFFLINE, skipping..." % job_info['target']
print job + ': skip'
else:
job_map[job] = server.scheduler.submit_job(job_data)
else:
print "Malformed JSON: No device_type or target present, skipping..."
print job + ': skip'
except (xmlrpclib.ProtocolError, xmlrpclib.Fault, IOError) as e:
print "ERROR!"
print e
def load_jobs():
top = os.getcwd()
for root, dirnames, filenames in os.walk(top):
for filename in fnmatch.filter(filenames, '*.json'):
job_map[os.path.join(root, filename)] = None
def retrieve_jobs(jobs):
cmd = 'git clone %s' % jobs
try:
print "Cloning LAVA Jobs..."
subprocess.check_output(cmd, shell=True)
print "Clone Successful!"
print "clone-jobs: pass"
except subprocess.CalledProcessError as e:
print "ERROR!"
print "Unable to clone %s" % jobs
print "clone-jobs: fail"
exit(1)
def gather_devices():
global online_devices
global offline_devices
print "Gathering Devices..."
all_devices = server.scheduler.all_devices()
for device in all_devices:
if device[2] == 'offline':
offline_devices.append(device[0])
else:
online_devices.append(device[0])
print "Gathered Devices Successfully!"
def gather_device_types():
global online_device_types
global offline_device_types
print "Gathering Device Types..."
all_device_types = server.scheduler.all_device_types()
for device_type in all_device_types:
if device_type['idle'] < device_type['offline']:
offline_device_types.append(device_type['name'])
else:
online_device_types.append(device_type['name'])
print "Gathered Device Types Successfully!"
def connect(url):
try:
print "Connecting to Server..."
global server
global online_device_types
global offline_device_types
server = xmlrpclib.ServerProxy(url)
gather_device_types()
gather_devices()
print "Connection Successful!"
print "connect-to-server: pass"
except (xmlrpclib.ProtocolError, xmlrpclib.Fault, IOError) as e:
print "ERROR!"
print "Unable to connect to %s" % url
print "The URL should be in the form http(s)://<user>:<token>@hostname/RPC2"
print "connect-to-server: fail"
print e
exit(1)
def main(url, jobs):
connect(url)
retrieve_jobs(jobs)
load_jobs()
submit_jobs()
poll_jobs()
exit(0)
if __name__ == '__main__':
main(sys.argv[1], sys.argv[2])
|
Python
| 0
|
@@ -592,32 +592,61 @@
print
+ 'job-id-' + str(job) + '-' +
os.path.basenam
@@ -671,16 +671,17 @@
ob%5D) + '
+
: pass'%0A
@@ -814,16 +814,45 @@
print
+ 'job-id-' + str(job) + '-' +
os.path
@@ -885,16 +885,17 @@
ob%5D) + '
+
: fail'%0A
@@ -1888,35 +1888,53 @@
print
+os.path.basename(
job
+)
+ ': skip'%0A
@@ -2249,39 +2249,58 @@
print
+os.path.basename(
job
+)
+ '
+
: skip'%0A
@@ -2516,15 +2516,34 @@
int
+os.path.basename(
job
+)
+ '
+
: sk
@@ -3075,16 +3075,17 @@
one-jobs
+
: pass%22%0A
@@ -3221,16 +3221,17 @@
one-jobs
+
: fail%22%0A
@@ -4418,16 +4418,17 @@
o-server
+
: pass%22%0A
@@ -4650,16 +4650,16 @@
e/RPC2%22%0A
-
@@ -4682,16 +4682,17 @@
o-server
+
: fail%22%0A
|
27175c2cf50022af5c4683ba0283506c5f9a5fc6
|
Update migration
|
rest_framework/authtoken/migrations/0001_initial.py
|
rest_framework/authtoken/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from rest_framework.settings import api_settings
try:
from django.contrib.auth import get_user_model
except ImportError: # django < 1.5
from django.contrib.auth.models import User
else:
User = get_user_model()
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Token'
db.create_table('authtoken_token', (
('key', self.gf('django.db.models.fields.CharField')(max_length=40, primary_key=True)),
('user', self.gf('django.db.models.fields.related.OneToOneField')(related_name='auth_token', unique=True, to=orm['%s.%s' % (User._meta.app_label, User._meta.object_name)])),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
))
db.send_create_signal('authtoken', ['Token'])
def backwards(self, orm):
# Deleting model 'Token'
db.delete_table('authtoken_token')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
"%s.%s" % (User._meta.app_label, User._meta.module_name): {
'Meta': {'object_name': User._meta.module_name},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'authtoken.token': {
'Meta': {'object_name': 'Token'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'auth_token'", 'unique': 'True', 'to': "orm['%s.%s']" % (User._meta.app_label, User._meta.object_name)})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['authtoken']
|
Python
| 0.000001
|
@@ -2252,1411 +2252,80 @@
'
-date_joined': ('django.db.models.fields.DateTimeField', %5B%5D, %7B'default': 'datetime.datetime.now'%7D),%0A 'email': ('django.db.models.fields.EmailField', %5B%5D, %7B'max_length': '75', 'blank': 'True'%7D),%0A 'first_name': ('django.db.models.fields.CharField', %5B%5D, %7B'max_length': '30', 'blank': 'True'%7D),%0A 'groups': ('django.db.models.fields.related.ManyToManyField', %5B%5D, %7B'to': %22orm%5B'auth.Group'%5D%22, 'symmetrical': 'False', 'blank': 'True'%7D),%0A 'id': ('django.db.models.fields.AutoField', %5B%5D, %7B'primary_key': 'True'%7D),%0A 'is_active': ('django.db.models.fields.BooleanField', %5B%5D, %7B'default': 'True'%7D),%0A 'is_staff': ('django.db.models.fields.BooleanField', %5B%5D, %7B'default': 'False'%7D),%0A 'is_superuser': ('django.db.models.fields.BooleanField', %5B%5D, %7B'default': 'False'%7D),%0A 'last_login': ('django.db.models.fields.DateTimeField', %5B%5D, %7B'default': 'datetime.datetime.now'%7D),%0A 'last_name': ('django.db.models.fields.CharField', %5B%5D, %7B'max_length': '30', 'blank': 'True'%7D),%0A 'password': ('django.db.models.fields.CharField', %5B%5D, %7B'max_length': '128'%7D),%0A 'user_permissions': ('django.db.models.fields.related.ManyToManyField', %5B%5D, %7B'to': %22orm%5B'auth.Permission'%5D%22, 'symmetrical': 'False', 'blank': 'True'%7D),%0A 'username': ('django.db.models.fields.CharField', %5B%5D, %7B'unique': 'True', 'max_length': '30'%7D)
+id': ('django.db.models.fields.AutoField', %5B%5D, %7B'primary_key': 'True'%7D),
%0A
|
680d324c084a165a8dbc8dc967ffa9048f56cf6b
|
Fix queries - Attempt #7
|
src/app.py
|
src/app.py
|
from flask import Flask, render_template, request, redirect, url_for, session, flash
import psycopg2, sys, os, datetime
from config import *
# Instantiate App
app = Flask(__name__)
app.secret_key = APP_SECRET_KEY
# Database Query Function
def db_query(sql_string, for_selection):
conn = psycopg2.connect(DB_LOCATION)
cur = conn.cursor()
print("The query being executed is", sql_string, "\n")
cur.execute(sql_string)
if for_selection is True:
try:
entries = cur.fetchall()
data = []
for row in entries:
for column in row:
data.append(column)
except:
data = ''
else:
try:
entries = cur.fetchall()
data = {}
for row in entries:
for column in row:
data[column] = row
except:
data = ''
conn.commit()
cur.close()
conn.close()
return data
# Date Validation Function
def validate_date(date_string):
try:
date = datetime.datetime.strptime(date_string, '%m/%d/%Y')
return str(date)
except ValueError:
raise ValueError("Incorrect data format, should be MM/DD/YYYY")
# Templates
@app.route('/')
@app.route('/index')
@app.route('/index.html')
def index():
return redirect(url_for('login'))
@app.route('/login', methods=['GET', 'POST'])
def login():
error = None
if request.method == 'POST':
if False and request.form['username'] != USERNAME:
error = 'Invalid username'
elif False and request.form['password'] != PASSWORD:
error = 'Invalid password'
else:
session['logged_in'] = True
session['username'] = request.form['username']
session['password'] = request.form['password']
return redirect(url_for('report_filter'))
return render_template('login.html')
@app.route('/logout', methods=['GET', 'POST'])
def logout():
session['logged_in'] = False
return render_template('logout.html')
@app.route('/report_filter', methods=['GET', 'POST'])
def report_filter():
# Get facilities list for drop-down menu
facilities_list = db_query('SELECT common_name FROM facilities;', True)
# If a form has been submitted
if request.method == 'POST':
# Validate and pass date
try:
validated_date = validate_date(request.form['filter_date'])
except ValueError:
print("\n\nValueError.args is:", ValueError.args, "\n\n")
flash(ValueError.args)
except TypeError or UnboundLocalError:
flash("You need to enter a date.")
# Not filtering by facility...
if request.form['filter_facility'] == 'none':
moving_query = "SELECT assets.asset_tag, assets.description, f1.location as location1, f2.location as location2, convoys.arrive_dt, convoys.depart_dt" \
" FROM assets" \
" JOIN asset_on ON assets.asset_pk = asset_on.asset_fk" \
" JOIN convoys ON asset_on.asset_fk = convoys.asset_fk" \
" JOIN facilities f1 ON convoys.src_fk = f1.facility_pk" \
" JOIN facilities f2 ON convoys.dst_fk = f2.facility_pk" \
" WHERE convoys.arrive_dt >= '%s' AND convoys.depart_dt <= '%s'" % (validated_date, validated_date)
moving_inventory_data = db_query(moving_query, for_selection=False)
return redirect(url_for('moving_inventory'), date=validated_date, data=moving_inventory_data)
# Filtering by facility...
else:
selected_facility = str(request.form['filter_facility'])
facility_query = "SELECT facilities.fcode, facilities.location, assets.asset_tag, assets.description, asset_at.arrive_dt, asset_at.depart_dt" \
" FROM facilities" \
" JOIN asset_at ON facilities.facility_pk = asset_at.facility_fk" \
" JOIN assets ON asset_at.asset_fk = assets.asset_pk" \
" WHERE facilities.common_name = '%s'" \
" AND asset_at.arrive_dt >= '%s' AND asset_at.depart_dt <= '%s';" % (selected_facility, validated_date, validated_date)
facility_inventory_data = db_query(facility_query, for_selection=False)
return redirect(url_for('facility_inventory'), facility=selected_facility, data=facility_inventory_data)
return render_template('report_filter.html', facilities_list=facilities_list)
@app.route('/facility_inventory', methods=['GET', 'POST'])
def facility_inventory():
return render_template('facility_inventory.html', facility=request.args.get('facility'), date=request.args.get('report_date'))
@app.route('/moving_inventory', methods=['GET', 'POST'])
def moving_inventory():
return render_template('moving_inventory.html', date=request.args.get('report_date'))
# Error Handlers
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
# TODO: Implement other error handlers - http://flask.pocoo.org/docs/0.12/patterns/errorpages/
# Application Deployment
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8080, debug=True)
|
Python
| 0
|
@@ -2684,21 +2684,22 @@
sset_on.
-asset
+convoy
_fk = co
@@ -2704,23 +2704,24 @@
convoys.
-asset_f
+convoy_p
k%22 %5C%0A%09%09%09
@@ -3654,25 +3654,25 @@
t.depart_dt
-%3C
+%3E
= '%25s';%22 %25 (
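
Decoded, the hunks above make two fixes. In moving_query, the second JOIN condition becomes "JOIN convoys ON asset_on.convoy_fk = convoys.convoy_pk"; the old clause compared asset_on.asset_fk to convoys.asset_fk, joining on the wrong keys. In facility_query, the departure filter flips from depart_dt <= to depart_dt >=, so a row now matches when the asset arrived by the report date and does not depart before it. A sketch of the corrected facility query, assuming the same surrounding variables:

    facility_query = "SELECT facilities.fcode, facilities.location, assets.asset_tag, assets.description, asset_at.arrive_dt, asset_at.depart_dt" \
        " FROM facilities" \
        " JOIN asset_at ON facilities.facility_pk = asset_at.facility_fk" \
        " JOIN assets ON asset_at.asset_fk = assets.asset_pk" \
        " WHERE facilities.common_name = '%s'" \
        " AND asset_at.arrive_dt >= '%s' AND asset_at.depart_dt >= '%s';" % (selected_facility, validated_date, validated_date)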
|
dd5ba84c27947f43c8c437d70e81c935aeadcbdd
|
Fix wrong string escape.
|
kirppu/management/commands/import_old_item_data.py
|
kirppu/management/commands/import_old_item_data.py
|
from collections import defaultdict
from decimal import Decimal
import sys
import re
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.utils.dateparse import parse_datetime
from django.core.management.base import BaseCommand
try:
from typing import Dict, List
except ImportError:
class _AbstractType(object):
def __getitem__(self, item): pass
Dict = _AbstractType()
List = _AbstractType()
from kirppu.models import Item, Vendor
from kirppuauth.models import User
# noinspection SpellCheckingInspection,PyPep8Naming
class PostgreDumpParser(object):
def __init__(self, file_name):
self._file_name = file_name
self._data = defaultdict(list) # type: Dict[str, List[Dict[str, str]]]
@property
def data(self):
return self._data
def parse(self, handle=None):
with (handle or open(self._file_name, "r")) as stream:
current_line_data = None
for line in stream:
if line.endswith("\n"):
line = line[:-1]
if current_line_data is not None:
if line == "\\.":
current_line_data = None
continue
if self.parse_STDIN(line, *current_line_data):
continue
if line.strip() == "":
continue
if line.startswith("COPY "):
current_line_data = self.parse_COPY(line)
@staticmethod
def parse_COPY(line):
m = re.match("COPY (?P<table>[\w_]+) \((?P<columns>(?:\w+, )*\w+)\) FROM stdin;", line)
if m is None:
raise ValueError("Not understood copy: " + line)
table = m.group("table")
columns = m.group("columns").split(", ")
return table, columns
def parse_STDIN(self, line, table, columns):
parts = line.split("\t")
assert len(parts) == len(columns), "Sizes differ: {} != {}: {}".format(len(parts), len(columns), line)
data = {
column: value
for column, value in zip(columns, parts)
}
self._data[table].append(data)
class TypeConverter(object):
@staticmethod
def int(inp):
return int(inp) if inp != "\\N" else None
@staticmethod
def str(inp):
return str(inp) if inp != "\\N" else None
@staticmethod
def bool(inp):
return (True, False)["tf".index(inp)] if inp != "\\N" else None
@staticmethod
def decimal(inp):
return Decimal(inp) if inp != "\\N" else None
@staticmethod
def datetime(inp):
return parse_datetime(inp) if inp != "\\N" else None
ItemColumnTypes = {
"hidden": "bool",
"lost_property": "bool",
"box_id": "int",
"vendor_id": "int",
"code": "str",
"abandoned": "bool",
"type": "str",
"price": "decimal",
"printed": "bool",
"adult": "str",
"itemtype": "str",
"name": "str",
"id": "int",
}
UserColumnTypes = {
"password": "str",
"last_login": "datetime",
"is_superuser": "bool",
"username": "str",
"first_name": "str",
"last_name": "str",
"email": "str",
"is_staff": "bool",
"is_active": "bool",
"date_joined": "datetime",
"phone": "str",
"last_checked": "datetime",
"id": "int",
}
VendorColumnTypes = {
"terms_accepted": "datetime",
"id": "int",
}
# noinspection SpellCheckingInspection
class DbConverter(object):
def __init__(self, table_name):
self._table_name = table_name
self._result = []
def parse(self, data):
p = getattr(self, "_parse_" + self._table_name)
return [
p(row)
for row in data
]
@staticmethod
def _parse_kirppu_item(row):
attrs = {
col: getattr(TypeConverter, ItemColumnTypes[col])(row[col])
for col in [
"hidden",
"lost_property",
"box_id",
"vendor_id",
"code",
"abandoned",
"type",
"price",
"printed",
"adult",
"itemtype",
"name",
]
}
r = Item(**attrs)
return r
@staticmethod
def _parse_kirppuauth_user(row):
attrs = {
col: getattr(TypeConverter, UserColumnTypes[col])(row[col])
for col in [
"password",
"last_login",
"is_superuser",
"username",
"first_name",
"last_name",
"email",
"is_staff",
"is_active",
"date_joined",
"phone",
"last_checked",
]
}
r = User(**attrs)
return r
@staticmethod
def _parse_kirppu_vendor(row):
attrs = {
col: getattr(TypeConverter, VendorColumnTypes[col])(row[col])
for col in [
"terms_accepted",
]
}
r = Vendor(**attrs)
return r
class Command(BaseCommand):
help = r"""Import Item data from PostgreSQL dump from stdin or from a file that has been pre-processed.
Do not use unless you know how this works.
One part of Item-data pre-work: grep -P '^\d+\t[^\t]+\t[^\t]+\t[^\t]+\t\w\w\t\w+\t\w\t\w\t@@@\t.*$'
"""
def add_arguments(self, parser):
parser.add_argument("file", type=str, nargs="?")
def handle(self, *args, **options):
f_name = options.get("file")
if f_name is None:
parser = PostgreDumpParser("stdin")
parser.parse(sys.stdin)
else:
parser = PostgreDumpParser(f_name)
parser.parse()
results = {}
for table, data in parser.data.items():
converter = DbConverter(table)
results[table] = converter.parse(data)
user = results["kirppuauth_user"]
assert len(user) == 1
user = user[0]
vendor = results["kirppu_vendor"]
assert len(vendor) == 1
vendor = vendor[0]
items = results["kirppu_item"]
assert len(items) > 0
with transaction.atomic():
# Create the user if it doesn't exist. user is predefined for exception to use.
try:
user = User.objects.get(username=user.username)
except ObjectDoesNotExist:
user.save()
# Create vendor if it doesn't exist. vendor is predefined for exception to use.
try:
vendor = Vendor.objects.get(user=user)
except ObjectDoesNotExist:
vendor.user = user
vendor.save()
# TODO: Create boxes..
# Create items for the vendor.
for item in items:
item.vendor = vendor
item.save()
for table in results.keys():
# noinspection PyProtectedMember
print("\n".join("{} {}: {}".format(r._meta.object_name, r.pk, str(r)) for r in results[table]))
|
Python
| 0.000302
|
@@ -398,16 +398,28 @@
, item):
+%0A
pass%0A
@@ -1601,16 +1601,17 @@
e.match(
+r
%22COPY (?
@@ -2727,16 +2727,17 @@
e None%0A%0A
+%0A
ItemColu
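
The decisive hunk adds an r prefix to the regex literal in parse_COPY; in a plain string, sequences such as \( and \w are invalid string escapes, which Python has warned about since 3.6 and is slated to reject outright. The corrected call:

    m = re.match(r"COPY (?P<table>[\w_]+) \((?P<columns>(?:\w+, )*\w+)\) FROM stdin;", line)

The other two hunks are formatting only: pass moves onto its own line under __getitem__, and a blank line is added before ItemColumnTypes.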
|
e1cb37a061f1522e027004d5ed2aca572223a4a2
|
Update cooler test utility
|
test/utils.py
|
test/utils.py
|
import h5py
import logging
logger = logging.getLogger(__name__)
def get_cooler_info(file_path):
"""Get information of a cooler file.
Args:
file_path (str): Path to a cooler file.
Returns:
dict: Dictionary containing basic information about the cooler file.
"""
with h5py.File(file_path, 'r') as f:
max_zoom = f.attrs.get('max-zoom')
if max_zoom is None:
logger.info('no zoom found')
raise ValueError(
'The `max_zoom` attribute is missing.'
)
total_length = int(CHROM_CUM_LEN[-1])
max_zoom = f.attrs['max-zoom']
bin_size = int(f[str(max_zoom)].attrs['bin-size'])
max_width = bin_size * TILE_SIZE * 2**max_zoom
info = {
'min_pos': [0.0, 0.0],
'max_pos': [total_length, total_length],
'max_zoom': max_zoom,
'max_width': max_width,
'bins_per_dimension': TILE_SIZE,
}
return info
|
Python
| 0
|
@@ -291,16 +291,57 @@
%22%22%22%0A
+ TILE_SIZE = 256%0A CHROM_CUM_LEN = 0
%0A wit
@@ -611,20 +611,16 @@
ength =
-int(
CHROM_CU
@@ -628,13 +628,8 @@
_LEN
-%5B-1%5D)
%0A
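
Decoded, the hunks pin down the two names the old function referenced but never defined: TILE_SIZE and CHROM_CUM_LEN are now set locally right after the docstring (256 and a scalar 0 respectively), and total_length drops the int(...[-1]) indexing to match the scalar:

    TILE_SIZE = 256
    CHROM_CUM_LEN = 0
    # later in the function, instead of int(CHROM_CUM_LEN[-1]):
    total_length = CHROM_CUM_LEN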
|
173960e50bf4c2b7306b7107aa8f99d60ad385d2
|
update docstatus of documents if docstatus value is changed in workflow
|
frappe/workflow/doctype/workflow/workflow.py
|
frappe/workflow/doctype/workflow/workflow.py
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.model import no_value_fields
class Workflow(Document):
def validate(self):
self.set_active()
self.create_custom_field_for_workflow_state()
self.update_default_workflow_status()
self.validate_docstatus()
def on_update(self):
frappe.clear_cache(doctype=self.document_type)
frappe.cache().delete_key('workflow_' + self.name) # clear cache created in model/workflow.py
def create_custom_field_for_workflow_state(self):
frappe.clear_cache(doctype=self.document_type)
meta = frappe.get_meta(self.document_type)
if not meta.get_field(self.workflow_state_field):
# create custom field
frappe.get_doc({
"doctype":"Custom Field",
"dt": self.document_type,
"__islocal": 1,
"fieldname": self.workflow_state_field,
"label": self.workflow_state_field.replace("_", " ").title(),
"hidden": 1,
"allow_on_submit": 1,
"no_copy": 1,
"fieldtype": "Link",
"options": "Workflow State",
"owner": "Administrator"
}).save()
frappe.msgprint(_("Created Custom Field {0} in {1}").format(self.workflow_state_field,
self.document_type))
def update_default_workflow_status(self):
docstatus_map = {}
states = self.get("states")
for d in states:
if not d.doc_status in docstatus_map:
frappe.db.sql("""
UPDATE `tab{doctype}`
SET `{field}` = %s
WHERE ifnull(`{field}`, '') = ''
AND `docstatus` = %s
""".format(doctype=self.document_type, field=self.workflow_state_field),
(d.state, d.doc_status))
docstatus_map[d.doc_status] = d.state
def validate_docstatus(self):
def get_state(state):
for s in self.states:
if s.state==state:
return s
frappe.throw(frappe._("{0} not a valid State").format(state))
for t in self.transitions:
state = get_state(t.state)
next_state = get_state(t.next_state)
if state.doc_status=="2":
frappe.throw(frappe._("Cannot change state of Cancelled Document. Transition row {0}").format(t.idx))
if state.doc_status=="1" and next_state.doc_status=="0":
frappe.throw(frappe._("Submitted Document cannot be converted back to draft. Transition row {0}").format(t.idx))
if state.doc_status=="0" and next_state.doc_status=="2":
frappe.throw(frappe._("Cannot cancel before submitting. See Transition {0}").format(t.idx))
def set_active(self):
if int(self.is_active or 0):
# clear all other
frappe.db.sql("""UPDATE `tabWorkflow` SET `is_active`=0
WHERE `document_type`=%s""",
self.document_type)
@frappe.whitelist()
def get_fieldnames_for(doctype):
return [f.fieldname for f in frappe.get_meta(doctype).fields \
if f.fieldname not in no_value_fields]
|
Python
| 0
|
@@ -453,32 +453,59 @@
n_update(self):%0A
+%09%09self.update_doc_status()%0A
%09%09frappe.clear_c
@@ -1785,16 +1785,427 @@
.state%0A%0A
+%09def update_doc_status(self):%0A%09%09doc_before_save = self.get_doc_before_save()%0A%09%09for current_doc_state, doc_before_save_state in zip(self.states, doc_before_save.states):%0A%09%09%09if not doc_before_save_state.doc_status == current_doc_state.doc_status:%0A%09%09%09%09frappe.db.set_value(self.document_type,%0A%09%09%09%09%09%7Bself.workflow_state_field: doc_before_save_state.state%7D,%0A%09%09%09%09%09'docstatus',%0A%09%09%09%09%09current_doc_state.doc_status%0A%09%09%09%09)%0A%0A
%09def val
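
URL-decoded (tabs shown as four-space indents), the second hunk is the entire new method, and the first makes on_update() call it before clearing caches. For each state row whose doc_status changed relative to the saved copy of the Workflow, it finds the documents still carrying the old state name and writes the new docstatus onto them:

    def update_doc_status(self):
        doc_before_save = self.get_doc_before_save()
        for current_doc_state, doc_before_save_state in zip(self.states, doc_before_save.states):
            if not doc_before_save_state.doc_status == current_doc_state.doc_status:
                frappe.db.set_value(self.document_type,
                    {self.workflow_state_field: doc_before_save_state.state},
                    'docstatus',
                    current_doc_state.doc_status
                )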
|
ccbae90caafe39a174729bf9b95581c7a05a5554
|
Fix wrong logic in data insertion to Neo4j
|
src/app.py
|
src/app.py
|
from src.common.yaml import Yaml
from src.common.postgres import Postgres
from src.common.path import Path
from src.association_rules.transaction import Transaction
from src.association_rules.fpgrowth import FPGrowth
from src.common.neo4j_driver import Neo4jDriver
if __name__ == "__main__":
path = Path(__file__)
config = None
try:
config = Yaml(path.get_absolute_path("config.yml")).content
except FileNotFoundError:
print("Cannot find config.yml!")
postgres = Postgres(host=config['source']['host'], port=config['source']['port'],
user=config['source']['user'], password=config['source']['password'])
print("Connecting to Postgres...")
try:
postgres.connect(dbname=config['source']['dbname'])
print("OK")
except ConnectionRefusedError:
postgres = None
print("Failed")
if postgres is not None:
print("Querying from Postgres...")
records = postgres.query("SELECT DISTINCT " +
str(config['source']['transaction_column']) + ", " +
str(config['source']['item_column']) +
" FROM " + str(config['source']['table']) +
" ORDER BY " + str(config['source']['transaction_column']))
print("OK")
print("Disconnecting from Postgres...")
postgres.disconnect()
print("OK")
print("Making transactions list...")
transactions = Transaction.list_maker(records)
print("OK")
print("Mining using FP-growth...")
fpgrowth = FPGrowth(transactions, 0.1, 1, 5)
print("OK")
print("Rules created")
print(fpgrowth.no_rules)
# fpgrowth.pretty_print()
# print(fpgrowth.total_lift/fpgrowth.no_rules)
neo4j_driver = Neo4jDriver(host=config['target']['host'], port=config['target']['port'],
user=config['target']['user'], password=config['target']['password'])
print("Connecting to Neo4j database...")
try:
neo4j_driver.connect()
print("OK")
except ConnectionError:
neo4j_driver = None
print("Failed!")
# fpgrowth.pretty_print()
if neo4j_driver is not None:
print("Writing rules to Neo4j database...")
i = 0
for rule in fpgrowth.rules:
antecedents_str = ""
for antecedent in rule.antecedents:
antecedents_str += "'" + str(antecedent) + "',"
antecedents_str = antecedents_str[:-1]
# Create a new set if not existed in database
neo4j_driver.query("MERGE (s:ItemSet {name: {name}, support: {support}})",
{"name": antecedents_str, "support": rule.support})
for antecedent in rule.antecedents:
# Insert node :Item if not existed in database
neo4j_driver.query("MERGE (i:Item {name: {name}})", {"name": str(antecedent)})
# Insert relation between the node and the set
neo4j_driver.query("MATCH (i:Item),(s:ItemSet) WHERE i.name = {iname} AND s.name = {sname} "
"MERGE (i)-[r:OCCURS_IN]->(s) RETURN r",
{"iname": str(antecedent), "sname": antecedents_str})
# Insert node :Item using consequent item if not existed in database
neo4j_driver.query("MERGE (i:Item {name: {name}})", {"name": str(rule.consequent)})
# Create the relation between the node and the set with attributes:
# Consequent, Confidence, Lift
neo4j_driver.query("MATCH (i:Item),(s:ItemSet) WHERE i.name = {iname} AND s.name = {sname} "
"MERGE (i)<-[r:OCCURS_WITH { confidence:{confidence}, lift:{lift} }]-(s) RETURN r",
{"iname": str(rule.consequent), "sname": antecedents_str,
"confidence": rule.confidence, "lift": rule.lift})
print("[" + antecedents_str + "] -> '" + str(rule.consequent) + "'")
neo4j_driver.disconnect()
|
Python
| 0.001203
|
@@ -2286,16 +2286,30 @@
rint()%0A%0A
+ i = 0%0A
@@ -2798,28 +2798,8 @@
ame%7D
-, support: %7Bsupport%7D
%7D)%22,
@@ -2862,33 +2862,8 @@
_str
-, %22support%22: rule.support
%7D)%0A
@@ -3847,33 +3847,32 @@
s.name = %7Bsname%7D
-
%22%0A
@@ -3908,68 +3908,201 @@
i)%3C-
-%5Br:OCCURS_WITH %7B confidence:%7Bconfidence%7D, lift:%7Blift%7D %7D%5D-(s)
+%22%0A %22%5Br:OCCURS_WITH %7B support:%7Bsupport%7D, confidence:%7Bconfidence%7D, lift:%7Blift%7D %7D%5D%22%0A %22-(s)%22%0A %22
RET
@@ -4237,16 +4237,41 @@
+ %22support%22: rule.support,
%22confid
@@ -4330,23 +4330,57 @@
-print(%22
+i += 1%0A print(str(i) + %22.
%5B%22 + ant
@@ -4460,20 +4460,80 @@
driver.disconnect()%0A
+ print(%22Number of rules: %22 + str(fpgrowth.no_rules))%0A
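
The wrong logic named in the subject was storing support on the :ItemSet node: the first hunks strip "support: {support}" (and its parameter) from the MERGE that creates the set, and the later ones attach support to the OCCURS_WITH relationship alongside confidence and lift. The relationship write becomes, roughly (whitespace between the concatenated literals tidied here):

    neo4j_driver.query("MATCH (i:Item),(s:ItemSet) WHERE i.name = {iname} AND s.name = {sname} "
                       "MERGE (i)<-[r:OCCURS_WITH { support:{support}, confidence:{confidence}, lift:{lift} }]-(s) RETURN r",
                       {"iname": str(rule.consequent), "sname": antecedents_str,
                        "support": rule.support, "confidence": rule.confidence, "lift": rule.lift})

The remaining hunks number the output (i += 1 plus print(str(i) + ". [" + ...)) and print "Number of rules:" with fpgrowth.no_rules after disconnecting.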
|
2cb9496168766b3985826312298732944df4f018
|
fix similarity_query_interface documentation
|
lexos/interfaces/similarity_query_interface.py
|
lexos/interfaces/similarity_query_interface.py
|
from flask import request, session, render_template, send_file, Blueprint
from lexos.helpers import constants as constants
from lexos.managers import utility, session_manager as session_manager
from lexos.interfaces.base_interface import detect_active_docs
# this is a flask blue print
# it helps us to manage groups of views
# see here for more detail:
# http://exploreflask.com/en/latest/blueprints.html
# http://flask.pocoo.org/docs/0.12/blueprints/
sim_view = Blueprint('sim_query', __name__)
# Tells Flask to load this function when someone is at '/extension'
@sim_view.route("/similarity", methods=["GET", "POST"])
def similarity():
"""Handles the similarity query page functionality. Returns ranked list of
files and their cosine similarities to a comparison document.
:return: a response object (often a render_template call) to flask and
eventually to the browser.
"""
# Detect the number of active documents.
num_active_docs = detect_active_docs()
file_manager = utility.load_file_manager()
encoded_labels = {}
labels = file_manager.get_active_labels()
for i in labels:
encoded_labels[str(i)] = labels[i]
if request.method == 'GET':
# 'GET' request occurs when the page is first loaded
if 'analyoption' not in session:
session['analyoption'] = constants.DEFAULT_ANALYZE_OPTIONS
if 'similarities' not in session:
session['similarities'] = constants.DEFAULT_SIM_OPTIONS
return render_template(
'similarity.html',
labels=labels,
encodedLabels=encoded_labels,
docsListScore="",
docsListName="",
similaritiesgenerated=False,
itm="similarity-query",
numActiveDocs=num_active_docs)
if 'gen-sims' in request.form:
# 'POST' request occur when html form is submitted
# (i.e. 'Get Graphs', 'Download...')
docs_list_score, docs_list_name = utility.generate_similarities(
file_manager)
session_manager.cache_analysis_option()
session_manager.cache_sim_options()
return render_template(
'similarity.html',
labels=labels,
encodedLabels=encoded_labels,
docsListScore=docs_list_score,
docsListName=docs_list_name,
similaritiesgenerated=True,
itm="similarity-query",
numActiveDocs=num_active_docs)
if 'get-sims' in request.form:
# The 'Download Matrix' button is clicked on similarity.html.
session_manager.cache_analysis_option()
session_manager.cache_sim_options()
save_path, file_extension = utility.generate_sims_csv(file_manager)
utility.save_file_manager(file_manager)
return send_file(
save_path,
attachment_filename="similarity-query" + file_extension,
as_attachment=True)
|
Python
| 0.000001
|
@@ -691,16 +691,21 @@
onality.
+%0A%0A
Returns
@@ -719,20 +719,16 @@
list of
-%0A
files a
@@ -771,16 +771,20 @@
mparison
+%0A
documen
@@ -786,17 +786,16 @@
cument.%0A
-%0A
:ret
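
The four hunks only re-wrap the docstring: a blank line now follows the summary sentence, "files" joins the "Returns ranked list of" line, "document." drops to its own line, and the blank line before :return: disappears. The reflowed opening reads:

    """Handles the similarity query page functionality.

    Returns ranked list of files and their cosine similarities to a comparison
    document.
    :return: a response object (often a render_template call) to flask and
    eventually to the browser.
    """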
|
1d7f51cd3366598d238c4c545233d74e7e2fb4d9
|
Remove forgotten debug print
|
atomicapp_builder/cli.py
|
atomicapp_builder/cli.py
|
import argparse
import logging
import tempfile
import shutil
import sys
import atomicapp_builder
from atomicapp_builder.builder import Builder
from atomicapp_builder import constants
from atomicapp_builder import exceptions
from atomicapp_builder import resolver
logger = logging.getLogger(__name__)
def create_parser():
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='action')
build_sp = subparsers.add_parser('build')
build_sp.add_argument(
'--cccp-index',
dest='cccp_index',
help='URI of raw cccp index file (can be file:// for local file), defaults to '
'https://raw.githubusercontent.com/kbsingh/cccp-index/master/index.yml',
default=constants.DEFAULT_CCCP_INDEX)
build_sp.add_argument(
'--build-image',
dest='build_image',
help='Name of image that Dock should use to build images (defaults to "atomic-reactor")',
default='atomic-reactor')
build_sp.add_argument(
'--docker-registry',
dest='docker_registry',
help='URL of Docker registry to poll for existing images and push built images to. '
'Must be without http/https scheme.',
default=None)
build_sp.add_argument(
'--registry-insecure',
dest='registry_insecure',
help='If used, plain http will be used to connect to registry instead of https',
action='store_true',
default=False)
build_sp.add_argument(
'--check-binary-images',
dest='check_binary_images',
# TODO: do we want to check standard means of obtaining the image (e.g. docker.io)?
help='Check whether binary images are obtainable from given registry',
action='store_true',
default=False)
build_sp.add_argument(
'--keep-tmpdir',
dest='keep_tmpdir',
help='Keep tmpdir with sources of all the resolved apps and print a path to it',
action='store_true',
default=False)
# TODO: we would need to be able to specify tags for all built images,
# so we'll have to think of something smarter than just one tag, probably
# build_sp.add_argument(
# '--tag',
# dest='tag',
# help='Tag for the resulting image (app id will be used if tag is not provided)',
# default=None)
log_level_ag = build_sp.add_mutually_exclusive_group()
log_level_ag.add_argument(
'-q', '--quiet',
dest='log_level',
help='Only output names of built images after build is done',
action='store_const',
const=logging.ERROR)
log_level_ag.add_argument(
'-v', '--verbose',
dest='log_level',
help='Print lots of debugging information',
action='store_const',
const=logging.DEBUG)
build_sp.add_argument(
'what',
metavar='PATH | cccp:<app-id>',
help='Path to directory with Nulecule file to build or app id prefixed by "cccp:"')
return parser
def build(args):
# first resolve the images that were already built and that we'll need to build
with TempDir(keep=args['keep_tmpdir']) as tmpdir:
print(tmpdir)
apps = resolver.Resolver(
args['what'],
args['cccp_index'],
args['docker_registry'],
args['registry_insecure'],
tmpdir).resolve()
func_result = 0
for a in apps:
if a.meta_image.built:
logger.info('Meta image for app "{0}" already built.'.format(a.appid))
else:
doing_what = 'Building'
if args['docker_registry']:
doing_what += 'and pushing'
logger.info('{doing} meta image "{mi}" for app "{app}" ...'.
format(doing=doing_what, mi=a.meta_image.imagename, app=a.appid))
bldr = Builder(
args['build_image'],
a.meta_image,
registry=args['docker_registry'],
registry_insecure=args['registry_insecure'],
)
res = bldr.build()
if not res:
func_result = 1
for l in a.meta_image.build_result.build_logs:
logger.debug(l)
logger.info('{doing} meta image "{mi}" for app "{app}" {result}.'.
format(doing=doing_what, mi=a.meta_image.imagename, app=a.appid,
result='succeeded' if res else 'failed')
)
if args['check_binary_images']:
func_result = func_result or _check_binary_images(apps)
if args['keep_tmpdir']:
logger.info('You can find sources of all apps in {0}'.format(tmpdir))
return func_result
def _check_binary_images(apps):
res = 0
for app in apps:
logger.info('Checking for binary images required for app "{0}" ...'.format(app.appid))
for bi in app.binary_images:
if bi.built:
logger.info('Binary image "{0}" exists.'.format(bi.imagename))
else:
logger.error('Binary image "{0}" doesn\'t exist'.format(bi.imagename))
res = 2
return res
class TempDir(object):
"""A context manager that simulates tempfile.TemporaryDirectory, but can be told
to leave the directory untouched even after the context ends.
"""
def __init__(self, suffix='', prefix='tmp', dir=None, keep=False):
self.name = tempfile.mkdtemp(suffix, prefix, dir)
self.keep = keep
def __enter__(self):
return self.name
def __exit__(self, exc, value, tb):
if not self.keep:
shutil.rmtree(self.name)
def run():
parser = create_parser()
args = vars(parser.parse_args())
if args['log_level'] is None:
# TODO: seems that when set_defaults is used on the top level parser directly,
# then then it doesn't propagate to supbarsers; so just check it here
args['log_level'] = logging.INFO
atomicapp_builder.set_logging(args['log_level'])
logger.debug('atomicapp-builder invoked:')
logger.debug('%s', ' '.join(sys.argv))
if args['action'] == 'build':
result = 1
try:
result = build(args)
except exceptions.AtomicappBuilderException as e:
logger.error(e.to_str())
except Exception as e:
logger.exception('Exception while running %s:', sys.argv[0])
sys.exit(result)
|
Python
| 0.000001
|
@@ -3147,30 +3147,8 @@
ir:%0A
- print(tmpdir)%0A
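
The lone hunk deletes the leftover print(tmpdir) at the top of the TempDir block in build(); the path is already reported deliberately at the end of the function, when --keep-tmpdir is passed, via logger.info('You can find sources of all apps in {0}'.format(tmpdir)).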
|
b149d3e2be52a9876815b4599164210f086cf0c0
|
update TODO objectives
|
testCaesar.py
|
testCaesar.py
|
import unittest
import Caesar
class TestCryptMethods(unittest.TestCase):
"""Tests for Caesar.py"""
cryptInput = ['encrypt', 'Encrypt', 'decrypt', 'Decrypt', 'blah', 'WHOCARES']
encryptInput = ['foo', 'bar', 'Hello World', '342', '101010111']
decryptInput = ['ktt', 'gfw', 'Mjqqt%\twqi', '897', '656565666']
def setUp(self):
pass
def test_crypt(self):
result = []
for i in range(len(self.cryptInput)):
result.append(Caesar.crypt(self.cryptInput[i]))
self.assertTrue(result[0])
self.assertTrue(result[1])
self.assertFalse(result[2])
self.assertFalse(result[3])
#self.assertRaises
#self.assertRaises
def test_encryption(self):
for i in range(len(encryptInput)):
result[i] = Caesar.crypt(encryptInput[i])
def test_decryption(self):
pass
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -233,11 +233,11 @@
', '
-342
+xyz
', '
@@ -292,18 +292,18 @@
jqqt
-%25%5C
+ B
twqi', '
897'
@@ -302,24 +302,24 @@
', '
-897
+cde
', '
-656565666
+101010111
'%5D%0A
@@ -818,27 +818,39 @@
sult
-%5Bi%5D =
+.append(
Caesar.
+en
crypt
-(
+ion(self.
encr
@@ -861,16 +861,68 @@
nput%5Bi%5D)
+)%0A %0A #TODO: test encryption runs appropriately
%0A %0A
@@ -978,16 +978,67 @@
pass
+%0A %0A #TODO: test decryption runs appropriately
%0A%0Aif __n
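
Besides refreshing the fixtures ('342' becomes 'xyz'; decryptInput becomes ['ktt', 'gfw', 'Mjqqt Btwqi', 'cde', '101010111']), the hunks rewrite test_encryption from "result[i] = Caesar.crypt(encryptInput[i])" to "result.append(Caesar.encryption(self.encryptInput[i]))" and add TODO comments to both the encryption and decryption tests. The committed hunks still leave result unbound and the loop bound reading the bare name encryptInput; a version that would actually run (the two marked lines are assumptions beyond the commit) is:

    def test_encryption(self):
        result = []  # assumption: not initialized in the committed hunks
        for i in range(len(self.encryptInput)):  # assumption: commit keeps the unqualified encryptInput here
            result.append(Caesar.encryption(self.encryptInput[i]))
        #TODO: test encryption runs appropriately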
|
cf4debe97d48d42ac28fb8e2d328a8583e81a007
|
Fix version_info comparison
|
dev/ci.py
|
dev/ci.py
|
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
from .tests import run as run_tests
if sys.version_info > (2, 6):
from .lint import run as run_lint
def run():
"""
Runs the linter and tests
:return:
A bool - if the linter and tests ran successfully
"""
print('Python ' + sys.version.replace('\n', ''))
if sys.version_info > (2, 6):
print('')
lint_result = run_lint()
else:
lint_result = True
print('\nRunning tests')
sys.stdout.flush()
tests_result = run_tests()
return lint_result and tests_result
|
Python
| 0
|
@@ -158,30 +158,31 @@
rsion_info %3E
+=
(2,
-6
+7
):%0A from
@@ -424,14 +424,15 @@
fo %3E
+=
(2,
-6
+7
):%0A
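
Both guards change from sys.version_info > (2, 6) to sys.version_info >= (2, 7). Under tuple comparison the two are not equivalent: (2, 6, 1) > (2, 6) is True, so every 2.6 micro release slipped past the old test and the lint import was never actually skipped on 2.6. The corrected guard:

    if sys.version_info >= (2, 7):
        from .lint import run as run_lint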
|
18dd46c5008c55f959d1cacbdebe83974c88672e
|
improve fetch_followed_users()
|
app/management/commands/collect_data.py
|
app/management/commands/collect_data.py
|
# coding: utf-8
from concurrent.futures import ThreadPoolExecutor
import logging
import re
import time
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from retrying import retry
import requests
from app.models import UserRelation
from app.models import RepoStarring
from app.utils_timing import timing_decorator
logger = logging.getLogger('django')
def retry_if_remote_disconnected(exc):
from requests.exceptions import ConnectionError
if isinstance(exc, ConnectionError):
if 'RemoteDisconnected' in str(exc):
logger.info('retry_if_remote_disconnected')
return True
return False
class GitHubCrawler(object):
def __init__(self, token):
self.worker_number = 10
logger.info('worker_number: {0}'.format(self.worker_number))
self.token = token
self.min_stargazers_count = 30
self.session = requests.Session()
self.session.headers = {
'User-Agent': 'Albedo 0.2.0',
'Authorization': 'token {0}'.format(self.token),
}
@retry(retry_on_exception=retry_if_remote_disconnected, wait_fixed=1000 * 60)
def _make_reqeust(self, method, url, **kwargs):
logger.info('make_reqeust: {0} {1}'.format(method, url))
res = self.session.request('GET', url, **kwargs)
if res.status_code == 403:
# https://developer.github.com/v3/#rate-limiting
if 'API rate limit exceeded' in res.json().get('message'):
logger.info('Wait 15 minutes before retrying')
time.sleep(60 * 15)
res = self.session.request('GET', url, **kwargs)
return res
def _parse_total_page(self, res):
try:
link = res.links['last']['url']
except KeyError:
total_page = 0
else:
try:
page_number = re.search('https://api.github.com/[\w\/]+\?page=([\d]+)', link).group(1)
except AttributeError:
raise RuntimeError('Fail to parse the page number')
total_page = int(page_number)
logger.info('total_page: {0}'.format(total_page))
return total_page
def _fetch_pages_concurrently(self, endpoint):
res = self._make_reqeust('GET', endpoint)
total_page = self._parse_total_page(res)
def _fetch_page(page_number):
url = '{0}?page={1}'.format(endpoint, page_number)
res = self._make_reqeust('GET', url, params={'page': page_number})
return res.json()
with ThreadPoolExecutor(max_workers=self.worker_number) as executor:
response_gen = executor.map(_fetch_page, range(1, total_page + 1))
return response_gen
def fetch_user_info(self, username):
endpoint = 'https://api.github.com/users/{0}'.format(username)
res = self._make_reqeust('GET', endpoint)
return res.json()
@timing_decorator
def fetch_followd_users(self, username):
from_user = self.fetch_user_info(username)
try:
UserRelation.create_one(from_user, 'be', from_user)
except IntegrityError:
pass
endpoint = 'https://api.github.com/users/{0}/following'.format(username)
for user_list in self._fetch_pages_concurrently(endpoint):
for to_user in user_list:
try:
UserRelation.create_one(from_user, 'followed', to_user)
except IntegrityError:
continue
@timing_decorator
def fetch_starred_repos(self, username):
from_user = self.fetch_user_info(username)
endpoint = 'https://api.github.com/users/{0}/starred'.format(username)
for repo_list in self._fetch_pages_concurrently(endpoint):
for repo in repo_list:
# these situations could happen!
if not isinstance(repo, dict):
continue
if not repo.get('owner'):
continue
if repo.get('stargazers_count', 0) <= self.min_stargazers_count:
continue
RepoStarring.update_or_create_one(from_user, repo)
if repo['owner']['type'] == 'User':
try:
UserRelation.create_one(from_user, 'starred', repo['owner'])
except IntegrityError:
pass
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('-t', '--token', action='store', dest='token', required=True)
parser.add_argument('-u', '--username', action='store', dest='username', required=True)
def handle(self, *args, **options):
try:
User.objects.create_superuser('albedo', email='', password='hyperion')
except IntegrityError:
pass
github_token = options['token']
github_username = options['username']
self.stdout.write(self.style.SUCCESS('GitHub token: {0}'.format(github_token)))
self.stdout.write(self.style.SUCCESS('GitHub username: @{0}'.format(github_username)))
self.stdout.write(self.style.SUCCESS('Start data collection'))
crawler = GitHubCrawler(token=github_token)
crawler.fetch_followd_users(github_username)
crawler.fetch_starred_repos(github_username)
related_usernames = UserRelation.objects \
.filter(from_username=github_username) \
.order_by('to_username') \
.values_list('to_username', flat=True) \
.distinct()
user_count = related_usernames.count()
self.stdout.write(self.style.SUCCESS('Total number of related users: {0}'.format(user_count)))
for username in related_usernames.iterator():
if username != github_username:
crawler.fetch_starred_repos(username)
repositories = RepoStarring.objects \
.values_list('repo_full_name', flat=True) \
.distinct()
repo_count = repositories.count()
self.stdout.write(self.style.SUCCESS('Total number of repositories: {0}'.format(repo_count)))
|
Python
| 0.000002
|
@@ -3023,32 +3023,33 @@
def fetch_follow
+e
d_users(self, us
@@ -3046,32 +3046,44 @@
s(self, username
+, fetch_more
):%0A from_
@@ -3593,24 +3593,128 @@
continue
+%0A if fetch_more:%0A self.fetch_followed_users(to_user, fetch_more=False)
%0A%0A @timin
@@ -5476,16 +5476,17 @@
h_follow
+e
d_users(
@@ -5492,32 +5492,49 @@
(github_username
+, fetch_more=True
)%0A crawle
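
Decoded, the hunks rename fetch_followd_users to fetch_followed_users, add a fetch_more parameter (no default), and recurse one level per followed user; the inner call passes fetch_more=False, so the crawl stops at depth two, and the entry point becomes crawler.fetch_followed_users(github_username, fetch_more=True). The changed loop body, as the hunks read (to_user, the raw API payload, is passed through as the next username):

    for to_user in user_list:
        try:
            UserRelation.create_one(from_user, 'followed', to_user)
        except IntegrityError:
            continue
        if fetch_more:
            self.fetch_followed_users(to_user, fetch_more=False)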
|
22ae21ab43c1f94807e282b7d50987af13a6a9d6
|
Exclude ps1 modules from the TestModules unittest
|
test/units/TestModules.py
|
test/units/TestModules.py
|
# -*- coding: utf-8 -*-
import os
import ast
import unittest
from ansible import utils
class TestModules(unittest.TestCase):
def list_all_modules(self):
paths = utils.plugins.module_finder._get_paths()
paths = [x for x in paths if os.path.isdir(x)]
module_list = []
for path in paths:
for (dirpath, dirnames, filenames) in os.walk(path):
for filename in filenames:
module_list.append(os.path.join(dirpath, filename))
return module_list
def test_ast_parse(self):
module_list = self.list_all_modules()
ERRORS = []
# attempt to parse each module with ast
for m in module_list:
try:
ast.parse(''.join(open(m)))
except Exception, e:
ERRORS.append((m, e))
assert len(ERRORS) == 0, "get_docstring errors: %s" % ERRORS
|
Python
| 0
|
@@ -426,16 +426,119 @@
enames:%0A
+ (path, ext) = os.path.splitext(filename)%0A if ext != %22.ps1%22:%0A
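
Decoded, the hunk wraps the append in an extension check so PowerShell modules never reach ast.parse, which only understands Python source:

    for filename in filenames:
        (path, ext) = os.path.splitext(filename)
        if ext != ".ps1":
            module_list.append(os.path.join(dirpath, filename))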
|
b9a318b06ec1c863d599a97dd5bb56ea571f30d5
|
Revise choice of CSS compressor
|
parliament/default_settings.py
|
parliament/default_settings.py
|
import os
DEBUG = True
ADMINS = [
('Michael Mulley', 'michael@michaelmulley.com'),
]
MANAGERS = ADMINS
PROJ_ROOT = os.path.dirname(os.path.realpath(__file__))
CACHE_MIDDLEWARE_KEY_PREFIX = 'parl'
CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
# Set to True to disable functionality where user-provided data is saved
PARLIAMENT_DB_READONLY = False
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'America/Montreal'
# Language code for this installation.
# MUST BE either 'en' or 'fr'
LANGUAGE_CODE = 'en'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'mediafiles'))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/media/'
STATICFILES_DIRS = [os.path.join(PROJ_ROOT, 'static')]
STATIC_ROOT = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'staticfiles'))
STATIC_URL = '/static/'
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
]
COMPRESS_CSS_FILTERS = [
'compressor.filters.css_default.CssAbsoluteFilter',
'compressor.filters.cssmin.CSSCompressorFilter'
]
COMPRESS_JS_FILTERS = []
COMPRESS_OFFLINE = True
PARLIAMENT_LANGUAGE_MODEL_PATH = os.path.realpath(os.path.join(PROJ_ROOT, '..', '..', 'language_models'))
PARLIAMENT_DISABLE_WORDCLOUD = True
APPEND_SLASH = False
SESSION_COOKIE_HTTPONLY = True
SESSION_COOKIE_AGE = 60*60*24*60 # 60 days
SESSION_COOKIE_SECURE = True
PARLIAMENT_API_HOST = 'api.openparliament.ca'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(PROJ_ROOT, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'parliament.accounts.context_processors.auth',
],
},
},
]
MIDDLEWARE_CLASSES = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.common.CommonMiddleware',
'parliament.accounts.middleware.AuthenticatedEmailMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
#'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
#'debug_toolbar.middleware.DebugToolbarMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
'parliament.core.api.FetchFromCacheMiddleware',
]
ROOT_URLCONF = 'parliament.urls'
WSGI_APPLICATION = 'parliament.wsgi.application'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.humanize',
'django.contrib.flatpages',
'django.contrib.sitemaps',
'django.contrib.staticfiles',
'django_extensions',
'haystack',
'sorl.thumbnail',
'compressor',
'parliament.core',
'parliament.accounts',
'parliament.hansards',
'parliament.elections',
'parliament.bills',
'parliament.politicians',
'parliament.activity',
'parliament.alerts',
'parliament.committees',
'parliament.search',
'parliament.text_analysis',
]
THUMBNAIL_SUBDIR = '_thumbs'
THUMBNAIL_PROCESSORS = (
'sorl.thumbnail.processors.colorspace',
'sorl.thumbnail.processors.autocrop',
'parliament.core.thumbnail.crop_first',
'sorl.thumbnail.processors.scale_and_crop',
'sorl.thumbnail.processors.filters',
)
SOUTH_TESTS_MIGRATE = False
TEST_RUNNER = 'parliament.core.test_utils.TestSuiteRunner'
TEST_APP_PREFIX = 'parliament'
LOGGING = {
'version': 1,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(module)s %(levelname)s %(message)s'
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'logging.NullHandler',
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler',
}
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'parliament': {
'handlers': ['console'],
'level': 'WARNING',
}
},
}
|
Python
| 0.000003
|
@@ -1757,21 +1757,15 @@
min.
+r
CSS
-Compressor
+Min
Filt
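
The hunk renames the second CSS filter to compressor.filters.cssmin.rCSSMinFilter, django-compressor's rcssmin-backed minifier, in place of the CSSCompressorFilter name the old settings pointed at:

    COMPRESS_CSS_FILTERS = [
        'compressor.filters.css_default.CssAbsoluteFilter',
        'compressor.filters.cssmin.rCSSMinFilter'
    ]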
|
a63746951ee802c5082c34c5c16afe3cbcd8ede4
|
fix last flake8 error
|
doc/source/conf.py
|
doc/source/conf.py
|
# -*- coding: utf-8 -*-
#
# SimPhoNy-Mayavi documentation build configuration file
#
import sys
def mock_modules():
MOCK_MODULES = []
MOCK_TYPES = []
try:
import paraview # noqa
except ImportError:
MOCK_MODULES.extend((
'paraview',
'paraview.simple',
'paraview.servermanager',
'paraview.numpy_support',
'paraview.vtk',
'vtkRenderingPython'))
TYPES = {
mock_type: type(mock_type, bases, {'__module__': path})
for path, mock_type, bases in MOCK_TYPES}
class DocMock(object):
def __init__(self, *args, **kwds):
if '__doc_mocked_name__' in kwds:
self.__docmock_name__ = kwds['__docmocked_name__']
else:
self.__docmock_name__ = 'Unknown'
def __getattr__(self, name):
if name in ('__file__', '__path__'):
return '/dev/null'
else:
return TYPES.get(name, DocMock(__docmock_name__=name))
def __call__(self, *args, **kwards):
return DocMock()
@property
def __name__(self):
return self.__docmock_name__
def __repr__(self):
return '<DocMock.{}>'.format(self.__name__)
sys.modules.update(
(mod_name, Mock(mocked_name=mod_name)) for mod_name in MOCK_MODULES)
print 'mocking modules {} and types {}'.format(MOCK_MODULES, MOCK_TYPES)
# -- General configuration ------------------------------------------------
# check and mock missing modules
mock_modules()
# import the release and version value from the module
from simphony_paraview._version import full_version, version # noqa
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
'sectiondoc.styles.legacy']
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
project = u'SimPhoNy-Paraview'
copyright = u'2015, SimPhoNy FP7 Collaboration'
pygments_style = 'sphinx'
autoclass_content = 'both'
release = version
version = full_version
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# -- Options for HTML output ----------------------------------------------
html_theme = 'alabaster'
html_logo = '_static/simphony_logo.png'
html_static_path = ['_static']
htmlhelp_basename = 'SimPhoNy-ParaviewDoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {}
latex_documents = [(
'index', 'SimPhoNy-Mayavi.tex', u'SimPhoNy-Mayavi Documentation',
u'SimPhoNy FP7 Collaboration', 'manual')]
latex_logo = '_static/simphony_logo.png'
# -- Options for manual page output ---------------------------------------
man_pages = [(
'index', 'simphony', u'SimPhoNy-Mayavi Documentation',
[u'SimPhoNy FP7 Collaboration'], 1)]
# -- Options for Texinfo output -------------------------------------------
texinfo_documents = [(
'index', 'SimPhoNy', u'SimPhoNy-Paraview Documentation',
u'SimPhoNy FP7 Collaboration', 'SimPhoNy-Paraview', 'Visualisation tools',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
epub_title = u'SimPhoNy-Paraview'
epub_author = u'SimPhoNy FP7 Collaboration'
epub_publisher = u'SimPhoNy FP7 Collaboration'
epub_copyright = u'2015, SimPhoNy FP7 Collaboration'
epub_exclude_files = ['search.html']
|
Python
| 0.000001
|
@@ -1329,16 +1329,19 @@
d_name,
+Doc
Mock(moc
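
The flake8 error was an undefined name: mock_modules() defines DocMock, but the sys.modules.update generator constructed Mock objects, a name that exists nowhere in the file. The hunk aligns the call:

    sys.modules.update(
        (mod_name, DocMock(mocked_name=mod_name)) for mod_name in MOCK_MODULES)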
|
7f00ff930b70b9c0ca00874d00704cdf540e3939
|
correct errors
|
serial_test.py
|
serial_test.py
|
import datetime
import sys
import pigpio
import time
import math
time_record = int(time.time() * 1000)
time_limit = 50
pi = pigpio.pi()
sensor_message_size = 7
sensor_signal_pin = 4
dead_pin = 17
pi.set_mode(sensor_signal_pin, pigpio.OUTPUT)
h1 = pi.serial_open("/dev/ttyAMA0", 9600)
pi.serial_write_byte(h1, 10 * 2)
pi.write(sensor_signal_pin, pigpio.LOW)
print("start")
sita = 1
try:
while True:
while (int(time.time() * 1000) - time_record) <= time_limit:
time.sleep(0.002)
time_record = int(time.time() * 1000)
distance = []
pi.serial_read(h1) # clear any redundant data
pi.write(sensor_signal_pin, pigpio.HIGH)
while pi.serial_data_available(h1) < sensor_message_size - 1:
# print( pi.serial_data_available(h1))
time.sleep(0.0007)
(b, d) = pi.serial_read(h1, sensor_message_size)
pi.write(sensor_signal_pin, pigpio.LOW)
sets = []
for a in d:
sets.append(int(a) / 2.0)
if pi.read(dead_pin) == pigpio.LOW:
print("dead")
if not (abs(sets[2] - sets[1]) > 7 and abs(sets[4] - sets[5]) > 7):
print("SMALLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLER")
if sets[2] < 40:
a = sets[1]+0.5
b = sets[2]
else:
a = sets[5]+0.5
b = sets[4]
c = math.sqrt(a ** 2 + b ** 2 - 2 * a * b * math.cos(math.pi * 25 / 180))
sita = math.acos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c))
ans = a * math.sin(math.pi - sita) / math.sin(sita - math.pi * 25 / 180)
sets[3] = round(ans, 1)
print([sets[0], sets[1], sets[2], sets[3], sets[4], sets[5], sets[6]], round(math.degrees(sita), 1))
# distance = normalize(distance)
except KeyboardInterrupt:
pi.serial_close(h1)
sys.exit(0)
|
Python
| 0.999897
|
@@ -1088,12 +1088,8 @@
if
-not
(abs
@@ -1112,16 +1112,34 @@
1%5D)
-%3E
+%3C
7 and
+sets%5B2%5D %3C 40) or (
abs(
@@ -1161,11 +1161,27 @@
5%5D)
-%3E
+%3C
7
+ and set%5B4%5D %3C 40
):%0A
@@ -1202,46 +1202,15 @@
nt(%22
-SMALLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLER
+%E4%BF%AE%E6%AD%A3FIXED
%22)%0A
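
Decoded, the guard flips from rejecting near-equal readings to acting on them: the correction branch now runs when either sensor pair differs by less than 7 and the nearer reading is under 40, and the shouting log line becomes "修正FIXED" (修正 is Japanese for "correction"). The committed hunk literally writes set[4], which would subscript the set builtin and raise a TypeError; the sketch below assumes sets[4] was intended:

    if (abs(sets[2] - sets[1]) < 7 and sets[2] < 40) or (abs(sets[4] - sets[5]) < 7 and sets[4] < 40):
        print("修正FIXED")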
|
de039312bc8b356eb1a75f797c2be4d02643f3f2
|
Append "/" to get_step_path_prefix
|
simpleflow/step/workflow.py
|
simpleflow/step/workflow.py
|
import os
import copy
from collections import defaultdict
from .constants import STEP_ACTIVITY_PARAMS_DEFAULT
from .submittable import Step
from .tasks import GetStepsDoneTask
from simpleflow import activity, settings, task
class WorkflowStepMixin(object):
def get_step_bucket(self):
"""
Return the S3 bucket where to store the steps files
"""
return '/'.join((settings.SIMPLEFLOW_S3_HOST, settings.STEP_BUCKET))
def get_step_path_prefix(self):
"""
Return the S3 bucket's path prefix where to store the steps files
"""
return os.path.join(self.get_execution_context().get("workflow_id", "default"), 'steps')
def get_step_activity_params(self):
"""
Returns the params for GetStepsDoneTask and MarkStepAsDone activities
Will be merged with the default ones
"""
return {}
def add_forced_steps(self, steps, reason=None):
"""
Add steps to force
"""
if not hasattr(self, 'steps_forced'):
self.steps_forced = set()
self.steps_forced_reasons = defaultdict(set)
steps = set(steps)
self.steps_forced |= set(steps)
if reason:
for step in steps:
self.steps_forced_reasons[step].add(reason)
def get_forced_steps(self):
return list(getattr(self, 'steps_forced', []))
def add_skipped_steps(self, steps, reason=None):
"""
Add steps to skip
"""
if not hasattr(self, 'steps_skipped'):
self.steps_skipped = set()
self.steps_skipped_reasons = defaultdict(set)
steps = set(steps)
self.steps_skipped |= set(steps)
if reason:
for step in steps:
self.steps_skipped_reasons[step].add(reason)
def get_skipped_steps(self):
return list(getattr(self, 'steps_skipped', []))
def _get_step_activity_params(self):
"""
Returns the merged version between self.get_step_activity_params()
and the default STEP_ACTIVITY_PARAMS_DEFAULT + workflow task list
"""
activity_params_merged = copy.copy(STEP_ACTIVITY_PARAMS_DEFAULT)
if hasattr(self, 'task_list'):
activity_params_merged["task_list"] = self.task_list
activity_params = self.get_step_activity_params()
if activity_params:
activity_params_merged.update(activity_params)
return activity_params_merged
def step(self, *args, **kwargs):
return Step(*args, **kwargs)
def get_steps_done_activity(self):
return task.ActivityTask(activity.Activity(
GetStepsDoneTask,
**self._get_step_activity_params()),
self.get_step_bucket(),
self.get_step_path_prefix())
def get_steps_done(self):
return self.submit(
self.get_steps_done_activity()).result
|
Python
| 0.000001
|
@@ -675,16 +675,17 @@
, 'steps
+/
')%0A%0A
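
os.path.join never appends a trailing separator, so the hunk bakes one into the literal; get_step_path_prefix() now returns "<workflow_id>/steps/" rather than "<workflow_id>/steps", presumably so that key names appended to the prefix do not fuse with "steps":

    return os.path.join(self.get_execution_context().get("workflow_id", "default"), 'steps/')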
|
86188c5415e82e9a31bae20e672fe3eebbde8767
|
Update print message in gen_jv_xml.py
|
gen_jv_xml.py
|
gen_jv_xml.py
|
#!/usr/bin/python
from sys import argv
import codecs
print "Start."
if len(argv) < 5:
print "Usage : " + argv[0] + " CoordinatesFilename LocationNamePrefix countryName CityName"
print "Example : ./gen_jv_xml.py raw.txt FakePrefix Taiwan Taipei"
exit(0)
currentScript, inputFilename, inputLocationNamePrefix, inputCountry, inputCity = argv
print "currentScript: %r" % currentScript
print "inputFilename: %r" % inputFilename
print "inputLocationNamePrefix: %r" % inputLocationNamePrefix
print "inputCountry: %r" % inputCountry
print "inputCity: %r" % inputCity
print "Opening the rawfile..."
rawfile = codecs.open(inputFilename, 'r', encoding='utf-8')
xmlfile = codecs.open(inputFilename + ".xml.txt", 'w', encoding='utf-8')
currentDataState = 0; # 0:Initialize state; 1: We are now handling departure data; 2: We are now handling destination data;
departureLatList = []
departureLonList = []
destinationLatList = []
destinationLonList = []
# Processing all lines in rawfile
lineCount=0
ignoredLineCount=0
writedLineCount=0
for string in rawfile:
lineCount = lineCount + 1
#print "Processing[%d]:%s" % (lineCount, string)
if (string[0] == '#') :
print "!!!Ignore this line!!![%d]:%s" % (lineCount, string)
ignoredLineCount+=1
continue
# Skip the header (first line)
if (lineCount == 1) :
continue
strtmp1 = string
# Find '('
keyIdxNext = strtmp1.find('(', 0)
while (keyIdxNext != -1):
# Initialize departure/destination state
if (currentDataState == 0) :
currentDataState = 1
startIdx = keyIdxNext + 1
# Find ','
keyIdxEnd = strtmp1.find(',', startIdx)
endIdx = keyIdxEnd
strtmp2 = strtmp1[startIdx:endIdx]
#print "Found latitute:%s" % (strtmp2)
# Save latitute
if (currentDataState == 1) :
departureLatList.append(strtmp2);
elif (currentDataState == 2) :
destinationLatList.append(strtmp2);
# Find ','
keyIdxNext = strtmp1.find(',', endIdx)
startIdx = keyIdxNext + 2
# Find ')'
keyIdxEnd = strtmp1.find(')', startIdx)
endIdx = keyIdxEnd
strtmp2 = strtmp1[startIdx:endIdx]
#print "Found longitute:%s" % (strtmp2)
# Save longitute
if (currentDataState == 1) :
departureLonList.append(strtmp2);
elif (currentDataState == 2) :
destinationLonList.append(strtmp2);
# Try to find next keyIdxNext
keyIdxNext = strtmp1.find('(', endIdx)
# Toggle departure/destination state
if (currentDataState == 1) :
currentDataState = 2
elif (currentDataState == 2) :
currentDataState = 1
# After all lines in rawfile have been parsed
for index in range(len(departureLatList)):
depLat = departureLatList[index] + "0"
depLon = departureLonList[index] + "0"
desLat = destinationLatList[index] + "0"
desLon = destinationLonList[index] + "0"
#print "%s,%s\t%s,%s" % (depLat, depLon, desLat, desLon)
# Write xml results to xmlfile
# Prepare datas
locationNameDeparture = inputLocationNamePrefix + "_Departure_" + str(index+1)
locationNameDestination = inputLocationNamePrefix + "_Destination_" + str(index+1)
country = inputCountry
city = inputCity
postalCode=""
street=""
houseNumber=""
crossing=""
# Write departure
print "Writing :[%d]%s(%s,%s), %s(%s,%s)" % (index+1, locationNameDeparture, depLat, depLon, locationNameDestination, desLat, desLon)
xmlfile.write(" <!-- For " + country + "." + city + " JunctionView Test -->\n")
xmlfile.write(" <Location name=\"" + locationNameDeparture + "\">\n")
xmlfile.write(" <address country=\"" + country + "\" city=\"" + city + "\" postalCode=\"" + postalCode + "\" street=\"" + street + "\" houseNumber=\"" + houseNumber + "\" crossing=\"" + crossing + "\" />\n")
xmlfile.write(" <position latitude=\"" + depLat + "\" longitude=\"" + depLon + "\" />\n")
xmlfile.write(" </Location>\n")
# Write destination
xmlfile.write(" <Location name=\"" + locationNameDestination + "\">\n")
xmlfile.write(" <address country=\"" + country + "\" city=\"" + city + "\" postalCode=\"" + postalCode + "\" street=\"" + street + "\" houseNumber=\"" + houseNumber + "\" crossing=\"" + crossing + "\" />\n")
xmlfile.write(" <position latitude=\"" + desLat + "\" longitude=\"" + desLon + "\" />\n")
xmlfile.write(" </Location>\n")
writedLineCount+=1
xmlfile.close()
rawfile.close()
print "writedLineCount:[%d], ignoredLineCount:[%d]" % (writedLineCount, ignoredLineCount)
print "Done."
|
Python
| 0.000001
|
@@ -1172,35 +1172,27 @@
nt %22
-!!!
Ignore
- this
+d
line
-!!!
%5B%25d%5D
-:
+ :
%25s%22
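
Purely cosmetic: the skip message for comment lines drops its exclamation-mark decoration, going from "!!!Ignore this line!!![%d]:%s" to "Ignored line[%d] : %s".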
|
4de07bc77efd617acd7629d8c636bb6515d2527e
|
Allow skipping in retrieval
|
mangaki/mangaki/management/commands/retrieve.py
|
mangaki/mangaki/management/commands/retrieve.py
|
from django.core.management.base import BaseCommand, CommandError
from mangaki.utils.anidb import AniDB
from mangaki.models import Anime, Artist
def get_or_create_artist(name):
last, first = name.split()
try:
if Artist.objects.filter(first_name=first, last_name=last).count():
contestants = Artist.objects.filter(first_name=first, last_name=last)
return Artist.objects.get(first_name=first, last_name=last)
elif Artist.objects.filter(first_name=first).count():
contestants = Artist.objects.filter(first_name=first)
return Artist.objects.get(first_name=first)
elif Artist.objects.filter(last_name=last).count():
contestants = Artist.objects.filter(last_name=last)
return Artist.objects.get(last_name=last)
except:
for i, artist in enumerate(contestants):
print('%d: %s' % (i, artist))
answer = int(input('Which one? '))
if answer < len(contestants):
return contestants[answer]
artist = Artist(first_name=first, last_name=last)
artist.save()
return artist
def try_replace(anime, key, artist_name):
print(key, ':', artist_name)
artist = get_or_create_artist(artist_name)
if getattr(anime, key) == artist:
return
answer = input('Remplacer %s par %s ? ' % (getattr(anime, key), artist))
if answer == 'y':
setattr(anime, key, artist)
anime.save()
class Command(BaseCommand):
args = ''
help = 'Retrieve AniDB data'
def handle(self, *args, **options):
category = 'anime';
work_query = 'SELECT mangaki_{category}.work_ptr_id, mangaki_work.id, mangaki_work.title, mangaki_work.poster, mangaki_work.nsfw, COUNT(mangaki_work.id) rating_count FROM mangaki_{category}, mangaki_work, mangaki_rating WHERE mangaki_{category}.work_ptr_id = mangaki_work.id AND mangaki_rating.work_id = mangaki_work.id AND mangaki_{category}.anidb_aid > 0 GROUP BY mangaki_work.id, mangaki_{category}.work_ptr_id HAVING COUNT(mangaki_work.id) >= {min_ratings} ORDER BY {order_by}'
a = AniDB('mangakihttp', 1)
for anime in Anime.objects.raw(work_query.format(category=category, min_ratings=6, order_by='rating_count DESC')):
print(anime.title, anime.id)
creators = a.get(anime.anidb_aid).creators
print(creators)
for creator in creators.findAll('name'):
if creator['type'] == 'Director':
try_replace(anime, 'director', creator.string)
elif creator['type'] == 'Music':
try_replace(anime, 'composer', creator.string)
elif creator['type'] == 'Original Work':
try_replace(anime, 'author', creator.string)
anime.save()
|
Python
| 0.000002
|
@@ -133,24 +133,35 @@
nime, Artist
+%0Aimport sys
%0A%0Adef get_or
@@ -178,24 +178,48 @@
tist(name):%0A
+ if ' ' in name:%0A
last, fi
@@ -237,16 +237,57 @@
split()%0A
+ else:%0A last, first = name, ''%0A
try:
@@ -2195,16 +2195,103 @@
tp', 1)%0A
+ start = int(sys.argv%5B2%5D) if len(sys.argv) %3E 2 else 0 # Skipping%0A i = 0%0A
@@ -2417,22 +2417,81 @@
-print(
+if i %3C start:%0A continue%0A print(i, ':',
anime.ti
@@ -3005,28 +3005,47 @@
anime.save()%0A
+ i += 1%0A
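
Decoded, the hunks import sys, make get_or_create_artist tolerate single-word names (last, first = name, '' when there is no space to split on), and add skipping to handle(): start = int(sys.argv[2]) if len(sys.argv) > 2 else 0, an i counter printed with each title, if i < start: continue at the top of the loop, and i += 1 after anime.save(). As committed, skipped iterations never reach the increment, so with a non-zero start the counter stalls at 0 and every title is skipped; a variant that counts every row regardless (the enumerate is an adjustment beyond the commit) matches the subject line's intent:

    start = int(sys.argv[2]) if len(sys.argv) > 2 else 0  # resume index
    for i, anime in enumerate(Anime.objects.raw(work_query.format(
            category=category, min_ratings=6, order_by='rating_count DESC'))):
        if i < start:
            continue  # already processed in a previous run
        print(i, ':', anime.title, anime.id)
        # ... creator-matching body unchanged ...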
|
8c1a5a04056fe5333f01c0208e1ba98e298838ef
|
Add check option and Fix pattern
|
update-high-use/edit.py
|
update-high-use/edit.py
|
# -*- coding: utf-8 -*-
import argparse
import json
import os
import re
import pymysql
os.environ['PYWIKIBOT_DIR'] = os.path.dirname(os.path.realpath(__file__))
import pywikibot
from config import config_page_name, database # pylint: disable=E0611,W0614
os.environ['TZ'] = 'UTC'
site = pywikibot.Site()
site.login()
config_page = pywikibot.Page(site, config_page_name)
cfg = config_page.text
cfg = json.loads(cfg)
print(json.dumps(cfg, indent=4, ensure_ascii=False))
if not cfg['enable']:
exit('disabled\n')
db = pymysql.connect(host=database['host'],
user=database['user'],
passwd=database['passwd'],
db=database['db'],
charset=database['charset'])
cur = db.cursor()
def get_new_usage(templatename):
cur.execute("""SELECT `count` FROM `{}` WHERE `wiki` = 'zhwiki' AND `title` = %s""".format(database['table']), (templatename))
row = cur.fetchone()
if row is None:
return None
return row[0]
def maintain_doc(text):
text = re.sub(r'<includeonly><!-- 在這裡加入模板的保護標識 --></includeonly>', '', text)
return text
def update(templatename, dry_run, add_template=False):
templatename = pywikibot.Page(site, templatename).title()
print('Checking {}'.format(templatename))
templatedoc = pywikibot.Page(site, '{}/doc'.format(templatename))
if not templatedoc.exists():
print('\t/doc does not exist')
return
if templatedoc.title() in cfg['whitelist']:
print('\tTemplate in whitelist, Skip')
return
new_usage = get_new_usage(templatename)
if new_usage is None:
print('Cannot get new usage')
return
text = templatedoc.text
m = re.search(r'{{\s*(?:High-use|High-risk|高風險模板|高风险模板|U!|High[ _]use)\s*\|\s*([0-9,+]+)\s*(?:\||}})', text, flags=re.I)
if m:
old_usage = m.group(1)
old_usage = re.sub(r'[,+]', '', old_usage)
old_usage = int(old_usage)
diff = (new_usage - old_usage) / old_usage
print('\tUsage: Old: {}, New: {}, Diff: {:+.1f}%'.format(old_usage, new_usage, diff * 100))
if abs(diff) > cfg['diff_limit']:
print('\tUpdate template usage to {}'.format(new_usage))
text = re.sub(r'({{\s*(?:High-use|High-risk|高風險模板|高风险模板|U!|High[ _]use)\s*\|)\s*(?:[0-9,+]+)\s*(\||}})', r'\g<1>{}\g<2>'.format(new_usage), text, flags=re.I)
text = maintain_doc(text)
summary = cfg['summary'].format(new_usage)
pywikibot.showDiff(templatedoc.text, text)
templatedoc.text = text
print('\t', summary)
if not dry_run:
templatedoc.save(summary=summary, minor=False)
else:
print('\tNot reach diff_limit')
elif add_template:
m2 = re.search(r'{{\s*(Template[ _]doc page viewed directly|內聯模板文件|内联模板文件|Template[ _]doc inline|内联模板文档|內聯模板文檔|Documentation[ _]subpage)\s*(\||}})', text)
templatetext = '{{{{High-use|{}}}}}\n'.format(new_usage)
if m2:
text = re.sub(
r'({{\s*(?:Template[ _]doc page viewed directly|內聯模板文件|内联模板文件|Template[ _]doc inline|内联模板文档|內聯模板文檔|Documentation[ _]subpage)\s*(?:\||}}).*\n)',
r'\1{}'.format(templatetext),
text
)
else:
text = templatetext + text
text = maintain_doc(text)
summary = cfg['summary_insert'].format(new_usage)
pywikibot.showDiff(templatedoc.text, text)
templatedoc.text = text
print('\t', summary)
if not dry_run:
templatedoc.save(summary=summary, minor=False)
else:
        print('\tCannot get old usage')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('template', nargs='?')
parser.add_argument('--dry-run', action='store_true', dest='dry_run')
parser.add_argument('--add', action='store_true', dest='add')
parser.set_defaults(dry_run=False, add=False)
args = parser.parse_args()
print(args)
if args.template:
update(args.template, args.dry_run, args.add)
else:
highusetem = pywikibot.Page(site, cfg['highuse_template'])
for page in highusetem.embeddedin(namespaces=[10, 828]):
title = page.title()
if re.search(cfg['skip_titles'], title):
continue
update(title, args.dry_run)
|
Python
| 0
|
@@ -1163,16 +1163,22 @@
dry_run
+=False
, add_te
@@ -1189,16 +1189,29 @@
te=False
+, check=False
):%0A t
@@ -2658,32 +2658,136 @@
if not dry_run:%0A
+ if check and input('Save?').lower() not in %5B'', 'y', 'yes'%5D:%0A return%0A
@@ -3071,24 +3071,36 @@
%7C%7C%7D%7D)', text
+, flags=re.I
)%0A te
@@ -3418,24 +3418,53 @@
text
+,%0A flags=re.I,
%0A
@@ -3741,32 +3741,128 @@
if not dry_run:%0A
+ if check and input('Save?').lower() not in %5B'', 'y', 'yes'%5D:%0A return%0A
temp
@@ -4167,16 +4167,22 @@
rgument(
+'-a',
'--add',
@@ -4215,16 +4215,92 @@
='add')%0A
+ parser.add_argument('-c', '--check', action='store_true', dest='check')%0A
pars
@@ -4335,24 +4335,37 @@
e, add=False
+, check=False
)%0A args =
@@ -4454,16 +4454,24 @@
mplate,
+dry_run=
args.dry
@@ -4481,15 +4481,46 @@
n, a
-rgs.add
+dd_template=args.add, check=args.check
)%0A
@@ -4797,16 +4797,24 @@
(title,
+dry_run=
args.dry
|
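The core of this change is the usage-count substitution, which can be exercised on its own. A reduced sketch keeping only the High-use alias but the same group structure as the pattern above:

import re

# Group 1 keeps the template opening, group 2 the closing pipe or braces;
# the old count (digits, commas, '+') between them is swapped for new_usage.
text = 'Intro {{High-use|12,000+}} outro'
new_usage = 23456
out = re.sub(r'({{\s*High-use\s*\|)\s*[0-9,+]+\s*(\||}})',
             r'\g<1>{}\g<2>'.format(new_usage), text, flags=re.I)
print(out)  # Intro {{High-use|23456}} outro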
5b53a149c2c41448b8a06fed994223d43c87180c
|
Fix breaking script.
|
lenrmc/terminal.py
|
lenrmc/terminal.py
|
import re
import math
from collections import defaultdict
from .studies import Studies
_studies = Studies.db()
class TerminalView(object):
def __init__(self, system):
self._system = system
_desirable = {
'n-transfer': 2,
'stable': 3,
'4He': 1,
'ɣ': -4,
'n': -5,
}
_kwargs = {'selective': True}
def _sort_key(self, line):
desirable = sum(self._desirable.get(n, 0) for n in line.notes)
return line.q_value_kev > 0, desirable, line.q_value_kev
def _reactions(self, line_cls):
reactions = (line_cls(r, **self._kwargs) for r in self._system.reactions())
return sorted(self._filter(reactions), key=self._sort_key, reverse=True)
def _filter(self, reactions):
return reactions
def lines(self, options):
refs = set()
lines = []
line_cls = AsciiTerminalLine if options.ascii else UnicodeTerminalLine
for r in self._reactions(line_cls):
line, _refs = r.terminal(options)
lines.append(line)
refs |= set(_refs)
if refs and options.references:
lines.extend([''] + sorted(refs))
return lines
class StudiesTerminalView(TerminalView):
_not_observed = {'ɣ', 'n'}
_kwargs = {}
def _sort_key(self, reaction):
length = len(reaction._agreements)
sign = 1 if reaction.agreement > 0 else -1
return reaction.agreement, sign * length
def _filter(self, reactions):
for r in reactions:
if r.agreement is None:
continue
if any(n in self._not_observed for n in r.notes):
continue
yield r
class TerminalLine(object):
_notes_template = '{:<55} {:<25}'
def __init__(self, reaction, **kwargs):
self._reaction = reaction
self.q_value_kev = reaction.q_value.kev
self.notes = [self.format(s) for s in reaction.notes]
self._lvalues = reaction.lvalues
self._rvalues = reaction.rvalues
self.references = []
self.marks = []
self._agreements = []
self._add_references(self._lvalues, 'decrease', **kwargs)
self._add_references(self._rvalues, 'increase')
# Cases where a daughter was found in a study
self.agreement = sum(self._agreements) if self._agreements else None
def _spin_and_parity(self, string, values):
spins_and_parities = (n.spin_and_parity for num, n in sorted(values, key=self._sort_key))
spins_and_parities = filter(None, spins_and_parities)
string = '{} {:<20}'.format(string, ', '.join(sorted(spins_and_parities)))
return string
def _add_references(self, values, expected, **kwargs):
selective = kwargs.get('selective')
for result in _studies.isotopes(n.label for num, n in values):
agreement, mark = result.reference_mark(expected)
self._agreements.append(1 if agreement else -1)
if selective and agreement:
continue
self.marks.append(mark)
self.references.append(result.reference_line)
def _add_marks(self, string):
string += ' {}'.format(', '.join(self._format_mark(m) for m in self.marks))
return string
def _format_mark(self, mark):
return '{:>13}'.format(self.format(mark))
def _sort_key(self, pair):
num, n = pair
return n.mass_number, n.label
def _fancy_side(self, delim, side):
isotopes = defaultdict(lambda: 0)
for num, n in side:
isotopes[n] += num
values = []
nuclides = sorted(((num, n) for n, num in isotopes.items()), key=self._sort_key)
for num, n in nuclides:
label = self.format(n.full_label)
string = self._multi_daughter_template.format(num, label) if num > 1 else label
values.append(string)
return ' {} '.format(delim).join(values)
def _add_gamow(self, string):
g = self._reaction.gamow()
if g is None:
return string
return '{} [{:.0f}]'.format(string, g.factor())
def terminal(self, options):
kev = self.q_value_kev
sign = '+' if kev >= 0 else '-'
string = self._reaction_template.format(
self._fancy_side(self._reaction.lvalue_delim, self._lvalues),
self._fancy_side(self._reaction.rvalue_delim, self._rvalues),
kev,
)
if options.gamow:
string = self._add_gamow(string)
if options.notes:
string = self._notes_template.format(string, ', '.join(sorted(self.notes)))
if options.spins:
string = self._spin_and_parity(string, self._lvalues)
string = self._spin_and_parity(string, self._rvalues)
if options.references:
string = self._add_marks(string)
return string.strip(), self.references
class UnicodeTerminalLine(TerminalLine):
_multi_daughter_template = '{}·{}'
_reaction_template = '{} → {} + {:.0f} keV'
def format(self, string):
return string
class AsciiTerminalLine(TerminalLine):
_multi_daughter_template = '{}*{}'
_reaction_template = '{} => {} + {:.0f} keV'
_translated_patterns = [
('→', '->'),
('β', 'B'),
('ε', 'EC'),
('α', 'A'),
('ɣ', 'gamma'),
('ν', 'neutrino'),
('✗', 'x'),
('✓' 'a'),
]
def format(self, string):
for before, after in self._translated_patterns:
string = re.sub(before, after, string)
return string
|
Python
| 0
|
@@ -4165,14 +4165,13 @@
, g.
-factor
+value
())%0A
|
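The _sort_key methods in this file return tuples, relying on Python's element-by-element tuple comparison to rank several criteria at once. A standalone illustration of the same idea:

# (is_exothermic, desirability, energy): exothermic reactions sort first,
# ties break on desirability, then on the energy value itself.
reactions = [(-120.0, 2), (450.0, 1), (450.0, 3)]
key = lambda r: (r[0] > 0, r[1], r[0])
print(sorted(reactions, key=key, reverse=True))
# [(450.0, 3), (450.0, 1), (-120.0, 2)]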
6b5971f1d2d73dc2273a0fc7e167613417156547
|
Allow for highlighting account IDs when responding to messages.
|
src/bot.py
|
src/bot.py
|
"""bot.py - implements the FlowBot class, a boilerplate for other bots."""
from .channel_db import ChannelDb
from .server import Server
from .config import Config
import logging
from datetime import datetime
LOG = logging.getLogger(__name__)
class FlowBot(object):
"""A boilerplate for bot development."""
def __init__(self, settings):
"""Initialize the bot with an active flow instance."""
self.config = Config(settings)
self.server = Server(self.config)
self.account_id = self.server.flow.account_id()
self._commands = self._register_commands()
self.channel_db = ChannelDb(self.server, self.config)
@self.server.flow.message
def _handle_message(notification_type, message):
self.handle_message(notification_type, message)
def run(self):
"""Run the bot."""
try:
LOG.info('FlowBot is starting up...')
self.server.flow.process_notifications()
except(KeyboardInterrupt, SystemExit):
LOG.info('FlowBot is shutting down...')
self.server.flow.terminate()
def reply(self, original_message, response_msg):
"""Reply to the original message in the same channel."""
self.message_channel(
channel_id=original_message.get('channelId'),
msg=response_msg
)
def message_channel(self, channel_id, msg):
"""Send a message to the channel."""
self.server.flow.send_message(
cid=channel_id,
oid=self.config.org_id,
msg=msg
)
def message_all_channels(self, msg):
"""Send a message to all this bot's channels."""
for channel_id in self.channels():
self.message_channel(channel_id, msg)
def handle_message(self, notification_type, message):
"""Handle an incoming flow message."""
for m in message.get('regularMessages', []):
self._process_commands(m)
def mentioned(self, message):
"""Determine if this bot was mentioned in the message."""
username_mention = '@' + self.config.username.lower()
return username_mention in message.get('text', '').lower()
def from_admin(self, message):
"""Determine if this message was sent from an admin of the org."""
if message['senderAccountId'] == self.account_id:
return False
for member in self.server.flow.enumerate_channel_members(message['channelId']): # NOQA
if member['accountId'] == message['senderAccountId']:
if member['state'] in ['o', 'a']:
return True
return False
def channels(self):
"""Return the list of channel ids to which this bot belongs."""
channels = self.server.flow.enumerate_channels(self.config.org_id)
return [c['id'] for c in channels]
def _is_author(self, message):
"""Determine if the bot is the author of this message."""
return message['senderAccountId'] == self.account_id
def commands(self):
"""Override this method to provide customer commands.
Returns a dict where the key is the command trigger and the value is
a function which accepts the message as a parameter.
"""
return {}
def _register_commands(self):
"""Register the given commands to this bot.
Expects a dictionary where the key is the command trigger and the value
is the function which processes the message.
"""
commands = []
for commandKey, commandFunc in self.commands().iteritems():
commands.append((commandKey, commandFunc))
return commands
def _process_commands(self, message):
"""Detect and execute commands within the message."""
if not self._is_author(message) and not self._is_old(message):
message_text = message.get('text', '')
for match, command in self._commands:
if match in message_text:
command(message)
def _is_old(self, message):
"""Determine if this is an old message.
Old message age is configured in Config.
"""
if 'creationTime' not in message:
return False
creation_time = datetime.utcfromtimestamp(message['creationTime'] / 1000.0) # NOQA
now_time = datetime.utcnow()
age = now_time - creation_time
return age.seconds > self.config.message_age_limit
|
Python
| 0
|
@@ -1156,16 +1156,32 @@
onse_msg
+, highlight=None
):%0A
@@ -1356,16 +1356,49 @@
onse_msg
+,%0A highlight=highlight
%0A
@@ -1442,24 +1442,40 @@
nnel_id, msg
+, highlight=None
):%0A %22
@@ -1497,32 +1497,66 @@
e to the channel
+, optionally highlight account ids
.%22%22%22%0A sel
@@ -1666,16 +1666,90 @@
msg=msg
+,%0A other_data=%7B'highlighted': highlight%7D if highlight else None
%0A
@@ -1790,16 +1790,32 @@
elf, msg
+, highlight=None
):%0A
@@ -1957,16 +1957,27 @@
_id, msg
+, highlight
)%0A%0A d
|
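The diff adds an optional highlight keyword that only produces extra payload when set. The shape of that pattern, sketched with illustrative names rather than the real flow API:

# Illustrative only: mirrors the highlight/other_data shape from the diff.
def build_payload(msg, highlight=None):
    payload = {'msg': msg}
    # Attach the extra field only when highlighting was requested, so
    # default calls keep the original payload shape.
    if highlight:
        payload['other_data'] = {'highlighted': highlight}
    return payload

print(build_payload('hi'))                       # {'msg': 'hi'}
print(build_payload('hi', highlight=['acct1']))  # includes other_data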
b42b25ccceeb22ada21ed4be59ee1e7c7d535027
|
Add cmd func for RunnerClient
|
salt/runner.py
|
salt/runner.py
|
# -*- coding: utf-8 -*-
'''
Execute salt convenience routines
'''
# Import python libs
from __future__ import absolute_import, print_function
import logging
# Import salt libs
import salt.exceptions
import salt.loader
import salt.minion
import salt.utils.args
import salt.utils.event
from salt.client import mixins
from salt.output import display_output
from salt.utils.lazy import verify_fun
log = logging.getLogger(__name__)
class RunnerClient(mixins.SyncClientMixin, mixins.AsyncClientMixin, object):
'''
The interface used by the :command:`salt-run` CLI tool on the Salt Master
It executes :ref:`runner modules <all-salt.runners>` which run on the Salt
Master.
Importing and using ``RunnerClient`` must be done on the same machine as
the Salt Master and it must be done using the same user that the Salt
Master is running as.
Salt's :conf_master:`external_auth` can be used to authenticate calls. The
eauth user must be authorized to execute runner modules: (``@runner``).
Only the :py:meth:`master_call` below supports eauth.
'''
client = 'runner'
tag_prefix = 'run'
def __init__(self, opts):
self.opts = opts
@property
def functions(self):
if not hasattr(self, '_functions'):
self._functions = salt.loader.runner(self.opts) # Must be self.functions for mixin to work correctly :-/
return self._functions
def _reformat_low(self, low):
'''
Format the low data for RunnerClient()'s master_call() function
The master_call function here has a different function signature than
on WheelClient. So extract all the eauth keys and the fun key and
assume everything else is a kwarg to pass along to the runner function
to be called.
'''
auth_creds = dict([(i, low.pop(i)) for i in [
'username', 'password', 'eauth', 'token', 'client',
] if i in low])
fun = low.pop('fun')
reformatted_low = {'fun': fun}
reformatted_low.update(auth_creds)
# Support old style calls where arguments could be specified in 'low' top level
if not low.get('args') and not low.get('kwargs'): # not specified or empty
verify_fun(self.functions, fun)
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[fun],
salt.utils.args.condition_input([], low),
self.opts,
ignore_invalid=True
)
low['args'] = args
low['kwargs'] = kwargs
if 'kwargs' not in low:
low['kwargs'] = {}
if 'args' not in low:
low['args'] = []
reformatted_low['kwarg'] = low
return reformatted_low
def cmd_async(self, low):
'''
Execute a runner function asynchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_async({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return mixins.AsyncClientMixin.cmd_async(self, reformatted_low)
def cmd_sync(self, low, timeout=None):
'''
Execute a runner function synchronously; eauth is respected
This function requires that :conf_master:`external_auth` is configured
and the user is authorized to execute runner functions: (``@runner``).
.. code-block:: python
runner.eauth_sync({
'fun': 'jobs.list_jobs',
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'pam',
})
'''
reformatted_low = self._reformat_low(low)
return mixins.SyncClientMixin.cmd_sync(self, reformatted_low, timeout)
class Runner(RunnerClient):
'''
Execute the salt runner interface
'''
def __init__(self, opts):
super(Runner, self).__init__(opts)
self.returners = salt.loader.returners(opts, self.functions)
self.outputters = salt.loader.outputters(opts)
def print_docs(self):
'''
Print out the documentation!
'''
arg = self.opts.get('fun', None)
docs = super(Runner, self).get_docs(arg)
for fun in sorted(docs):
display_output('{0}:'.format(fun), 'text', self.opts)
print(docs[fun])
# TODO: move to mixin whenever we want a salt-wheel cli
def run(self):
'''
Execute the runner sequence
'''
ret, async_pub = {}, {}
if self.opts.get('doc', False):
self.print_docs()
else:
low = {'fun': self.opts['fun']}
try:
verify_fun(self.functions, low['fun'])
args, kwargs = salt.minion.load_args_and_kwargs(
self.functions[low['fun']],
salt.utils.args.parse_input(self.opts['arg']),
self.opts,
)
low['args'] = args
low['kwargs'] = kwargs
user = salt.utils.get_specific_user()
# Run the runner!
if self.opts.get('async', False):
async_pub = self.async(self.opts['fun'], low, user=user)
                    # by default: info will not be enough to be printed out!
log.warn('Running in async mode. Results of this execution may '
'be collected by attaching to the master event bus or '
                             'by examining the master job cache, if configured. '
'This execution is running under tag {tag}'.format(**async_pub))
return async_pub['jid'] # return the jid
# otherwise run it in the main process
async_pub = self._gen_async_pub()
ret = self._proc_function(self.opts['fun'],
low,
user,
async_pub['tag'],
async_pub['jid'],
False) # Don't daemonize
except salt.exceptions.SaltException as exc:
ret = '{0}'.format(exc)
if not self.opts.get('quiet', False):
display_output(ret, 'nested', self.opts)
return ret
log.debug('Runner return: {0}'.format(ret))
return ret
|
Python
| 0.000003
|
@@ -4061,16 +4061,412 @@
meout)%0A%0A
+ def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True):%0A '''%0A Execute a function%0A '''%0A return super(RunnerClient, self).cmd(fun,%0A arg,%0A pub_data,%0A kwarg,%0A print_event)%0A%0A
%0Aclass R
|
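A usage sketch for the cmd_sync path documented above, assuming a running Salt master, the standard master config path, and placeholder eauth credentials:

import salt.config
import salt.runner

# Load master opts and drive the runner synchronously via eauth.
opts = salt.config.master_config('/etc/salt/master')
runner = salt.runner.RunnerClient(opts)
result = runner.cmd_sync({
    'fun': 'jobs.list_jobs',
    'username': 'saltdev',   # placeholder credentials
    'password': 'saltdev',
    'eauth': 'pam',
})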
dba06078985716bda0a0d3a6ab26d0fad73b4c73
|
add a flags parameter to test() to allow passing flags in during interactive sessions
|
lib/neuroimaging/algorithms/statistics/__init__.py
|
lib/neuroimaging/algorithms/statistics/__init__.py
|
"""
TODO
"""
__docformat__ = 'restructuredtext'
def test(level=1, verbosity=1):
from numpy.testing import NumpyTest
return NumpyTest().test(level, verbosity)
|
Python
| 0
|
@@ -42,16 +42,17 @@
dtext'%0A%0A
+%0A
def test
@@ -76,10 +76,102 @@
ty=1
+, flags=%5B%5D):%0A from neuroimaging.utils.test_decorators import set_flags%0A set_flags(flags
)
-:
%0A
|
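One detail of this diff worth flagging: flags=[] is a mutable default argument. It is harmless here because set_flags only reads the list, but the usual defensive spelling is a None sentinel, sketched below:

# Defensive variant of the new signature (behavior otherwise unchanged).
def test(level=1, verbosity=1, flags=None):
    if flags is None:
        flags = []
    # ... set_flags(flags) and run the suite as in the diff ...
    return level, verbosity, flags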
2eb72d3b83268f31d0165d8f763c190e007a405f
|
edit submit.py to support testsuite_v1
|
examples/test/submit.py
|
examples/test/submit.py
|
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import collections.abc
import json
import os
import subprocess
import sys
import tempfile
import time
from datetime import timedelta
import logging
def set_logger(name):
log_format = "%(asctime)s - %(levelname)s - %(message)s"
date_format = "%m/%d/%Y %H:%M:%S %p"
logging.basicConfig(filename=f"{name}.cmd.log", level=logging.DEBUG, format=log_format, datefmt=date_format)
class Submitter(object):
def __init__(self, fate_home, work_mode, backend, existing_strategy, spark_submit_config):
self._fate_home = fate_home
self._work_mode = work_mode
self._backend = backend
self._existing_strategy = existing_strategy
self._spark_submit_config = spark_submit_config
@property
def _flow_client_path(self):
return os.path.join(self._fate_home, "../python/fate_flow/fate_flow_client.py")
def set_fate_home(self, path):
self._fate_home = path
return self
def set_work_mode(self, mode):
self._work_mode = mode
return self
def set_backend(self, backend):
self._backend = backend
return self
@staticmethod
def run_cmd(cmd):
logging.info(f"cmd: {' '.join(cmd)}")
subp = subprocess.Popen(cmd,
shell=False,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = subp.communicate()
return stdout.decode("utf-8")
def submit(self, cmd):
full_cmd = ["python", self._flow_client_path]
full_cmd.extend(cmd)
stdout = self.run_cmd(full_cmd)
try:
stdout = json.loads(stdout)
status = stdout["retcode"]
except json.decoder.JSONDecodeError:
raise ValueError(f"[submit_job]fail, stdout:{stdout}")
if status != 0:
if status == 100 and "table already exists" in stdout["retmsg"]:
return None
raise ValueError(f"[submit_job]fail, status:{status}, stdout:{stdout}")
return stdout
def upload(self, data_path, namespace, name, partition=10, head=1, remote_host=None, backend=None):
if backend is None:
backend = self._backend
conf = dict(
file=data_path,
head=head,
partition=partition,
work_mode=self._work_mode,
table_name=name,
backend=backend,
namespace=namespace
)
with tempfile.NamedTemporaryFile("w") as f:
json.dump(conf, f)
f.flush()
if remote_host:
self.run_cmd(["scp", f.name, f"{remote_host}:{f.name}"])
env_path = os.path.join(self._fate_home, "../../init_env.sh")
upload_cmd = f"source {env_path}"
upload_cmd = f"{upload_cmd} && python {self._flow_client_path} -f upload -c {f.name}"
if self._existing_strategy == 0 or self._existing_strategy == 1:
upload_cmd = f"{upload_cmd} -drop {self._existing_strategy}"
upload_cmd = f"{upload_cmd} && rm {f.name}"
stdout = self.run_cmd(["ssh", remote_host, upload_cmd])
try:
stdout = json.loads(stdout)
status = stdout["retcode"]
except json.decoder.JSONDecodeError:
raise ValueError(f"[submit_job]fail, stdout:{stdout}")
if status != 0:
if status == 100 and "table already exists" in stdout["retmsg"]:
return None
raise ValueError(f"[submit_job]fail, status:{status}, stdout:{stdout}")
return stdout["jobId"]
else:
cmd = ["-f", "upload", "-c", f.name]
if self._existing_strategy == 0 or self._existing_strategy == 1:
cmd.extend(["-drop", f"{self._existing_strategy}"])
stdout = self.submit(cmd)
if stdout is None:
return None
else:
return stdout["jobId"]
def delete_table(self, namespace, name):
pass
def submit_job(self, conf_path, roles, submit_type="train", dsl_path=None, model_info=None, substitute=None):
conf = self.render(conf_path, roles, model_info, substitute)
result = {}
with tempfile.NamedTemporaryFile("w") as f:
json.dump(conf, f)
f.flush()
if submit_type == "train":
stdout = self.submit(["-f", "submit_job", "-c", f.name, "-d", dsl_path])
result['model_info'] = stdout["data"]["model_info"]
else:
stdout = self.submit(["-f", "submit_job", "-c", f.name])
result['jobId'] = stdout["jobId"]
return result
def render(self, conf_path, roles, model_info=None, substitute=None):
with open(conf_path) as f:
d = json.load(f)
if substitute is not None:
d = recursive_update(d, substitute)
d['job_parameters']['work_mode'] = self._work_mode
d['job_parameters']['backend'] = self._backend
d['job_parameters']['spark_submit_config'] = self._spark_submit_config
initiator_role = d['initiator']['role']
d['initiator']['party_id'] = roles[initiator_role][0]
for r in ["guest", "host", "arbiter"]:
if r in d['role']:
for idx in range(len(d['role'][r])):
d['role'][r][idx] = roles[r][idx]
if model_info is not None:
d['job_parameters']['model_id'] = model_info['model_id']
d['job_parameters']['model_version'] = model_info['model_version']
return d
def await_finish(self, job_id, timeout=sys.maxsize, check_interval=3, task_name=None):
deadline = time.time() + timeout
start = time.time()
while True:
stdout = self.submit(["-f", "query_job", "-j", job_id])
status = stdout["data"][0]["f_status"]
elapse_seconds = int(time.time() - start)
date = time.strftime('%Y-%m-%d %X')
if task_name:
log_msg = f"[{date}][{task_name}]{status}, elapse: {timedelta(seconds=elapse_seconds)}"
else:
log_msg = f"[{date}]{job_id} {status}, elapse: {timedelta(seconds=elapse_seconds)}"
if (status == "running" or status == "waiting") and time.time() < deadline:
print(log_msg, end="\r")
time.sleep(check_interval)
continue
else:
print(" " * 60, end="\r") # clean line
print(log_msg)
return status
def recursive_update(d, u):
for k, v in u.items():
if isinstance(v, collections.abc.Mapping):
d[k] = recursive_update(d.get(k, {}), v)
else:
d[k] = v
return d
|
Python
| 0
|
@@ -3366,18 +3366,19 @@
me, %22../
-..
+bin
/init_en
|
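The upload path in this record hinges on one easy-to-miss detail: the temporary JSON config must be flushed before another process reads it. A self-contained sketch of that hand-off (the client script path is a placeholder):

import json
import subprocess
import tempfile

conf = {'file': 'data.csv', 'head': 1, 'partition': 10}
with tempfile.NamedTemporaryFile('w', suffix='.json') as f:
    json.dump(conf, f)
    f.flush()  # without this the subprocess may see an empty file
    subprocess.call(['python', 'fate_flow_client.py',  # placeholder path
                     '-f', 'upload', '-c', f.name])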
1c9f12f808ffa0f1d4f16ea9f35021a83126243f
|
Update test Solr download script to work with default Python 3
|
get-solr-download-url.py
|
get-solr-download-url.py
|
#!/usr/bin/env python
# encoding: utf-8
from __future__ import absolute_import, print_function, unicode_literals
import sys
import requests
# Try to import urljoin from the Python 3 reorganized stdlib first:
try:
from urlparse.parse import urljoin
except ImportError:
from urlparse import urljoin
if len(sys.argv) != 2:
print('Usage: %s SOLR_VERSION' % sys.argv[0], file=sys.stderr)
sys.exit(1)
solr_version = sys.argv[1]
tarball = 'solr-{0}.tgz'.format(solr_version)
dist_path = 'lucene/solr/{0}/{1}'.format(solr_version, tarball)
download_url = urljoin('http://archive.apache.org/dist/', dist_path)
mirror_response = requests.get("http://www.apache.org/dyn/mirrors/mirrors.cgi/%s?asjson=1" % dist_path)
if mirror_response.ok:
mirror_data = mirror_response.json()
download_url = urljoin(mirror_data['preferred'], mirror_data['path_info'])
print(download_url)
|
Python
| 0
|
@@ -222,21 +222,19 @@
from url
-parse
+lib
.parse i
|
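The one-line fix moves the import to its Python 3 home; the fallback chain the script intends looks like this:

# Try the Python 3 location first, then fall back to Python 2's urlparse.
try:
    from urllib.parse import urljoin
except ImportError:
    from urlparse import urljoin

print(urljoin('http://archive.apache.org/dist/',
              'lucene/solr/8.0.0/solr-8.0.0.tgz'))
# http://archive.apache.org/dist/lucene/solr/8.0.0/solr-8.0.0.tgz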
f9b7fa7fd7a75c5fb85fae545ed05080a162e967
|
Add sphinx markup to test_input.py
|
test_input.py
|
test_input.py
|
import pandas as pd
import numpy as np
from read_file import input_dataframe
# requires test .csv file containing 2 columns w/ 1 row string header and all below rows float/int
def test_ecg_dataframe_size():
ecg_dataframe = input_dataframe("testfile1.csv")
assert ecg_dataframe.shape[1] == 2
def test_ecg_dataframe_type():
ecg_dataframe = input_dataframe("testfile1.csv")
assert isinstance(ecg_dataframe, pd.DataFrame)
assert isinstance(ecg_dataframe.time[0], np.float64) or isinstance(ecg_dataframe.time[0], np.int64)
assert isinstance(ecg_dataframe.voltage[0], np.float64) or isinstance(ecg_dataframe.voltage[0], np.int64)
def test_exception_nofile():
try:
input_dataframe("")
assert False
except FileNotFoundError:
assert True
def test_exception_nonnumeric_values():
try:
ecg_nonnumeric_dataframe = input_dataframe("test_non_numeric.csv")
pd.to_numeric(ecg_nonnumeric_dataframe['time'])
pd.to_numeric(ecg_nonnumeric_dataframe['voltage'])
assert False
except ValueError:
assert True
def test_exception_empty_file():
assert len(input_dataframe("test_data_empty.csv")) == 0
|
Python
| 0.000004
|
@@ -199,24 +199,109 @@
ame_size():%0A
+ %22%22%22.. function:: test_ecg_dataframe_size()%0A%0A Test size of dataframe.%0A%0A %22%22%22%0A
ecg_data
@@ -409,24 +409,109 @@
ame_type():%0A
+ %22%22%22.. function:: test_ecg_dataframe_type()%0A%0A Test type of dataframe.%0A%0A %22%22%22%0A
ecg_data
@@ -843,24 +843,112 @@
n_nofile():%0A
+ %22%22%22.. function:: test_exception_nofile()%0A%0A Test that file can be found.%0A%0A %22%22%22%0A
try:%0A
@@ -1081,24 +1081,122 @@
c_values():%0A
+ %22%22%22.. function:: test_exception_nonnumeric_values()%0A%0A Test for non-numeric values.%0A %22%22%22%0A
try:%0A
@@ -1476,24 +1476,114 @@
pty_file():%0A
+ %22%22%22.. function:: test_exception_empty_file()%0A%0A Test if dataframe is empty.%0A %22%22%22%0A
assert l
@@ -1629,8 +1629,9 @@
%22)) == 0
+%0A
|
39c86c69e55b89f2e4023c1fe9566f2efb8d33b0
|
Tidy up flag in ISS commit routine
|
hw/ip/otbn/dv/otbnsim/sim/loop.py
|
hw/ip/otbn/dv/otbnsim/sim/loop.py
|
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Dict, List, Optional
from .constants import ErrBits
from .trace import Trace
class TraceLoopStart(Trace):
def __init__(self, depth: int, iterations: int, bodysize: int):
self.depth = depth
self.iterations = iterations
self.bodysize = bodysize
def trace(self) -> str:
return ("Starting loop {}, {} iterations, bodysize: {}"
.format(self.depth, self.iterations, self.bodysize))
class TraceLoopIteration(Trace):
def __init__(self, depth: int, iteration: int, total: int):
self.depth = depth
self.iteration = iteration
self.total = total
def trace(self) -> str:
return ("Finished iteration {}/{} of loop {}"
.format(self.iteration, self.total, self.depth))
class LoopLevel:
'''An object representing a level in the current loop stack
start_addr is the first instruction inside the loop (the instruction
following the loop instruction). insn_count is the number of instructions
in the loop (and must be positive). restarts is one less than the number of
iterations, and must be non-negative. last_addr is the address of the last
instruction in the loop body.
'''
def __init__(self, start_addr: int, insn_count: int, restarts: int):
assert 0 <= start_addr
assert 0 < insn_count
assert 0 <= restarts
self.loop_count = 1 + restarts
self.restarts_left = restarts
self.start_addr = start_addr
self.last_addr = start_addr + 4 * insn_count - 4
def get_loop_insn_addr(self) -> int:
'''The address of the LOOP or LOOPI instruction.'''
assert self.start_addr >= 4
return self.start_addr - 4
class LoopStack:
'''An object representing the loop stack
The loop stack holds up to 8 LoopLevel objects, corresponding to nested
loops.
'''
stack_depth = 8
def __init__(self) -> None:
self.stack = [] # type: List[LoopLevel]
self.trace = [] # type: List[Trace]
self.err_flag = False
self._pop_stack_on_commit = False
def start_loop(self,
start_addr: int,
loop_count: int,
insn_count: int) -> None:
'''Start a loop.
start_addr is the address of the first instruction in the loop body.
loop_count must be positive and is the number of times to execute the
loop. insn_count must be positive and is the number of instructions in
the loop body.
'''
assert 0 <= start_addr
assert 0 < insn_count
assert 0 < loop_count
depth = len(self.stack)
if depth == LoopStack.stack_depth:
self.err_flag = True
self.trace.append(TraceLoopStart(depth, loop_count, insn_count))
self.stack.append(LoopLevel(start_addr, insn_count, loop_count - 1))
def is_last_insn_in_loop_body(self, pc: int) -> bool:
        '''Is pc the last instruction address in the current loop body?'''
if not self.stack:
return False
top = self.stack[-1]
return pc == top.last_addr
def check_insn(self, pc: int, insn_affects_control: bool) -> None:
'''Check for branch instructions at the end of a loop body'''
if self.is_last_insn_in_loop_body(pc) and insn_affects_control:
# We're about to execute the last instruction in the loop body.
# Make sure that it isn't a jump, branch or another loop
# instruction.
self.err_flag = True
def step(self, pc: int, warps: Dict[int, int]) -> Optional[int]:
'''Update loop stack. If we should loop, return new PC'''
self._pop_stack_on_commit = False
self.apply_warps(warps)
if self.is_last_insn_in_loop_body(pc):
assert self.stack
top = self.stack[-1]
assert top.restarts_left >= 0
# 1-based iteration number
loop_idx = top.loop_count - top.restarts_left
if not top.restarts_left:
self._pop_stack_on_commit = True
ret_val = None
else:
top.restarts_left -= 1
ret_val = top.start_addr
self.trace.append(TraceLoopIteration(len(self.stack),
loop_idx, top.loop_count))
return ret_val
return None
def err_bits(self) -> int:
return ErrBits.LOOP if self.err_flag else 0
def changes(self) -> List[Trace]:
return self.trace
def commit(self) -> None:
assert not self.err_flag
if self._pop_stack_on_commit:
self.stack.pop()
self.trace = []
def abort(self) -> None:
self.trace = []
self.err_flag = False
def apply_warps(self, warps: Dict[int, int]) -> None:
'''Apply any loop warping specified by warps.
Here, warps maps values for the innermost loop iteration count from
what they are currently to what they should be warped to.
'''
if not self.stack:
return
top = self.stack[-1]
cur_iter_count = top.loop_count - (1 + top.restarts_left)
new_iter_count = warps.get(cur_iter_count)
if new_iter_count is None:
return
assert cur_iter_count <= new_iter_count
assert new_iter_count + 1 <= top.loop_count
top.restarts_left = top.loop_count - new_iter_count - 1
|
Python
| 0
|
@@ -4868,16 +4868,62 @@
ck.pop()
+%0A self._pop_stack_on_commit = False
%0A%0A
|
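The flag this commit tidies up belongs to a stage-then-commit design: step() records what commit() should do, so commit() must clear the flag once it has acted. A stripped-down model of the same flow:

# Minimal model of the LoopStack staging pattern fixed above.
class Staged:
    def __init__(self):
        self.stack = [1, 2, 3]
        self._pop_on_commit = False

    def step(self, at_loop_end):
        self._pop_on_commit = at_loop_end

    def commit(self):
        if self._pop_on_commit:
            self.stack.pop()
            self._pop_on_commit = False  # consume the flag exactly once

s = Staged()
s.step(True)
s.commit()
s.commit()  # without the reset, this second commit would pop again
print(s.stack)  # [1, 2]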
0d2a8123b064204a5316ec1797faca3a5c30d454
|
use ip address for fewer characters
|
provisioner/models/machine.py
|
provisioner/models/machine.py
|
import os
import base64
import boto.ec2
# EU-WEST-1: RHEL7 HVM
AMI_ID = 'ami-25158352'
region = 'eu-west-1'
def generate_userdata():
'''Read environment vars and various other things to
get SSH keys, usernames and deploy scripts and inject them into an
instance as a userdata script
'''
ssh_key = os.environ["DEPLOY_SSH_KEY"]
deploy_script_location = os.environ["DEPLOY_SCRIPT_LOCATION"]
with open(deploy_script_location, 'r') as deploy_handle:
deploy_script = deploy_handle.readlines()
deploy_handle.close()
user_data = '''#!/bin/bash
printf '{0}' >> /home/ec2-user/.ssh/authorized_keys;
mkdir -p /opt/code-deploy
curl https://raw.githubusercontent.com/Financial-Times/paasport/master/code-deploy/app/src/main/resources/deploy.sh > /opt/code-deploy/deploy.sh
chmod a+x /opt/code-deploy/deploy.sh
chown -R ec2-user:ec2-user /opt/code-deploy
yum install -y java
echo "all done"
'''.format(ssh_key, deploy_script)
print user_data
return user_data
def create_many(definitions, cluster_id):
return map(format_instance, map(lambda data: create_new(data, cluster_id),
definitions))
def create_new(data, cluster_id):
# Always keep the nursery full
name = data['name'] if 'name' in data else 'unnamed instance'
nursery_instance = create_new_in_nursery(data)
instances = transfer_machine_from_nursery_to_cluster(cluster_id, new_name=name)
if len(instances) == 1:
return instances[0]
raise Exception("No available instances")
def create_new_in_nursery(data):
security_groups = [ 'sg-8a2574ef' ]
connection = boto.ec2.connect_to_region(region)
instance = connection.run_instances(AMI_ID, instance_type='m3.medium',
user_data=generate_userdata(), key_name="LukeBlaney",
security_group_ids=security_groups).instances[0]
connection.create_tags([instance.id], { 'cluster': '__nursery__' })
return instance
def transfer_machine_from_nursery_to_cluster(cluster_id, new_name="clustered_machine"):
connection = boto.ec2.connect_to_region(region)
# TODO: RACE CONDITIONS COULD OCCUR HERE! Need a DLM?
instance = connection.get_only_instances(filters={ 'instance-state-name':
'running', 'tag-key': 'cluster', 'tag-value':
'__nursery__'})[0]
connection.create_tags([instance.id],
tags={ 'cluster': str(cluster_id), 'Name': new_name })
# END OF RACE CONDITION TERRITORY
return [instance]
def get_instances_in_cluster(cluster_id):
connection = boto.ec2.connect_to_region(region)
return map(format_instance, connection.get_only_instances(filters={ 'tag-key': 'cluster',
'tag-value': cluster_id, 'instance-state-name': 'running' }))
def format_instance(instance):
return {
'id': instance.id,
'name': instance.tags['Name'],
# 'hostname': instance.private_ip_address,
'hostname': instance.public_dns_name,
# hardcoded m3.medium
'cpu': 1,
'memory': '4026531840',
'disk': '4',
'region': region,
'metadata': '{}',
'state': instance.state
}
def get_instance_info(instance_ids):
connection = boto.ec2.connect_to_region(region)
instance_states = connection.get_only_instances(instance_ids=instance_ids)
    return instance_states
def delete_instance(instance_id, region):
connection = boto.ec2.connect_to_region(region)
connection.terminate_instances(instance_ids=[instance_id])
return True
|
Python
| 0.000001
|
@@ -2748,23 +2748,18 @@
nce.
-public_dns_name
+ip_address
,%0A#
|
7de37e0ea41054ac86dd6fd5226ff0dd846a5eb1
|
fix inputs
|
detect_tokens.py
|
detect_tokens.py
|
#!/usr/bin/env python
"""
Authentication tokens detection script.
"""
# -*- coding: utf-8 -*-
import signal
import argparse
import sqlite3
import os
import logging
from argparse import RawTextHelpFormatter
from termcolor import colored
from selenium.common.exceptions import TimeoutException
from httplib import BadStatusLine, CannotSendRequest
from urllib2 import URLError
from tldextract import TLDExtract
from authtokens import utils
__author__ = "Andrea Casini"
__license__ = "MIT"
__all__ = ["AuthenticationCrawler", "GhostCrawler"]
__version___ = '1.0.0'
# Logger setup.
FORMAT = '[%(levelname)s %(asctime)s] %(funcName)s: %(message)s'
formatter = logging.Formatter(FORMAT, datefmt='%H:%M:%S')
log = logging.getLogger('authtokenslog')
# Add console handler to print logs in stdout.
console_handler = logging.StreamHandler()
console_handler.setFormatter(formatter)
log.addHandler(console_handler)
log.setLevel(logging.DEBUG)
# TldExtract logger setup.
tld_log = logging.getLogger('tldextract')
tld_log.addHandler(console_handler)
tld_log.setLevel(logging.CRITICAL)
def timeout_handler(s, f):
raise TimeoutException
def main():
description = """
AUTHENTICATION TOKENS DETECTION
What it does
------------
1) Authenticates into given url(s);
2) Collects cookies;
3) Computes authentication token(s);
4) Saves results into a SQlite3 database (see schema.sql).
Usage example
-------------
> python detect_tokens.py -e=user@mail.com -u=username -n=nickname -p=password
-t=0.5 -i=http://example.com
"""
parser = argparse.ArgumentParser(description=description,
formatter_class=RawTextHelpFormatter)
# Inputs
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-f',
dest='filename',
help="path to file containing a list of urls.",
type=argparse.FileType('rt'))
group.add_argument('-i',
dest='url',
help='input url',
type=str)
# Options
parser.add_argument('-e',
dest='email',
help='your email',
required=True,
type=str)
parser.add_argument('-u',
dest='username',
help='your username',
required=True,
type=str)
parser.add_argument('-n',
dest='nickname',
help='your nickname',
type=str)
parser.add_argument('-p',
dest='password',
help='your password',
type=str)
parser.add_argument('-d',
dest='database',
help='output database name in which results are stored',
type=str,
default='cookies.db')
parser.add_argument('-t',
dest='thresh',
help='the authentication threshold',
type=float,
default=.3)
parser.add_argument('-k',
dest='maxtokens',
help='maximum number of authentication tokens to be found',
type=int,
default=None)
parser.add_argument('--ignore-alarm',
dest='ignore',
help='skip any alerts dialog',
action='store_true',
default=False)
parser.add_argument('--timeout',
dest='timeout',
help='maximum time to process a url',
type=int,
default=0)
group = parser.add_argument_group('manual mode')
group.add_argument('--manual',
dest='manual',
help='switch to manual login',
action='store_true',
default=False)
group.add_argument('-s',
dest='timetologin',
help='number of seconds that you have to login',
type=int,
default=30)
try:
args = parser.parse_args()
except IOError, msg:
parser.error(str(msg))
return
# Check if database already exists.
db_is_new = not os.path.exists(args.database)
# Open sqlite3 connection.
with sqlite3.connect(args.database) as conn:
if db_is_new:
log.info('Creating schema.\n')
with open('schema.sql', 'rt') as f:
schema = f.read()
conn.executescript(schema)
else:
log.info('Database exists, assume schema does, too.\n')
cursor = conn.cursor()
# !IMPORTANT Enable foreign key support.
# This is necessary for the delete on cascade queries.
cursor.execute("PRAGMA foreign_keys = ON")
# Start Firefox.
log.info('Starting Firefox.')
firefox = utils.firefox_setup(args.email,
args.username,
args.nickname,
args.password,
args.ignore,
args.thresh)
# Start PhantomJS.
log.info('Starting PhantomJS.\n')
ghost = utils.phantomjs_setup(args.email,
args.username,
args.nickname,
args.thresh)
# Split urls if a file is given.
urls = args.filename.read().split('\n') if args.filename else [args.url]
# Domain extractor (offline mode).
extract = TLDExtract(fetch=False)
for i, url in enumerate(urls):
print('## PROCESSING URL {0} of {1}'.format(i + 1, len(urls)))
if url.startswith('http://') or url.startswith('https://'):
log.info(colored(url, 'blue'))
# Clean up url from spaces.
url = url.replace(' ', '')
# Extract domain from url.
domain = extract(url).domain
log.info("Extracted domain: '{}'".format(domain))
unique_cookies = []
tokens = []
# Errors.
is_auth = False
is_ambiguous = False
# Start a global timer.
signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(args.timeout)
try:
# Ambiguity check.
if not firefox.is_authenticated(url):
if not args.manual:
log.info(colored('Automatic Mode Active\n', 'magenta'))
is_auth = firefox.authenticate(firefox.current_url)
else:
log.info(colored('Manual Mode Active', 'magenta'))
utils.start_timer(args.timetologin)
is_auth = firefox.is_authenticated(firefox.current_url)
else:
                        log.critical(colored('Page is ambiguous!\n', 'red'))
is_ambiguous = True
if is_auth and not is_ambiguous:
log.info(colored('Login successful!\n', 'green'))
# Get current url post authentication.
post_auth_url = firefox.current_url
cookies = firefox.get_cookies()
# !IMPORTANT Remove cookies duplicates to
# prevent unexpected behaviour in our
# detection method (see cookies policy).
unique_cookies = utils.delete_duplicates_cookies(cookies)
log.info('{} cookies collected. Detecting authentication tokens.\n'.format(len(unique_cookies)))
# Use the ghost to find authentication tokens.
tokens = ghost.detect_authentication_tokens(
post_auth_url,
unique_cookies,
max_tokens=args.maxtokens)
else:
log.info(colored('Login failed!\n', 'red'))
except (URLError, CannotSendRequest):
log.warning(colored('Connection error!\n', 'red'))
except TimeoutException:
log.warning(colored('Operation timed out!\n', 'red'))
except BadStatusLine:
log.warning(colored('Browser quits unexpectedly!\n', 'red'))
finally:
# Reset timer.
signal.alarm(0)
has_failed = not tokens
# If the analysis has failed do not save any cookies..
if has_failed:
unique_cookies = []
# Create website entry.
website = [domain, url, has_failed]
# Save results into database.
utils.add_entry(cursor, website, unique_cookies, tokens)
# Commit changes.
conn.commit()
else:
log.info("Url '{}' is not valid\n".format(url))
# Quit browsers.
log.info('Quitting browsers.')
firefox.quit()
ghost.quit()
if __name__ == '__main__':
main()
|
Python
| 0.000124
|
@@ -2618,32 +2618,71 @@
your nickname',%0A
+ required=True,%0A
@@ -2809,32 +2809,71 @@
your password',%0A
+ required=True,%0A
|
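The argument handling above pairs a required mutually exclusive group with per-option required=True flags; the exclusive-group half in isolation:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-f', dest='filename')
group.add_argument('-i', dest='url')

print(parser.parse_args(['-i', 'http://example.com']))
# Passing both -f and -i, or neither, exits with a usage error.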
f0acf7fe8255e376ec602117709789cba4df5ea9
|
add sample data to pconf
|
examples/simple_test.py
|
examples/simple_test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=line-too-long
#
# Georges Toth (c) 2013-2014 <georges@trypill.org>
# GOVCERT.LU (c) 2013-2017 <info@govcert.etat.lu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import json
import argparse
import datetime
import email.policy
import eml_parser
__author__ = 'Toth Georges'
__email__ = 'georges@trypill.org, georges.toth@govcert.etat.lu'
__copyright__ = 'Copyright 2013-2014 Georges Toth, Copyright 2013-present GOVCERT Luxembourg'
__license__ = 'AGPL v3+'
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
elif isinstance(obj, email.header.Header):
print(str(obj))
        raise TypeError("Type not serializable")
raise TypeError("Type not serializable")
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-i', dest='msgfile',
help='input file', required=True)
parser.add_argument('-d', dest='debug', action='store_true',
help='debug (no hashing)')
parser.add_argument('-r', dest='fulldata', action='store_true',
help='includes raw data of attachments')
parser.add_argument('-w', dest='whitelist_ip',
help='whitelist IPv4 or IPv6 ip from parsing; comma-separated list of IPs, no spaces !')
parser.add_argument('-f', dest='whitelist_email',
help='whitelist an email in routing headers "For"; comma-separated list of e-mail addresses, no spaces !')
parser.add_argument('-b', dest='byhostentry',
help='collect the smtp injector IP using the "by" "host" in routing headers; comma-separated list of IPs, no spaces !')
options = parser.parse_args()
msgfile = options.msgfile
full = options.debug
fulldata = options.fulldata
pconf = {}
if options.whitelist_ip is not None:
pconf['whiteip'] = options.whitelist_ip.split(',')
if options.whitelist_email is not None:
pconf['whitefor'] = options.whitelist_email.split(',')
if options.byhostentry is not None:
pconf['byhostentry'] = options.byhostentry.split(',')
with open(msgfile, 'rb') as fhdl:
raw_email = fhdl.read()
m = eml_parser.eml_parser.decode_email_b(raw_email, include_raw_body=False, include_attachment_data=False, pconf=pconf)
print(json.dumps(m, default=json_serial))
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2568,16 +2568,141 @@
f = %7B%7D%0A%0A
+ pconf%5B'whiteip'%5D = %5B'192.168.1.1'%5D%0A pconf%5B'whitefor'%5D = %5B'a@example.com'%5D%0A pconf%5B'byhostentry'%5D = %5B'example.com'%5D%0A%0A
if o
|
0be94fb1491179cbb8f9e1bf6682ff6c2db4ea0e
|
Update add method of the visualization modules
|
geomdl/vis.py
|
geomdl/vis.py
|
"""
.. module:: vis
:platform: Unix, Windows
:synopsis: Provides abstract base classes for visualization modules
.. moduleauthor:: Onur Rauf Bingol <orbingol@gmail.com>
"""
import abc
import six
class VisConfigAbstract(six.with_metaclass(abc.ABCMeta, object)):
""" Abstract base class for storing configuration of visualization classes
Uses Python's *Abstract Base Class* implementation to define a base for all visualization configurations
in NURBS-Python package.
"""
def __init__(self, **kwargs):
pass
class VisAbstract(six.with_metaclass(abc.ABCMeta, object)):
""" Abstract base class for visualization (general)
Uses Python's *Abstract Base Class* implementation to define a base for all common visualization options
in NURBS-Python package.
"""
def __init__(self, config=None):
if not isinstance(config, VisConfigAbstract):
raise TypeError("Config variable must be an instance of vis.VisAbstractConfig")
self._config = config
self._plots = []
self._plot_types = {'ctrlpts': 'points', 'evalpts': 'points', 'other': None}
def clear(self):
""" Clears the points, colors and names lists. """
self._plots[:] = []
def add(self, ptsarr=(), name=None, color=None, plot_type=0):
""" Adds points sets to the visualization instance for plotting.
:param ptsarr: control, curve or surface points
:type ptsarr: list, tuple
:param name: name of the point on the legend
:type name: str
:param color: color of the point on the legend
:type color: str
:param plot_type: type of the plot, control points (type = 1) or evaluated points (type = 0)
:type plot_type: int
"""
if ptsarr is None or len(ptsarr) == 0:
return
if not color or not name:
return
# Add points, size, plot color and name on the legend
elem = {'ptsarr': ptsarr, 'name': name, 'color': color, 'type': plot_type}
self._plots.append(elem)
@property
def plot_types(self):
""" Plot types
:getter: Gets the plot types
:type: tuple
"""
return self._plot_types
def set_plot_type(self, plot_type, type_value):
""" Sets the plot type.
The visualization module is mainly designed to plot the control points (*ctrlpts*) and the surface points
(*evalpts*). These are called as *plot types*. However, there is more than one way to plot the control points
and the surface points. For instance, a control points plot can be a scatter plot or a quad mesh, and a
surface points plot can be a scatter plot or a tessellated surface plot.
This function allows you to change the type of the plot, e.g. from scatter plot to tessellated surface plot.
        On the other hand, some visualization modules also define specialized classes for this purpose, as it might
        not be possible to change the type of the plot at runtime due to visualization library internal API
differences (i.e. different backends for 2- and 3-dimensional plots).
By default, the following plot types and values are available:
**Curve**:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points
**Surface**:
* For control points (*ctrlpts*): points, quads, quadmesh
* For evaluated points (*evalpts*): points, quads, triangles
**Volume**:
* For control points (*ctrlpts*): points
* For evaluated points (*evalpts*): points, voxels
:param plot_type: plot type
:type plot_type: str
:param type_value: type value
:type type_value: str
"""
if not isinstance(plot_type, str) or not isinstance(type_value, str):
raise TypeError("Plot type and its value should be string type")
if plot_type not in self._plot_types.keys():
raise KeyError(plot_type + " is not a type. Possible types: " +
", ".join([k for k in self._plot_types.keys()]))
self._plot_types[plot_type] = type_value
@abc.abstractmethod
def render(self, **kwargs):
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstract`` class.
"""
# We need something to plot
if self._plots is None or len(self._plots) == 0:
raise ValueError("Nothing to plot")
# Remaining should be implemented
pass
class VisAbstractSurf(six.with_metaclass(abc.ABCMeta, VisAbstract)):
""" Abstract base class for surface visualization
Implements ``VisAbstract`` class and also uses Python's *Abstract Base Class* implementation to define a base
for **surface** visualization options in NURBS-Python package.
"""
def __init__(self, config=None):
super(VisAbstractSurf, self).__init__(config=config)
self._ctrlpts_offset = 0.0
def set_ctrlpts_offset(self, offset_value):
""" Sets an offset for the control points grid plot.
:param offset_value: offset value
:type offset_value: float
"""
self._ctrlpts_offset = float(offset_value)
@abc.abstractmethod
def render(self, **kwargs):
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstractSurf`` class.
"""
# Calling parent function
super(VisAbstractSurf, self).render()
# Remaining should be implemented
pass
class VisAbstractVol(six.with_metaclass(abc.ABCMeta, VisAbstract)):
""" Abstract base class for volume visualization
Implements ``VisAbstract`` class and also uses Python's *Abstract Base Class* implementation to define a base
for **volume** visualization options in NURBS-Python package.
"""
def __init__(self, config=None):
super(VisAbstractVol, self).__init__(config=config)
@abc.abstractmethod
def render(self, **kwargs):
""" Abstract method for rendering plots of the point sets.
This method must be implemented in all subclasses of ``VisAbstractVol`` class.
"""
# Calling parent function
super(VisAbstractVol, self).render()
# Remaining should be implemented
pass
|
Python
| 0
|
@@ -1121,16 +1121,17 @@
, 'other
+s
': None%7D
@@ -1269,11 +1269,19 @@
sarr
-=()
+, plot_type
, na
@@ -1303,21 +1303,8 @@
None
-, plot_type=0
):%0A
@@ -1410,26 +1410,21 @@
trol
-, curve or surface
+ or evaluated
poi
@@ -1480,45 +1480,70 @@
ram
-name: name of the point on the legend
+plot_type: type of the plot, e.g. ctrlpts, evalpts, bbox, etc.
%0A
@@ -1553,19 +1553,24 @@
:type
-nam
+plot_typ
e: str%0A
@@ -1587,33 +1587,40 @@
ram
-color: color of the point
+name: name of the plot displayed
on
@@ -1644,21 +1644,20 @@
:type
-color
+name
: str%0A
@@ -1673,93 +1673,25 @@
ram
-plot_type: type of the
+color:
plot
-,
co
-ntrol points (type = 1) or evaluated points (type = 0)
+lor
%0A
@@ -1705,22 +1705,18 @@
ype
-plot_type: int
+color: str
%0A
@@ -1736,18 +1736,17 @@
-if
+#
ptsarr
is N
@@ -1745,88 +1745,88 @@
arr
-is None or len(ptsarr) == 0:%0A return%0A if not color or not name
+can be a list, a tuple or an array%0A if ptsarr is None or len(ptsarr) == 0
:%0A
|
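A minimal concrete subclass makes the abstract contract above tangible. This sketch is based on the file as listed here (assumed importable as geomdl.vis) and uses keyword arguments to add() so it fits both the pre- and post-change signatures:

from geomdl import vis  # assumes the vis module above is on the path

class PrintVis(vis.VisAbstract):
    """Toy visualizer: lists registered plots instead of drawing them."""
    def render(self, **kwargs):
        super(PrintVis, self).render()  # keep the nothing-to-plot check
        for plot in self._plots:
            print(plot['name'], plot['type'], len(plot['ptsarr']))

v = PrintVis(config=vis.VisConfigAbstract())
v.add(ptsarr=[[0, 0], [1, 1]], name='ctrl pts', color='red',
      plot_type='ctrlpts')
v.render()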
5b626f65e3926ac34b579978303eb061fa33a09c
|
Change features widget to be a multiple checkbox.
|
us_ignite/apps/forms.py
|
us_ignite/apps/forms.py
|
from django import forms
from django.contrib.auth.models import User
from django.core.validators import validate_email
from django.forms.models import inlineformset_factory
from us_ignite.apps.models import (Application, ApplicationURL,
ApplicationImage)
def _get_status_choices():
"""Returns a list of valid user status for the ``Application``"""
available_status = [
Application.PUBLISHED,
Application.DRAFT,
Application.PRIVATE,
]
is_valid_status = lambda x: x[0] in available_status
return filter(is_valid_status, Application.STATUS_CHOICES)
class ApplicationForm(forms.ModelForm):
"""Model form for the ``Application`` with whitelisted fields."""
status = forms.ChoiceField(
choices=_get_status_choices(), initial=Application.DRAFT)
summary = forms.CharField(
max_length=140, widget=forms.Textarea,
help_text='Tweet-length pitch / summary of project.')
class Meta:
model = Application
fields = ('name', 'summary', 'impact_statement', 'description',
'image', 'domain', 'features', 'stage', 'roadmap',
'assistance', 'team_description', 'acknowledgments',
'tags', 'status',)
ApplicationLinkFormSet = inlineformset_factory(
Application, ApplicationURL, max_num=3, extra=3)
ApplicationImageFormSet = inlineformset_factory(
Application, ApplicationImage, max_num=10, extra=1)
def validate_member(email):
"""Validates the user has a valid email and it is registered."""
try:
validate_email(email)
except forms.ValidationError:
raise forms.ValidationError(
'``%s`` is an invalid email address.' % email)
try:
return User.objects.get(email=email)
except User.DoesNotExist:
raise forms.ValidationError(
'User with ``%s`` email is not registered.' % email)
class MembershipForm(forms.Form):
"""Form to validate the collaborators."""
collaborators = forms.CharField(
widget=forms.Textarea, help_text='Add registered users as '
'collaborators for this app. One email per line.')
def clean_collaborators(self):
"""Validates the payload is a list of registered usernames."""
collaborators_raw = self.cleaned_data.get('collaborators')
if collaborators_raw:
member_list = []
collaborator_list = [c for c in collaborators_raw.splitlines() if c]
for collaborator in collaborator_list:
collaborator = collaborator.strip()
member = validate_member(collaborator)
member_list.append(member)
return member_list
|
Python
| 0
|
@@ -1266,16 +1266,102 @@
atus',)%0A
+ widgets = %7B%0A 'features': forms.CheckboxSelectMultiple(),%0A %7D%0A
%0A%0AApplic
|
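The widgets override swaps Django's default multi-select for checkboxes. The same widget works on a plain Form as well; a self-contained sketch with made-up choices:

from django import forms

class FeaturePickerForm(forms.Form):
    # Same widget as the ModelForm override above, on illustrative choices.
    features = forms.MultipleChoiceField(
        choices=[('sdn', 'SDN'), ('gigabit', 'Gigabit'),
                 ('low-latency', 'Low latency')],
        widget=forms.CheckboxSelectMultiple,
    )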
6143888a2fa396b868ed44c3ceab765a95abea45
|
Check function must always use all parameters.
|
tests/constraint_tests.py
|
tests/constraint_tests.py
|
from unittest import TestCase
from skypyblue.core import ConstraintSystem
from skypyblue.models import Method, Constraint, Strength
try:
from unittest.mock import MagicMock as Mock
except ImportError as e:
from mock import Mock
class ConstraintTests(TestCase):
def setUp(self):
self.cs = ConstraintSystem()
self.vars = self.cs.create_variables(["v1", "v2", "v3"], [4,5,3])
self.v1, self.v2, self.v3 = self.vars
m1_2 = Method(self.v1, self.v2, lambda x: x // 2)
m1_3 = Method(self.v1, self.v3, lambda x: x // 3)
self.cn = Constraint(lambda: True, Strength.STRONG, self.vars, [m1_3, m1_2])
self.cs.add_constraint(self.cn)
def tearDown(self):
pass
def test_adding_enforced_to_pplan(self):
self.cn.is_enforced = Mock(return_value = True)
self.assertIsNone(self.cn.mark)
mark = self.cs.marker.new_mark()
pplan = self.cn.add_to_pplan([], mark)
self.assertEqual([self.cn], pplan)
self.assertEqual(mark, self.cn.mark)
def test_adding_unenforced_to_pplan(self):
self.cn.is_enforced = Mock(return_value = False)
self.assertIsNone(self.cn.mark)
pplan = self.cn.add_to_pplan([], self.cs.marker.new_mark())
self.assertEqual([], pplan)
self.assertIsNone(self.cn.mark)
def test_adding_with_the_same_mark(self):
self.cn.is_enforced = Mock(return_value = True)
mark = self.cs.marker.new_mark()
self.cn.mark = mark
pplan = self.cn.add_to_pplan([], mark)
self.assertEqual([], pplan)
self.assertEqual(mark, self.cn.mark)
def test_adding_with_other_mark(self):
self.cn.is_enforced = Mock(return_value = True)
mark1 = self.cs.marker.new_mark()
mark2 = self.cs.marker.new_mark()
self.cn.mark = mark1
pplan = self.cn.add_to_pplan([], mark2)
self.assertEqual([self.cn], pplan)
self.assertEqual(mark2, self.cn.mark)
|
Python
| 0
|
@@ -380,10 +380,12 @@
%5B4,
+
5,
+
3%5D)%0A
@@ -566,16 +566,27 @@
t(lambda
+ v1, v2, v3
: True,
|
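The fix gives the check lambda one parameter per constraint variable; per the commit subject, the check is invoked with all of them, so a zero-argument lambda fails at call time. Reproduced in isolation:

check = lambda: True
try:
    check(4, 5, 3)  # the constraint check is called with all variable values
except TypeError as exc:
    print(exc)  # <lambda>() takes 0 positional arguments but 3 were given

check = lambda v1, v2, v3: True
print(check(4, 5, 3))  # True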
d3c10b103bd6c13fe27e999ae6145bce76e63ff4
|
add etree convenience function
|
server/util.py
|
server/util.py
|
# -*- coding: ISO-8859-15 -*-
# =================================================================
#
# $Id$
#
# Authors: Tom Kralidis <tomkralidis@hotmail.com>
#
# Copyright (c) 2010 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
from lxml import etree
import config
def get_today_and_now():
''' Get the date, right now, in ISO8601 '''
import time
return time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())
def get_version_integer(version):
    ''' Get an integer of the OGC version value x.y.z '''
if version is not None: # split and make integer
xyz = version.split('.')
if len(xyz) != 3:
return -1
try:
return int(xyz[0]) * 10000 + int(xyz[1]) * 100 + int(xyz[2])
except Exception, err:
raise RuntimeError('%s' % str(err))
else: # not a valid version string
return -1
def nspath_eval(xpath):
''' Return an etree friendly xpath '''
out = []
for chunks in xpath.split('/'):
namespace, element = chunks.split(':')
out.append('{%s}%s' % (config.NAMESPACES[namespace], element))
return '/'.join(out)
def xmltag_split(tag):
''' Return XML element bare tag name (without prefix) '''
return tag.split('}')[1]
def bbox2wkt(bbox):
''' Return OGC WKT Polygon of a simple bbox string '''
tmp = bbox.split(',')
minx = float(tmp[0])
miny = float(tmp[1])
maxx = float(tmp[2])
maxy = float(tmp[3])
return 'POLYGON((%.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f, %.2f %.2f))' \
% (minx, miny, minx, maxy, maxx, maxy, maxx, miny, minx, miny)
def query_not_bbox(bbox_data, bbox_input):
''' perform spatial disjoint query '''
if bbox_input is None:
return 'false'
if bbox_data is None:
return 'true'
from shapely.wkt import loads
bbox1 = loads(bbox2wkt(bbox_data))
bbox2 = loads(bbox2wkt(bbox_input))
if bbox1.disjoint(bbox2) is True:
return 'true'
else:
return 'false'
def query_bbox(bbox_data, bbox_input):
''' perform spatial intersects query '''
if bbox_data is None or bbox_input is None:
return 'false'
from shapely.wkt import loads
bbox1 = loads(bbox2wkt(bbox_data))
bbox2 = loads(bbox2wkt(bbox_input))
if bbox1.intersects(bbox2) is True:
return 'true'
else:
return 'false'
def query_anytext(xml, searchterm):
''' perform fulltext search against XML '''
exml = etree.fromstring(xml)
for element in exml.xpath('//text()'): # all elements
if element.lower().find(searchterm.lower()) != -1:
return 'true'
for att in exml.xpath('//attribute::*'): # all attributes
if att.lower().find(searchterm.lower()) != -1:
return 'true'
return 'false'
def query_xpath(xml, xpath_in, searchterm, matchcase=0):
''' perform search against XPath '''
exml = etree.fromstring(xml)
for xpath in exml.xpath(xpath_in, namespaces=config.NAMESPACES):
if matchcase == 1:
if xpath.text == searchterm:
return 'true'
else:
if xpath.text.lower() == searchterm.lower():
return 'true'
return 'false'
|
Python
| 0
|
@@ -1966,16 +1966,318 @@
urn -1%0A%0A
+def find_exml(val, attrib=False):%0A ''' Test that the XML value exists, return value, else return None '''%0A if val is not None:%0A if attrib == True: # it's an XML attribute%0A return val%0A else: # it's an XML value%0A return val.text%0A else:%0A return None%0A%0A
def nspa
|
f52a19fe28fa84b3d83ab20998fd678c795490dc
|
Remove generated code
|
lexgen/__init__.py
|
lexgen/__init__.py
|
__author__ = 'David'
|
Python
| 0.000001
|
@@ -1,21 +1 @@
-__author__ = 'David'
%0A
|
1aec0b6e6e8ada20d49e09b679406c9ad4e0f8c0
|
Fix /users/me API test (no redirect)
|
users/tests/test_api.py
|
users/tests/test_api.py
|
from django.core.urlresolvers import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from ..models import User
class UserTest(APITestCase):
"""Tests for /users/ API endpoints."""
def test_view_user_logged_out(self):
user = User.objects.create(name="Trey", email="trey@example.com")
url = reverse('user-detail', args=[user.pk])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'id': user.id,
'name': user.name,
})
def test_same_user(self):
user = User.objects.create(name="Trey", email="trey@example.com")
url = reverse('user-detail', args=[user.pk])
self.client.force_authenticate(user=user)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'id': user.id,
'name': user.name,
'email': user.email,
})
def test_different_user(self):
user1 = User.objects.create(name="User1", email="user1@example.com")
user2 = User.objects.create(name="User2", email="user2@example.com")
url = reverse('user-detail', args=[user1.pk])
self.client.force_authenticate(user=user2)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data, {
'id': user1.id,
'name': user1.name,
})
def test_me_logged_out(self):
url = reverse('user-detail', args=['me'])
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_me_logged_in(self):
user = User.objects.create(name="Trey", email="trey@example.com")
url = reverse('user-detail', args=['me'])
full_user_url = "http://testserver{}".format(
reverse('user-detail', args=[user.pk]))
self.client.force_authenticate(user=user)
response = self.client.get(url, format='json')
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.assertEqual(response['location'], full_user_url)
|
Python
| 0
|
@@ -2000,114 +2000,8 @@
'%5D)%0A
- full_user_url = %22http://testserver%7B%7D%22.format(%0A reverse('user-detail', args=%5Buser.pk%5D))%0A
@@ -2164,17 +2164,14 @@
TTP_
-302_FOUND
+200_OK
)%0A
@@ -2205,33 +2205,115 @@
onse
-%5B'location'%5D, full_user_url
+.data, %7B%0A 'id': user.id,%0A 'name': user.name,%0A 'email': user.email,%0A %7D
)%0A
|
5ffebc701faa0230b3bda31ad356f2261e711c21
|
correct shipping address
|
lfs/order/utils.py
|
lfs/order/utils.py
|
# lfs imports
import lfs.voucher.utils
from lfs.cart import utils as cart_utils
from lfs.core.signals import order_submitted
from lfs.customer import utils as customer_utils
from lfs.order.models import Order
from lfs.order.models import OrderItem
from lfs.payment import utils as payment_utils
from lfs.shipping import utils as shipping_utils
from lfs.voucher.models import Voucher
def add_order(request):
"""Adds an order based on current cart for the current customer.
It assumes that the customer is prepared with all needed information. This
is within the responsibility of the checkout form.
"""
customer = customer_utils.get_customer(request)
order = None
invoice_address = customer.selected_invoice_address
if customer.selected_shipping_address:
shipping_address = customer.selected_shipping_address
else:
shipping_address = customer.selected_invoice_address
cart = cart_utils.get_cart(request)
if cart is None:
return order
cart_costs = cart_utils.get_cart_costs(request, cart, total=False)
shipping_method = shipping_utils.get_selected_shipping_method(request)
shipping_costs = shipping_utils.get_shipping_costs(request, shipping_method)
payment_method = payment_utils.get_selected_payment_method(request)
payment_costs = payment_utils.get_payment_costs(request, payment_method)
    # Set email depending on login state. An anonymous customer doesn't have a
# django user account, so we set the name of the invoice address to the
# customer name.
# Note: After this has been processed the order's customer email has an
# email in any case. That means you can use it to send emails to the
# customer.
if request.user.is_authenticated():
user = request.user
customer_email = user.email
else:
user = None
customer_email = customer.selected_invoice_address.email
# Calculate the totals
price = cart_costs["price"] + shipping_costs["price"] + payment_costs["price"]
tax = cart_costs["tax"] + shipping_costs["tax"] + payment_costs["tax"]
# Add voucher if one exists
try:
voucher_number = lfs.voucher.utils.get_current_voucher_number(request)
voucher = Voucher.objects.get(number=voucher_number)
except Voucher.DoesNotExist:
voucher = None
else:
is_voucher_effective, voucher_message = voucher.is_effective(cart)
if is_voucher_effective:
voucher_number = voucher.number
voucher_price = voucher.get_price_gross(cart)
voucher_tax = voucher.get_tax(cart)
price -= voucher_price
tax -= voucher_tax
else:
voucher = None
order = Order.objects.create(
user = user,
session = request.session.session_key,
price = price,
tax = tax,
customer_firstname = customer.selected_invoice_address.firstname,
customer_lastname = customer.selected_invoice_address.lastname,
customer_email = customer_email,
shipping_method = shipping_method,
shipping_price = shipping_costs["price"],
shipping_tax = shipping_costs["tax"],
payment_method = payment_method,
payment_price = payment_costs["price"],
payment_tax = payment_costs["tax"],
invoice_firstname = customer.selected_invoice_address.firstname,
invoice_lastname = customer.selected_invoice_address.lastname,
invoice_line1 = invoice_address.postal_address.line1,
invoice_line2 = invoice_address.postal_address.line2,
invoice_line3 = invoice_address.postal_address.line3,
invoice_line4 = invoice_address.postal_address.line4,
invoice_line5 = invoice_address.postal_address.line5,
invoice_country = invoice_address.postal_address.country,
invoice_phone = customer.selected_invoice_address.phone,
shipping_firstname = customer.selected_shipping_address.firstname,
shipping_lastname = customer.selected_shipping_address.lastname,
shipping_line1 = shipping_address.postal_address.line1,
shipping_line2 = shipping_address.postal_address.line2,
shipping_line3 = shipping_address.postal_address.line3,
shipping_line4 = shipping_address.postal_address.line4,
shipping_line5 = shipping_address.postal_address.line5,
shipping_country = shipping_address.postal_address.country,
shipping_phone = customer.selected_shipping_address.phone,
message = request.POST.get("message", ""),
)
requested_delivery_date = request.POST.get("requested_delivery_date", None)
if requested_delivery_date is not None:
order.requested_delivery_date = requested_delivery_date
order.save()
if voucher:
voucher.mark_as_used()
order.voucher_number = voucher_number
order.voucher_price = voucher_price
order.voucher_tax = voucher_tax
order.save()
# Copy bank account if one exists
if customer.selected_bank_account:
bank_account = customer.selected_bank_account
order.account_number = bank_account.account_number
order.bank_identification_code = bank_account.bank_identification_code
order.bank_name = bank_account.bank_name
order.depositor = bank_account.depositor
order.save()
# Copy cart items
for cart_item in cart.cartitem_set.all():
OrderItem.objects.create(
order=order,
price_net = cart_item.get_price_net(),
price_gross = cart_item.get_price_gross(),
tax = cart_item.get_tax(),
product = cart_item.product,
product_sku = cart_item.product.sku,
product_name = cart_item.product.get_name(),
product_amount=cart_item.amount,
product_price_net = cart_item.product.get_price_net(),
product_price_gross = cart_item.product.get_price_gross(),
product_tax = cart_item.product.get_tax(),
)
cart_item.product.decrease_stock_amount(cart_item.amount)
cart.delete()
order_submitted.send({"order" : order, "request" : request})
# Note: Save order for later use in thank you page. The order will be
# removed from the session if the thank you page has been called.
request.session["order"] = order
return order
|
Python
| 0.000002
|
@@ -4473,34 +4473,16 @@
phone =
-customer.selected_
shipping
|
57dee4545b3c34d1e66943def8d5e45ee95d66bd
|
Make sure cmd is UTF-8 encoded before splitting. Fixes regression introduced by 80df2135e903bc167b70cd1a45e8d4eb803e87ed.
|
lib/ansible/runner/shell_plugins/powershell.py
|
lib/ansible/runner/shell_plugins/powershell.py
|
# (c) 2014, Chris Church <chris@ninemoreminutes.com>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import base64
import os
import re
import random
import shlex
import time
_common_args = ['PowerShell', '-NoProfile', '-NonInteractive']
# Primarily for testing, allow explicitly specifying PowerShell version via
# an environment variable.
_powershell_version = os.environ.get('POWERSHELL_VERSION', None)
if _powershell_version:
_common_args = ['PowerShell', '-Version', _powershell_version] + _common_args[1:]
def _escape(value, include_vars=False):
'''Return value escaped for use in PowerShell command.'''
# http://www.techotopia.com/index.php/Windows_PowerShell_1.0_String_Quoting_and_Escape_Sequences
# http://stackoverflow.com/questions/764360/a-list-of-string-replacements-in-python
subs = [('\n', '`n'), ('\r', '`r'), ('\t', '`t'), ('\a', '`a'),
('\b', '`b'), ('\f', '`f'), ('\v', '`v'), ('"', '`"'),
('\'', '`\''), ('`', '``'), ('\x00', '`0')]
if include_vars:
subs.append(('$', '`$'))
pattern = '|'.join('(%s)' % re.escape(p) for p, s in subs)
substs = [s for p, s in subs]
replace = lambda m: substs[m.lastindex - 1]
return re.sub(pattern, replace, value)
def _encode_script(script, as_list=False):
'''Convert a PowerShell script to a single base64-encoded command.'''
script = '\n'.join([x.strip() for x in script.splitlines() if x.strip()])
encoded_script = base64.b64encode(script.encode('utf-16-le'))
cmd_parts = _common_args + ['-EncodedCommand', encoded_script]
if as_list:
return cmd_parts
return ' '.join(cmd_parts)
def _build_file_cmd(cmd_parts):
'''Build command line to run a file, given list of file name plus args.'''
return ' '.join(_common_args + ['-ExecutionPolicy', 'Unrestricted', '-File'] + ['"%s"' % x for x in cmd_parts])
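
# Shell plugin used by the runner: each method returns a PowerShell command
# string (usually base64-encoded via _encode_script) for remote execution.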
class ShellModule(object):
def env_prefix(self, **kwargs):
return ''
def join_path(self, *args):
return os.path.join(*args).replace('/', '\\')
def path_has_trailing_slash(self, path):
# Allow Windows paths to be specified using either slash.
return path.endswith('/') or path.endswith('\\')
def chmod(self, mode, path):
return ''
def remove(self, path, recurse=False):
path = _escape(path)
if recurse:
return _encode_script('''Remove-Item "%s" -Force -Recurse;''' % path)
else:
return _encode_script('''Remove-Item "%s" -Force;''' % path)
def mkdtemp(self, basefile, system=False, mode=None):
basefile = _escape(basefile)
# FIXME: Support system temp path!
return _encode_script('''(New-Item -Type Directory -Path $env:temp -Name "%s").FullName | Write-Host -Separator '';''' % basefile)
def md5(self, path):
path = _escape(path)
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
(Get-FileHash -Path "%(path)s" -Algorithm MD5).Hash.ToLower();
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "3";
}
Else
{
Write-Host "1";
}
''' % dict(path=path)
return _encode_script(script)
def build_module_command(self, env_string, shebang, cmd, rm_tmp=None):
cmd_parts = shlex.split(cmd, posix=False)
if not cmd_parts[0].lower().endswith('.ps1'):
cmd_parts[0] = '%s.ps1' % cmd_parts[0]
script = _build_file_cmd(cmd_parts)
if rm_tmp:
rm_tmp = _escape(rm_tmp)
script = '%s; Remove-Item "%s" -Force -Recurse;' % (script, rm_tmp)
return _encode_script(script)
|
Python
| 0
|
@@ -3974,32 +3974,66 @@
, rm_tmp=None):%0A
+ cmd = cmd.encode('utf-8')%0A
cmd_part
|
6ff357974998810a7701c42471fc8806b0de66fc
|
fix tap() which needed old code deleted
|
src/AppiumLibrary/keywords/_touch.py
|
src/AppiumLibrary/keywords/_touch.py
|
# -*- coding: utf-8 -*-
from appium.webdriver.common.touch_action import TouchAction
from AppiumLibrary.locators import ElementFinder
from .keywordgroup import KeywordGroup
class _TouchKeywords(KeywordGroup):
def __init__(self):
self._element_finder = ElementFinder()
# Public, element lookups
def zoom(self, locator, percent="200%", steps=1):
"""
Zooms in on an element a certain amount.
"""
driver = self._current_application()
element = self._element_find(locator, True, True)
driver.zoom(element=element, percent=percent, steps=steps)
def pinch(self, locator, percent="200%", steps=1):
"""
Pinch in on an element a certain amount.
"""
driver = self._current_application()
element = self._element_find(locator, True, True)
driver.pinch(element=element, percent=percent, steps=steps)
def swipe(self, start_x, start_y, offset_x, offset_y, duration=1000):
"""
Swipe from one point to another point, for an optional duration.
Args:
- start_x - x-coordinate at which to start
- start_y - y-coordinate at which to start
- offset_x - x-coordinate distance from start_x at which to stop
- offset_y - y-coordinate distance from start_y at which to stop
- duration - (optional) time to take the swipe, in ms.
Usage:
| Swipe | 500 | 100 | 100 | 0 | 1000 |
        *!Important Note:* Android `Swipe` does not work properly; use ``offset_x`` and ``offset_y``
        as if they were destination points.
"""
driver = self._current_application()
driver.swipe(start_x, start_y, offset_x, offset_y, duration)
def scroll(self, start_locator, end_locator):
"""
Scrolls from one element to another
Key attributes for arbitrary elements are `id` and `name`. See
`introduction` for details about locating elements.
"""
el1 = self._element_find(start_locator, True, True)
el2 = self._element_find(end_locator, True, True)
driver = self._current_application()
driver.scroll(el1, el2)
def scroll_down(self, locator):
"""Scrolls down to element"""
driver = self._current_application()
element = self._element_find(locator, True, True)
driver.execute_script("mobile: scroll", {"direction": 'down', 'element': element.id})
def scroll_up(self, locator):
"""Scrolls up to element"""
driver = self._current_application()
element = self._element_find(locator, True, True)
driver.execute_script("mobile: scroll", {"direction": 'up', 'element': element.id})
def long_press(self, locator):
""" Long press the element """
driver = self._current_application()
element = self._element_find(locator, True, True)
long_press = TouchAction(driver).long_press(element)
long_press.perform()
def tap(self, locator, x_offset=None, y_offset=None, count=1):
""" Tap element identified by ``locator``.
Args:
- ``x_offset`` - (optional) x coordinate to tap, relative to the top left corner of the element.
- ``y_offset`` - (optional) y coordinate. If y is used, x must also be set, and vice versa
        - ``count`` - can be used to tap the element multiple times
"""
driver = self._current_application()
el = self._element_find(locator, True, True)
action = TouchAction(driver)
action.tap(el,x_offset,y_offset, count).perform()
driver = self._current_application()
el = self._element_find(locator, True, True)
action = TouchAction(driver)
action.tap(el).perform()
def click_a_point(self, x=0, y=0, duration=100):
""" Click on a point"""
self._info("Clicking on a point (%s,%s)." % (x,y))
driver = self._current_application()
action = TouchAction(driver)
try:
action.press(x=float(x), y=float(y)).wait(float(duration)).release().perform()
except:
assert False, "Can't click on a point at (%s,%s)" % (x,y)
def click_element_at_coordinates(self, coordinate_X, coordinate_Y):
""" click element at a certain coordinate """
self._info("Pressing at (%s, %s)." % (coordinate_X, coordinate_Y))
driver = self._current_application()
action = TouchAction(driver)
action.press(x=coordinate_X, y=coordinate_Y).release().perform()
|
Python
| 0.000005
|
@@ -3699,182 +3699,8 @@
%0D%0A%0D%0A
- driver = self._current_application()%0D%0A el = self._element_find(locator, True, True)%0D%0A action = TouchAction(driver)%0D%0A action.tap(el).perform()%0D%0A%0D%0A
|
e21056572381b92926b4fa8c7d347a0e129a6471
|
Fix OSError in functional test due to incomplete teardown condition
|
tests/functional_tests.py
|
tests/functional_tests.py
|
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import subprocess
from os.path import dirname, abspath
from time import time, sleep
from re import match
from unittest import TestCase, main, skip
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from statsdmetrics.client import Client
from statsdmetrics.client.tcp import TCPClient
TEST_DIR = dirname(abspath(__file__))
BASE_DIR = dirname(TEST_DIR)
CONF_FILE = os.path.join(TEST_DIR, 'fixtures', 'functional_test_config.ini')
APP_BIN = os.path.join(BASE_DIR, 'bin', 'navdoon_src')
class TestNavdoonStatsdServer(TestCase):
tcp_port = 8126
udp_port = 8125
flush_interval = 2
@classmethod
def create_server_program_args(cls):
return [
APP_BIN,
"--config", CONF_FILE,
'--flush-interval', str(cls.flush_interval),
'--collect-udp', '127.0.0.1:{}'.format(cls.udp_port)
]
def setUp(self):
self.app_args = self.create_server_program_args()
self.app_proc = subprocess.Popen(
self.app_args,
stderr=subprocess.PIPE, stdout=subprocess.PIPE,
universal_newlines=True,
)
try:
self._wait_until_server_starts_collecting()
except RuntimeError as error:
self.app_proc.kill()
raise error
def _wait_until_server_starts_collecting(self, timeout=10):
self._wait_until_log_matches('.*server.+collectors.+are\s+running', timeout, 0.05)
def _wait_until_server_shuts_down(self, timeout=10):
self._wait_until_log_matches('.*server shutdown successfully', timeout, 0.05)
def _wait_until_log_matches(self, pattern, timeout=10, sleep_time=None):
start_time = time()
while True:
if time() - start_time > timeout:
raise RuntimeError(
"waiting for pattern {} in server logs timedout".format(
pattern))
line = self.app_proc.stderr.readline().strip()
if match(pattern, line):
break
if sleep_time:
sleep(sleep_time)
def tearDown(self):
self.app_proc.kill()
@skip("test is incomplete")
def test_udp_collectors_flushing_stdout(self):
client = Client("localhost", self.__class__.udp_port)
for i in range(1, 5):
client.increment("event")
for i in range(1, 5):
client.timing("process", 10.1)
# wait for at least 1 flush
sleep(self.__class__.flush_interval)
self.app_proc.terminate()
self._wait_until_server_shuts_down()
flushed_metrics, logs = self.app_proc.communicate()
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2258,32 +2258,110 @@
tearDown(self):%0A
+ self.app_proc.poll()%0A if self.app_proc.returncode is None:%0A
self.app
@@ -2388,17 +2388,17 @@
kip(
-%22
+'
test is
inco
@@ -2397,18 +2397,20 @@
is
-i
n
+ot
complete
%22)%0A
@@ -2405,17 +2405,17 @@
complete
-%22
+'
)%0A de
@@ -2882,16 +2882,166 @@
icate()%0A
+ flushed_metrics = %5Bl.strip() for l in flushed_metrics.split()%5D%0A self.assertGreater(len(flushed_metrics), 6, 'flushed enough metrics')%0A%0A
%0A%0Aif __n
|
c39be26dbcafe411741d7098974162280377b61c
|
Fix Python 2.x bytes/str bug
|
test_walls.py
|
test_walls.py
|
# -*- coding: utf-8 -*-
"""Test walls."""
from walls import Walls, load_config, stderr_and_exit, main
import py
import pytest
@pytest.fixture
def config(tmpdir):
f = tmpdir.join('config.ini')
f.write('''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = 1920
height = 1080
'''.format(tmpdir))
return str(f)
@pytest.fixture
def walls(config):
"""Create a Walls object with a default config."""
cfg = load_config(['walls', config])
return Walls(cfg)
class SystemExitContext(object):
"""Run pytest.raises, and check the error message."""
def __init__(self, msg, capsys):
self.raises = pytest.raises(SystemExit)
self.capsys = capsys
self.msg = msg
def __enter__(self):
return self.raises.__enter__()
def __exit__(self, *args):
assert self.capsys.readouterr()[1] == self.msg
return self.raises.__exit__(*args)
@pytest.fixture
def errmsg(capsys):
"""Make sure we exit with the given error message."""
def fixture(msg):
return SystemExitContext(msg, capsys)
return fixture
def test_stderr_and_exit(errmsg):
"""Make sure that stderr_and_exit (and therefore errmsg) works."""
with errmsg('Some error message'):
stderr_and_exit('Some error message')
def test_usage(errmsg):
"""Make sure we print out the usage if the arguments are invalid."""
with errmsg('Usage: walls [config_file]\n'):
load_config(['walls', 'config_file', 'blah'])
def test_default_config(config, monkeypatch):
"""Override expanduser to point to our temporary config file."""
def my_expanduser(path):
if path == '~/.wallsrc':
return config
return path
monkeypatch.setattr('os.path.expanduser', my_expanduser)
cfg = load_config(['walls'])
assert cfg.get('walls', 'api_key') == 'myapikey'
def test_supplied_config(config):
"""Test a config file passed as a command line argument."""
cfg = load_config(['walls', config])
assert cfg.get('walls', 'api_key') == 'myapikey'
def test_invalid_config(errmsg):
"""Make sure an error is raised if the config file can't be read."""
with errmsg("Couldn't load config fake.ini\n"):
load_config(['walls', 'fake.ini'])
def test_config_no_walls(tmpdir, errmsg):
"""Check for missing [walls] section."""
f = tmpdir.join('config.ini')
f.write('\n')
with errmsg('Config missing [walls] section.\n'):
load_config(['walls', str(f)])
def test_config_missing(tmpdir, errmsg):
"""Check behavior on missing config values."""
f = tmpdir.join('config.ini')
f.write('''
[walls]
api_secret = myapisecret
image_dir = {0}
width = 1920
height = 1080
'''.format(tmpdir))
with errmsg("Missing config keys: 'api_key', 'tags'\n"):
load_config(['walls', str(f)])
def test_config_types(tmpdir, errmsg):
"""Check behavior on missing config values."""
f = tmpdir.join('config.ini')
f.write('''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = abc
height = def
'''.format(tmpdir))
with errmsg("The following must be integers: 'width', 'height'\n"):
load_config(['walls', str(f)])
def test_config_dest(tmpdir, errmsg):
"""Nonexistent destination directory."""
cfg = '''
[walls]
api_key = myapikey
api_secret = myapisecret
tags = sanfrancisco
image_dir = {0}
width = 1920
height = 1080
'''
f = tmpdir.join('config1.ini')
f.write(cfg.format('/does/not/exist'))
with errmsg('The directory /does/not/exist does not exist.\n'):
load_config(['walls', str(f)])
f = tmpdir.join('config2.ini')
f.write(cfg.format(f))
with errmsg('The directory {0} does not exist.\n'.format(f)):
load_config(['walls', str(f)])
def test_smallest_url(walls):
data = {
'sizes': {'size': [
{
'width': '1280',
'height': '720',
'source': 'url1',
},
{
'width': '1920',
'height': '1080',
'source': 'url2',
},
{
'width': '2560',
'height': '1440',
'source': 'url3',
},
]},
}
walls.flickr.photos_getSizes = lambda **kw: data
assert walls.smallest_url('fake') == 'url2'
def test_first_photo_invalid(walls, errmsg):
data = None
walls.flickr.photos_getSizes = lambda **kw: data
walls.flickr.walk = lambda **kw: [{'id': '1'}]
for d in [[], {}, {'sizes': 1}, {'sizes': []}, {'sizes': {'size': 1}},
{'sizes': {'size': [1]}}, {'sizes': {'size': [{}]}}]:
data = d
with errmsg('Unexpected data from Flickr.\n'):
walls.first_photo()
def test_first_photo(walls):
def smallest_url(pid):
if pid == '2':
return '#{0}'.format(pid)
walls.smallest_url = smallest_url
walls.flickr.walk = lambda **kw: [{'id': '1'}, {'id': '2'}]
assert walls.first_photo() == '#2'
walls.flickr.walk = lambda **kw: []
assert walls.first_photo() is None
def test_run_invalid(walls, errmsg):
walls.first_photo = lambda **kw: None
with errmsg('No matching photos found.\n'):
walls.run()
def test_main(monkeypatch, config):
monkeypatch.setattr('sys.argv', ['walls', config])
monkeypatch.setattr('walls.Walls.run', lambda self: None)
main()
def test_download(monkeypatch, walls):
class FakeResponse(object):
def __init__(self, *a, **kw):
pass
def raise_for_status(self):
pass
def iter_content(self, *a, **kw):
for b in b'this is the data':
yield bytes([b])
monkeypatch.setattr('requests.get', FakeResponse)
walls.download('file.txt')
p = py.path.local(walls.config.get('walls', 'image_dir'), expanduser=True)
assert p.join('file.txt').read() == 'this is the data'
|
Python
| 0.000813
|
@@ -5735,14 +5735,13 @@
for
-b
+c
in
-b
'thi
@@ -5782,17 +5782,17 @@
eld
-bytes(%5Bb%5D
+c.encode(
)%0A%0A
|
aff805625f465421277447e5bd2a53a552dd175f
|
Fix assertion and error
|
exp/util/MCEvaluator.py
|
exp/util/MCEvaluator.py
|
import numpy
import numpy.testing as nptst
class MCEvaluator(object):
"""
A class to evaluate machine learning performance for the matrix completion
problem.
"""
def __init__(self):
pass
@staticmethod
def meanSqError(testX, predX):
"""
Find the mean squared error between two sparse matrices testX and predX.
Note that the matrices must have nonzero elements in the same places.
"""
nptst.assert_array_equal(testX.nonzero()[0], predX.nonzero()[0])
nptst.assert_array_equal(testX.nonzero()[1], predX.nonzero()[1])
diff = testX - predX
if diff.data.shape[0] != 0:
return numpy.mean(diff.data**2)
else:
return 0
@staticmethod
def rootMeanSqError(testX, predX):
"""
Find the root mean squared error between two sparse matrices testX and predX.
"""
return numpy.sqrt(MCEvaluator.meanSqError(testX, predX))
|
Python
| 0.000012
|
@@ -468,46 +468,81 @@
-%0A nptst.assert_array_equal(test
+#Note that some predictions might be zero %0A assert numpy.in1d(pred
X.no
@@ -553,20 +553,20 @@
o()%5B0%5D,
-pred
+test
X.nonzer
@@ -572,16 +572,23 @@
ro()%5B0%5D)
+.all()
%0A
@@ -592,37 +592,30 @@
-nptst.assert_array_equal(test
+assert numpy.in1d(pred
X.no
@@ -626,20 +626,20 @@
o()%5B1%5D,
-pred
+test
X.nonzer
@@ -645,16 +645,23 @@
ro()%5B1%5D)
+.all()
%0A
@@ -708,75 +708,25 @@
-%0A if diff.data.shape%5B0%5D != 0: %0A return
+error =
numpy.
-mean
+sum
(dif
@@ -739,29 +739,29 @@
**2)
- %0A else: %0A
+/testX.data.shape%5B0%5D%0A
@@ -775,10 +775,14 @@
urn
-0
+error%0A
%0A
|
b7256a0696331b5b0889708449ebb93ef90fab4a
|
add language and save function.
|
git-ignore.py
|
git-ignore.py
|
#! /usr/bin/env python2
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Ciel <imwithye@gmail.com>
#
# Distributed under terms of the MIT license.
import sys
# print version
def version():
print "git ignore, version 0.1."
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# print usage
def usage():
print "usage: git ignore <subcommand>"
print
print "Available subcommands are:"
print " language Add gitignore files. Try use 'git ignore language Python C'"
print " save Save current .gitignore file as a template"
print " usage Show this help message and exit"
print " version Show version and exit"
print
print "http://github.com/imwithye/git-ignore"
print "git ignore, copyright Ciel <imwithye@gmail.com>"
# subcommand router
def select( argv ):
if argv[1] == "language":
print "language"
elif argv[1] == "save":
print "save"
elif argv[1] == "help" or argv[1] == "usage":
usage()
exit()
elif argv[1] == "version":
version()
exit()
else:
print "unknown subcommand"
usage()
exit()
if __name__ == "__main__":
if len(sys.argv)==1:
sys.argv.append("usage")
select(sys.argv)
|
Python
| 0
|
@@ -170,183 +170,86 @@
ys%0A%0A
-# print version%0Adef version():%0A%09print %22git ignore, version 0.1.%22%0A%09print%0A%09print %22http://github.com/imwithye/git-ignore%22%0A%09print %22git ignore, copyright Ciel %3Cimwithye@gmail.com%3E%22
+def language(languages):%0A%09print languages%0A%0Adef save(filename):%0A%09print filename
%0A%0A#
@@ -725,16 +725,193 @@
.com%3E%22%0A%0A
+# print version%0Adef version():%0A%09print %22git ignore, version 0.1.%22%0A%09print%0A%09print %22http://github.com/imwithye/git-ignore%22%0A%09print %22git ignore, copyright Ciel %3Cimwithye@gmail.com%3E%22%0A%0A
# subcom
@@ -975,23 +975,16 @@
:%0A%09%09
-print %22
language
%22%0A%09e
@@ -979,17 +979,35 @@
language
-%22
+(argv%5B2:%5D)%0A%09%09exit()
%0A%09elif a
@@ -1030,20 +1030,31 @@
:%0A%09%09
-print %22save%22
+save(argv%5B2:%5D)%0A%09%09exit()
%0A%09el
|
9ac48a3eb3c39d0cad0f6a095a53abde6f88c388
|
Fix socket timeout parameter
|
experiments/counters.py
|
experiments/counters.py
|
from django.conf import settings
from django.utils.functional import cached_property
import redis
from redis.sentinel import Sentinel
from redis.exceptions import ConnectionError, ResponseError
COUNTER_CACHE_KEY = 'experiments:participants:%s'
COUNTER_FREQ_CACHE_KEY = 'experiments:freq:%s'
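
# Each experiment key maps to two Redis hashes: per-participant counts
# (COUNTER_CACHE_KEY) and a histogram of how many participants have each
# count value (COUNTER_FREQ_CACHE_KEY).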
class Counters(object):
@cached_property
def _redis(self):
if getattr(settings, 'EXPERIMENTS_REDIS_SENTINELS', None):
sentinel = Sentinel(settings.EXPERIMENTS_REDIS_SENTINELS)
host, port = sentinel.discover_master(settings.EXPERIMENTS_REDIS_MASTER_NAME, socket_timeout=settings.EXPERIMENTS_REDIS_SENTINELS_TIMEOUT)
else:
host = getattr(settings, 'EXPERIMENTS_REDIS_HOST', 'localhost')
port = getattr(settings, 'EXPERIMENTS_REDIS_PORT', 6379)
password = getattr(settings, 'EXPERIMENTS_REDIS_PASSWORD', None)
db = getattr(settings, 'EXPERIMENTS_REDIS_DB', 0)
return redis.Redis(host=host, port=port, password=password, db=db)
def increment(self, key, participant_identifier, count=1):
if count == 0:
return
try:
cache_key = COUNTER_CACHE_KEY % key
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
new_value = self._redis.hincrby(cache_key, participant_identifier, count)
# Maintain histogram of per-user counts
if new_value > count:
self._redis.hincrby(freq_cache_key, new_value - count, -1)
self._redis.hincrby(freq_cache_key, new_value, 1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def clear(self, key, participant_identifier):
try:
# Remove the direct entry
cache_key = COUNTER_CACHE_KEY % key
pipe = self._redis.pipeline()
freq, _ = pipe.hget(cache_key, participant_identifier).hdel(cache_key, participant_identifier).execute()
# Remove from the histogram
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.hincrby(freq_cache_key, freq, -1)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
pass
def get(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
return self._redis.hlen(cache_key)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequency(self, key, participant_identifier):
try:
cache_key = COUNTER_CACHE_KEY % key
freq = self._redis.hget(cache_key, participant_identifier)
return int(freq) if freq else 0
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return 0
def get_frequencies(self, key):
try:
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
# In some cases when there are concurrent updates going on, there can
# briefly be a negative result for some frequency count. We discard these
# as they shouldn't really affect the result, and they are about to become
# zero anyway.
return dict((int(k), int(v)) for (k, v) in self._redis.hgetall(freq_cache_key).items() if int(v) > 0)
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return tuple()
def reset(self, key):
try:
cache_key = COUNTER_CACHE_KEY % key
self._redis.delete(cache_key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % key
self._redis.delete(freq_cache_key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
def reset_pattern(self, pattern_key):
        # Similar to above, but can pass pattern as arg instead
try:
cache_key = COUNTER_CACHE_KEY % pattern_key
for key in self._redis.keys(cache_key):
self._redis.delete(key)
freq_cache_key = COUNTER_FREQ_CACHE_KEY % pattern_key
for key in self._redis.keys(freq_cache_key):
self._redis.delete(key)
return True
except (ConnectionError, ResponseError):
# Handle Redis failures gracefully
return False
|
Python
| 0.000001
|
@@ -492,16 +492,77 @@
ENTINELS
+, socket_timeout=settings.EXPERIMENTS_REDIS_SENTINELS_TIMEOUT
)%0A
@@ -647,69 +647,8 @@
NAME
-, socket_timeout=settings.EXPERIMENTS_REDIS_SENTINELS_TIMEOUT
)%0A
|
b6ab29eed44fa0b63043d1481d835a1b25418a22
|
Remove unused code
|
tests/http/test_router.py
|
tests/http/test_router.py
|
from __future__ import unicode_literals
import os
import unittest
import mock
from mopidy import http
class TestRouter(http.Router):
name = 'test'
static_file_path = os.path.join(os.path.dirname(__file__), 'static')
class TestRouterMissingName(http.Router):
pass
class HttpRouterTest(unittest.TestCase):
def setUp(self):
self.config = {
'http': {
'hostname': '127.0.0.1',
'port': 6680,
'static_dir': None,
'zeroconf': '',
}
}
self.core = mock.Mock()
def test_keeps_reference_to_config_and_core(self):
router = TestRouter(self.config, self.core)
self.assertIs(router.config, self.config)
self.assertIs(router.core, self.core)
def test_undefined_name_raises_error(self):
with self.assertRaises(ValueError):
TestRouterMissingName(self.config, self.core)
def test_undefined_request_handlers_raises_error(self):
router = TestRouter(self.config, self.core)
with self.assertRaises(NotImplementedError):
router.get_request_handlers()
|
Python
| 0
|
@@ -38,18 +38,8 @@
ls%0A%0A
-import os%0A
impo
@@ -143,81 +143,8 @@
st'%0A
- static_file_path = os.path.join(os.path.dirname(__file__), 'static')%0A
%0A%0Acl
|
29b8b8b7415431547e85564aa7b426e244542b85
|
Insert comment about rigs for future reference
|
ext/killmails/poster.py
|
ext/killmails/poster.py
|
import typing
import tinydb
from discord.ext import commands
from utils.esicog import EsiCog
from utils.log import get_logger
ESI_SWAGGER_JSON = 'https://esi.evetech.net/latest/swagger.json'
def setup(bot: commands.Bot):
bot.add_cog(KillmailPoster(bot))
class KillmailPoster(EsiCog):
def __init__(self, bot: commands.Bot):
super(KillmailPoster, self).__init__(bot)
self.logger = get_logger(__name__, bot)
self.bot = bot
self.relevancy_table = self.bot.tdb.table("killmails.relevancies")
self.relevancy = tinydb.Query()
async def on_killmail(self, package: dict, **dummy_kwargs):
if not await self.is_relevant(package):
self.logger.debug("Ignoring irrelevant killmail")
return
self.logger.info("esi_app")
async def is_relevant(self, package: dict) -> bool:
victim = package["killmail"]["victim"]
if await self.is_corporation_relevant(victim["corporation_id"]):
return True
for attacker in package["killmail"]["attackers"]:
if "corporation_id" not in attacker:
continue # Some NPCs do not have a corporation.
            if await self.is_corporation_relevant(attacker["corporation_id"]):
return True
return False
async def is_corporation_relevant(self, corporation_id: int) -> bool:
if corporation_id in await self.get_relevant_corporations():
return True
if corporation_id in await self.get_relevant_alliances():
return True
return False
async def get_relevant_corporations(self) -> typing.List[int]:
corps = self.relevancy_table.search(
self.relevancy.type == "corporation")
return [entry["value"] for entry in corps]
async def get_relevant_alliances(self) -> typing.List[int]:
corp_ids = set()
alliances = self.relevancy_table.search(
self.relevancy.type == "alliance")
alliance_ids = [entry["value"] for entry in alliances]
for alliance_id in alliance_ids:
alliance_corp_ids = await self.get_alliance_corporations(
alliance_id)
corp_ids.update(alliance_corp_ids)
return list(corp_ids)
async def get_alliance_corporations(self,
alliance_id: int) -> typing.List[int]:
esi_app = await self.get_esi_app()
operation = esi_app.op["get_alliances_alliance_id_corporations"](
alliance_id=alliance_id)
esi_client = await self.get_esi_client()
# response = esi_client.request(operation)
response = await self.esi_request(self.bot.loop, esi_client, operation)
return response.data
|
Python
| 0
|
@@ -762,16 +762,66 @@
return%0A
+ # Rig group_ids are 781-786 inclusive%0A
|
094ff00f180926d44e2102f119beb33354fc7122
|
Remove unused import
|
tests/base.py
|
tests/base.py
|
import logging
import os
import shutil
from tvrenamr.config import Config
from tvrenamr.main import File, TvRenamr
from . import mock_requests # noqa
logging.disable(logging.CRITICAL)
class BaseTest(object):
def setup(self):
# absolute path to the file is pretty useful
self.path = os.path.abspath(os.path.dirname(__file__))
def join_path(path):
return os.path.join(self.path, path)
self.files = join_path('files')
self.subfolder = join_path('subfolder')
self.organised = join_path('organised')
self.renamed = join_path('renamed')
        # if `files` isn't there, make it
if not os.path.exists(self.files):
os.mkdir(self.files)
if not os.path.exists(self.subfolder):
os.mkdir(self.subfolder)
for path in (self.files, self.subfolder):
self.build_files(path)
# instantiate tvr
self.config = Config()
self.config.config['defaults']['renamed'] = self.files
self.tv = TvRenamr(self.files, self.config, cache=False)
self._file = File('The Big Bang Theory', '3', ['01'], '.mp4')
self._file.episodes[0].title = 'The Electric Can Opener Fluctuation'
def teardown(self):
shutil.rmtree(self.files)
shutil.rmtree(self.subfolder)
def build_files(self, path):
# build the file list
with open(os.path.join(self.path, 'file_list'), 'r') as f:
for fn in f.readlines():
filepath = os.path.abspath(os.path.join(path, fn.strip()))
with open(filepath, 'w') as f:
f.write('')
|
Python
| 0.000001
|
@@ -114,45 +114,8 @@
mr%0A%0A
-from . import mock_requests # noqa%0A%0A
%0Alog
|
1638c588ee2fe3aa27e82691fbd600345dd659d2
|
Fix date widget error in Django 1.3
|
ain7/widgets.py
|
ain7/widgets.py
|
# -*- coding: utf-8 -*-
#
# widgets.py
#
# Copyright © 2007-2011 AIn7 Devel Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
#
#
# Widget from http://www.djangosnippets.org/snippets/391/
# widgets.py
#
# To use you have to put calendar/ (from http://www.dynarch.com/projects/calendar/)
# to your MEDIA folder and then include such links on your page:
# <!-- calendar -->
# <link rel="stylesheet" type="text/css" href="{{ MEDIA_URL }}calendar/calendar-win2k-cold-2.css" />
#<script type="text/javascript" src="{{ MEDIA_URL }}calendar/calendar.js"></script>
# <!-- this is translation file - choose your language here -->
#<script type="text/javascript" src="{{ MEDIA_URL }}calendar/lang/calendar-pl.js"></script>
#<script type="text/javascript" src="{{ MEDIA_URL }}calendar/calendar-setup.js"></script>
#<!-- /calendar -->
from datetime import datetime, date
from time import strptime
from django import forms
from django.conf import settings
# DATETIMEWIDGET
calbtn = u"""<img src="%simages/calbutton.gif" alt="calendar" id="%s_btn" style="cursor: pointer; border: 1px solid #8888aa;" title="Select date and time"
onmouseover="this.style.background='#444444';" onmouseout="this.style.background=''" />
<script type="text/javascript">
Calendar.setup({
inputField : "%s",
ifFormat : "%s",
button : "%s_btn",
singleClick : true,
showsTime : true
});
</script>"""
class DateTimeWidget(forms.widgets.TextInput):
dformat = '%d/%m/%Y %H:%M'
def render(self, name, value, attrs=None):
if value is None: value = ''
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
if value != '':
try:
final_attrs['value'] = \
value.strftime(self.dformat)
except:
final_attrs['value'] = \
value
if not final_attrs.has_key('id'):
final_attrs['id'] = u'%s_id' % (name)
id = final_attrs['id']
jsdformat = self.dformat #.replace('%', '%%')
cal = calbtn % (settings.MEDIA_URL, id, id, jsdformat, id)
a = u'<input%s />%s' % (forms.util.flatatt(final_attrs), cal)
return a
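
    # Parses the posted string: tries the widget's own format first, then
    # Django's default datetime input formats.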
def value_from_datadict(self, data, files, name):
dtf = forms.fields.DEFAULT_DATETIME_INPUT_FORMATS
empty_values = forms.fields.EMPTY_VALUES
value = data.get(name, None)
# print "Value = %s " % value
if value in empty_values:
return None
if isinstance(value, datetime):
return value
if isinstance(value, date):
return datetime(value.year, value.month, value.day)
try:
return datetime(*strptime(value, self.dformat)[:6])
except ValueError:
pass
for format in dtf:
try:
return datetime(*strptime(value, format)[:6])
except ValueError:
continue
return None
|
Python
| 0.000064
|
@@ -1664,16 +1664,50 @@
ttings%0D%0A
+from django.utils import formats%0D%0A
%0D%0A%0D%0A# DA
@@ -3164,25 +3164,24 @@
form
-s.fields.DEFAULT_
+ats.get_format('
DATE
@@ -3198,16 +3198,18 @@
_FORMATS
+')
%0D%0A
|
0c0a1d0ec480c7df9dd8821d40af7791e46db453
|
Fix for wrong test: create_semester_accounts
|
tests/lib/test_finance.py
|
tests/lib/test_finance.py
|
# Copyright (c) 2013 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
from tests import OldPythonTestCase
__author__ = 'felix_kluge'
from pycroft.lib.finance import create_semester
from pycroft.lib.config import get,config
from pycroft.model.finance import FinanceAccount
from sqlalchemy.orm import backref
from pycroft.model import session
import time
from datetime import date
class Test_010_Semester(OldPythonTestCase):
def test_0010_create_semester_accounts(self):
"""
This test should verify that all semester-related finance-accounts have
been created.
"""
new_semester = create_semester("NewSemesterName", 2500, 1500, date(2013, 9, 1), date(2014, 2, 1))
config._configpath = "../tests/example/test_config.json"
for account in config["finance"]["semester_accounts"]:
for new_account in new_semester.accounts:
if(new_account.tag == account["tag"]):
new_account_equivalent = new_account
compare_account = FinanceAccount(type=account["type"],name=account["name"],semester=new_semester,tag=account["tag"])
self.assertEqual(new_account_equivalent.name, compare_account.name)
self.assertEqual(new_account_equivalent.type, compare_account.type)
|
Python
| 0.000161
|
@@ -313,16 +313,28 @@
semester
+, import_csv
%0Afrom py
@@ -361,16 +361,17 @@
ort get,
+
config%0Af
@@ -417,16 +417,39 @@
eAccount
+, Journal, JournalEntry
%0Afrom sq
@@ -547,16 +547,26 @@
ort date
+, datetime
%0A%0A%0Aclass
@@ -842,38 +842,155 @@
me%22,
- 2500, 1500, date(2013, 9, 1),
+%0A 2500, 1500,%0A date(2013, 9, 1),%0A
dat
@@ -1148,275 +1148,150 @@
-for new_account in new_semester.accounts:%0A if(new_account.tag == account%5B%22tag%22%5D):%0A new_account_equivalent = new_account%0A compare_account = FinanceAccount(type=account%5B%22type%22%5D,name=account%5B%22name%22%5D,semester=new_semester,tag=
+new_created_account = FinanceAccount.q.filter(%0A FinanceAccount.semester == new_semester,%0A FinanceAccount.tag ==
acco
@@ -1301,16 +1301,24 @@
%5B%22tag%22%5D)
+.first()
%0A
@@ -1347,53 +1347,45 @@
new_
-account_equivalent.name, compare_
+created_account.name,
account
-.
+%5B%22
name
+%22%5D
)%0A
@@ -1419,51 +1419,76 @@
new_
-account_equivalent.type, compare_
+created_account.type,
account
-.
+%5B%22
type
+%22%5D)%0A session.session.commit(
)%0A
|
38de328a3899b96542f702a51eb180efb47a5556
|
Fix town regroup bugs
|
geodjangofla/management/commands/fixtowns.py
|
geodjangofla/management/commands/fixtowns.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This code is free software; you can redistribute it and/or modify it
# under the terms of the BSD License (see the file COPYING included with
# the distribution).
import os
from optparse import make_option
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.gdal import DataSource
from django.core.management.base import BaseCommand, CommandError
from geodjangofla import settings
from geodjangofla import models
from geodjangofla.utils import dbf
class Command(BaseCommand):
    help = 'Regroups arrondissements into a single commune'
def handle(self, *args, **options):
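        # Keep only each commune's 1st arrondissement record, rename it to
        # the plain commune name and delete the other arrondissements.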
for commune in models.Commune.objects.filter(
nom_comm__endswith='-ARRONDISSEMENT').all():
items = commune.nom_comm.split('--')
if len(items) < 3:
items = commune.nom_comm.split('-')
nb_ardt = items[-2]
if nb_ardt[0:2] != '1E':
commune.delete()
continue
commune.nom_comm = "-".join(items[0:-2])
if commune.nom_comm.endswith('-'):
commune.nom_comm = commune.nom_comm[:-1]
commune.save()
self.stdout.write('Regroup done\n')
|
Python
| 0.000003
|
@@ -647,16 +647,36 @@
tions):%0A
+ limits = %7B%7D%0A
@@ -954,16 +954,355 @@
ems%5B-2%5D%0A
+ nom_comm = %22-%22.join(items%5B0:-2%5D)%0A if nom_comm.endswith('-'):%0A nom_comm = nom_comm%5B:-1%5D%0A key = (nom_comm, commune.insee_com%5B0:2%5D)%0A if key not in limits:%0A limits%5Bkey%5D = commune.limite%0A else:%0A limits%5Bkey%5D = limits%5Bkey%5D.union(commune.limite)%0A
@@ -1419,37 +1419,24 @@
_comm =
-%22-%22.join(items%5B0:-2%5D)
+nom_comm
%0A
@@ -1444,19 +1444,16 @@
-if
commune.
nom_
@@ -1452,38 +1452,84 @@
une.
-nom_comm.endswith('-'):%0A
+save()%0A for nom_comm, dpt in limits:%0A print nom_comm, dpt%0A
@@ -1538,43 +1538,191 @@
+
com
-mune.nom_comm = commune.
+ = models.Commune.objects.get(nom_comm__startswith=nom_comm,%0A insee_com__startswith=dpt)%0A com.limite = limits%5B(
nom_comm
%5B:-1
@@ -1709,36 +1709,38 @@
limits%5B(nom_comm
-%5B:-1
+, dpt)
%5D%0A co
@@ -1732,36 +1732,32 @@
%0A com
-mune
.save()%0A
|
9a6ceeecd91b783b95783a07952e5f2f6328b1fd
|
Fix G1 bug
|
news.py
|
news.py
|
import feedparser
import nltk
import re
import requests
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
import networkx as nx
import numpy as np
from nltk.corpus import stopwords
from nltk.stem.snowball import SnowballStemmer
from nltk import word_tokenize
from string import punctuation
from detection import detect_language
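
# Pipeline: RSS feed -> article links -> scraped text -> TextRank summary / keywords.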
def rss_to_links(rssurl):
"""Input: url of the rss.
Output: list of links to news."""
feed = feedparser.parse(rssurl)
return [link['link'] for link in feed['entries']]
def scrape_page(url):
"""
Input: url
Output: title, text, top image and its caption.
"""
page = requests.get(url, timeout=2).text
tree = bs(page, 'html.parser')
# Remove script and style tags
for script in tree(['script','style']):
script.extract()
# Get title
title = tree.title.string
# Get top image if there is any
try:
content_images = tree.find_all('figure')
content_image = content_images[0]
top_image = content_image.find_all('img')
top_image = re.findall(r'src="\S+"',str(top_image[0]))[0][4:]
image_caption = content_image.find_all('figcaption')[0].get_text()
except:
top_image = None
image_caption = None
    # Get all p tags
ps = tree.find_all('p')
text = []
# Get parents of the p tags
parents = set([p.parent for p in ps])
# Iterate over the parents and try to get the p tags with depth of 1.
for parent in parents:
pss = [p for p in parent.find_all('p', recursive=False) if len(p.get_text().split()) > 10]
text.append(pss)
    # Keep only lists with length greater than one.
useful = [txt for txt in text if len(txt) > 1]
news = []
for dummy in useful:
news = news + [use.get_text() for use in dummy]
# join into a single string
plain_text = " ".join(news)
return title, plain_text, top_image, image_caption
def textrank(document, language):
#print document
nltk.data.path.append('./nltk_data/')
sentence_tokenizer = nltk.data.load("tokenizers/punkt/" + language + ".pickle")
sentences = sentence_tokenizer.tokenize(document)
stopword = stopwords.words(language)
punct = list(punctuation)
non_words = stopword + punct
non_words = set(non_words)
# Stemmer
stemmer = SnowballStemmer(language)
filtered_sentences = []
if language != "turkish":
for sentence in sentences:
words = word_tokenize(sentence)
words = [stemmer.stem(word.lower()) for word in words if word not in non_words]
#words = [(word.lower()) for word in words if word not in non_words]
sentence = " ".join(words)
filtered_sentences.append(sentence)
else:
for sentence in sentences:
words = word_tokenize(sentence)
#words = [stemmer.stem(word.lower()) for word in words if word not in non_words]
words = [(word.lower()) for word in words if word not in non_words]
sentence = " ".join(words)
filtered_sentences.append(sentence)
normalized = TfidfVectorizer(ngram_range=(1,1)).fit_transform(filtered_sentences)
#rows, columns = normalized.shape
#svd = TruncatedSVD(n_components=100, n_iter=10, random_state=42)
#normalized = svd.fit_transform(normalized)
#print normalized.shape
#print type(normalized)
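    # Row dot products of the TF-IDF matrix give pairwise sentence
    # similarities; PageRank over that graph scores the sentences.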
similarity_graph = normalized * normalized.T
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
return sorted(((scores[i], i, s) for i,s in enumerate(sentences)),
reverse=True)
def summarization(ranked):
cut = 3
size = len(ranked)
if size >= cut:
shrink = int(size ** 0.55) #int(0.40*size)#size/cut
most_relevant = ranked[:shrink]
else:
most_relevant = ranked
most_relevant.sort(key=lambda x:x[1])
sentences = []
for ranks in most_relevant:
sentences.append(ranks[2])
return " ".join(sentences)
def get_summary(url):
"""Input: url
Output: summary"""
title, document, _, _ = scrape_page(url)
language = detect_language(document)
ranked = textrank(document, language)
summary = summarization(ranked)
return title, summary.strip(), language
def wordrank(document, language):
#print document
nltk.data.path.append('./nltk_data/')
sentence_tokenizer = nltk.data.load("tokenizers/punkt/" + language + ".pickle")
sentences = sentence_tokenizer.tokenize(document)
stopword = stopwords.words(language)
punct = list(punctuation)
non_words = stopword + punct
non_words = set(non_words)
# Stemmer
#stemmer = RSLPStemmer()
filtered_sentences = []
for sentence in sentences:
words = word_tokenize(sentence)
#words = [stemmer.stem(word.lower()) for word in words if word not in non_words]
words = [(word.lower()) for word in words if word not in non_words]
sentence = " ".join(words)
filtered_sentences.append(sentence)
vectorizer = TfidfVectorizer(ngram_range=(1,1))
normalized = vectorizer.fit_transform(filtered_sentences)
similarity_graph = normalized.T * normalized
nx_graph = nx.from_scipy_sparse_matrix(similarity_graph)
scores = nx.pagerank(nx_graph)
wordrank = []
for word, feature in vectorizer.vocabulary_.iteritems():
wordrank.append((scores[feature], word))
wordrank.sort(key=lambda x: x[0], reverse=True)
return wordrank
def keyword_extraction(ranked):
keywords = [word[1] for word in ranked]
return keywords[:5]
def get_keywords(url):
"""Input: url
    Output: keywords"""
_, document, _, _ = scrape_page(url)
language = detect_language(document)
ranked = wordrank(document, language)
keywords = keyword_extraction(ranked)
return keywords
# TODO: deal with pages with small content like: http://gshow.globo.com/novelas/rock-story/Vem-por-ai/noticia/2017/01/banda-44-ganha-nova-integrante.html
# TODO: handle French encoding
|
Python
| 0.000001
|
@@ -399,24 +399,54 @@
ect_language
+%0Afrom newspaper import Article
%0A%0Adef rss_to
@@ -1981,24 +1981,186 @@
join(news)%0A%0A
+ if not plain_text or not title:%0A g1 = Article(url=url)%0A g1.download()%0A g1.parse()%0A title = g1.title%0A plain_text = g1.text%0A%0A
return t
@@ -2171,24 +2171,26 @@
, plain_text
+ #
, top_image,
@@ -4388,38 +4388,32 @@
title, document
-, _, _
= scrape_page(u
@@ -5986,14 +5986,9 @@
ment
-, _, _
+
= s
|
76ebb0c53db5a466e7d8d0ae5a5039b854dededb
|
Add crunchbang.
|
dev/tabumemex.py
|
dev/tabumemex.py
|
# Data:
# DataStructure = {'size', 'pos'}
# MemoryBank = {'capacity', 'pos'}
# Conflict = {'cost', 'state', 'a', 'b'}
# X = [[]]
import random
import sys
import memproblem
from greedyMemex import greedymemex
from randommemex import randommemex
def initialmemex(p, A):
"""Decides on an initial solution for the tabu memex
    Chooses between GreedyMemex and RandomMemex.
    Keyword arguments:
    p -- memproblem object
    A -- bool selecting randommemex
Returns a solution with a cost.
"""
if A:
return randommemex(p)
else:
return greedymemex(p)
pass
def tabumemex(p, NtMax):
"""Solves the memory allocation problem using tabu search.
Keyword arguments:
p -- memproblem object
    NtMax -- number of iterations
Returns a solution with a cost.
"""
tabList = []
A = random.randint(0,1)
# f = current solution cost
    f = ftab = initialmemex(p, A) # initial cost in allocation
#ftab = sys.float_info.max # f* proposed solution
NT = NtMax
fillTabuList(p.X, tabList)
Xtab = None # current iteration best solution
Xbest = None # best solution overall
iter = 0
while iter<NT and f>0:
i, h, j = explore_neighborhood_0(p, tabList)
f = p.calculate_cost()
if f < ftab:
ftab = f
Xbest =p.X
tabList.append(str(i)+str(j))
iter += 1
return f
def explore_neighborhood_0(p, tabList):
"""Explores the neighborhood of the current solution
Keyword arguments:
X -- current solution to expand
tabList -- current tabu list
Returns a solution with a cost.
"""
candidate = "" # candidate to add to the tabu list
h = None # Current membank for datastruct i
for i in range(len(p.X)):
for j in range(len(p.X[i])):
candidate = str(i)+str(j)
if not candidate in tabList:
if p.cap_used[i] + p.datastructs[i]['size'] <= p.membanks[j]['capacity']:
h = getCurrentMembank(p.X,i,j)
p.X[i][h] = False
p.X[i][j] = True
p.cap_used[j] += p.datastructs[i]['size']
p.cap_used[h] -= p.datastructs[i]['size']
return i, h, j
pass
pass
return i, -1, j
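
# Returns the memory bank currently holding data structure i
# (the single True entry in row i of X).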
def getCurrentMembank(X, i, j):
for h in range(len(X[i])):
if X[i][h] == True:
return h
def fillTabuList(X, tabList):
"""Fills the tabu list with strings of the current allocations.
Keyword arguments:
X -- current solution
Updates the tabu list
"""
for i in range(len(X)):
for j in range(len(X[i])):
if X[i][j]==True:
tabList.append(str(i)+str(j))
pass
pass
if __name__ == '__main__':
for i in range(1,20):
problem = memproblem.read_problem(sys.argv[1])
cost = tabumemex(problem, 100)
print(cost)
|
Python
| 0.000002
|
@@ -1,8 +1,31 @@
+#!/usr/bin/env python2%0A
# Data:%0A
|
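tabumemex.py above applies tabu search to memory-bank allocation. Stripped of the memproblem specifics, the core loop can be sketched generically as below; this is an illustrative skeleton, not the record's implementation, and the toy cost function at the bottom is invented:

def tabu_search(initial, neighbors, cost, max_iters=100, tabu_size=20):
    """Generic tabu search: move to the best non-tabu neighbor each step."""
    current = best = initial
    tabu = []  # recently visited solutions, expired FIFO
    for _ in range(max_iters):
        candidates = [n for n in neighbors(current) if n not in tabu]
        if not candidates:
            break
        current = min(candidates, key=cost)
        tabu.append(current)
        if len(tabu) > tabu_size:
            tabu.pop(0)  # expire the oldest tabu entry
        if cost(current) < cost(best):
            best = current
    return best

# Toy usage: minimize x**2 over the integers, stepping +/-1.
print(tabu_search(10, lambda x: [x - 1, x + 1], lambda x: x * x))  # -> 0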
bd8de499bbc8ec4b987c241f6b0fbd12ebf16818
|
Adapt the import
|
app/tests/euid/test_kvm_player.py
|
app/tests/euid/test_kvm_player.py
|
import unittest
import os
import kvm_player
@unittest.skipIf(os.geteuid() != 0 or kvm_player.virt_install != 0,
"TestKVMDiscovery need privilege and virt-install")
class TestKernelVirtualMachinePlayer(kvm_player.KernelVirtualMachinePlayer):
@classmethod
def setUpClass(cls):
cls.check_requirements()
cls.set_rack0()
cls.set_api()
cls.set_bootcfg()
cls.set_dnsmasq()
cls.set_lldp()
cls.pause(cls.wait_setup_teardown)
def test(self):
marker = "%s" % TestKernelVirtualMachinePlayer.__name__.lower()
destroy, undefine = ["virsh", "destroy", "%s" % marker], ["virsh", "undefine", "%s" % marker]
try:
virt_install = [
"virt-install",
"--name",
"%s" % marker,
"--network=bridge:rack0,model=virtio",
"--memory=1024",
"--vcpus=1",
"--pxe",
"--disk",
"none",
"--os-type=linux",
"--os-variant=generic",
"--noautoconsole",
"--boot=network"
]
self.virsh(virt_install, assertion=True)
self.pause(5)
finally:
self.virsh(destroy), os.write(1, "\r")
self.virsh(undefine), os.write(1, "\r")
# This has to raise
# class TestKernelVirtualMachinePlayerRaise(kvm_player.KernelVirtualMachinePlayer):
# def test(self):
# pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0.998929
|
@@ -19,17 +19,143 @@
port os%0A
-%0A
+import sys%0A%0Atry:%0A import kvm_player%0Aexcept ImportError:%0A sys.path.append(os.path.dirname(os.path.abspath(__file__)))%0A
import k
|
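The diff above wraps the kvm_player import in a guard so the test file can also be run directly. The same pattern standalone (kvm_player is the project's own module, assumed to sit next to this file):

import os
import sys

try:
    import kvm_player  # works when the package is already on sys.path
except ImportError:
    # Fall back to the directory containing this file, so the test can be
    # launched directly without installing the package first.
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
    import kvm_player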
5f1a708b04e67aec5f943530a702a20fe6529a6c
|
Implement filtering by query arguments
|
s3pac/wsgi.py
|
s3pac/wsgi.py
|
import os, io, json
from datetime import datetime
from dateutil import parser as dateparser
from flask import Flask, Response, request, redirect, url_for, abort, send_file
from werkzeug import secure_filename
from s3pac.model import DateTimeProperty
from s3pac.package import Package, write_database_file, write_signature_file
from s3pac.database import PackageDatabase
# -----------------------------------------------------------------------------
_TO_JSON = {
DateTimeProperty: datetime.isoformat,
}
def _json_from_pkg(pkg):
return Package.store(_TO_JSON, pkg)
# -----------------------------------------------------------------------------
app = Flask(__name__, instance_path=os.getcwd(),
instance_relative_config=True)
app.config.from_pyfile("s3pac.conf.py")
pkgdb = PackageDatabase(
access_key_id = app.config.get('AWS_ACCESS_KEY_ID', None),
secret_access_key = app.config.get('AWS_SECRET_ACCESS_KEY', None),
region_name = app.config.get('AWS_REGION_NAME'),
sdb_domain_name = app.config.get('AWS_SDB_DOMAIN_NAME'),
s3_bucket_name = app.config.get('AWS_S3_BUCKET_NAME'),
s3_prefix = app.config.get('AWS_S3_PREFIX', "")
)
def _data_abspath(relpath):
root = app.config.get('DATA_ROOT', "data")
return os.path.join(os.getcwd(), root, relpath)
# -----------------------------------------------------------------------------
def _get_database_file(repo, sysarch):
# collect packages with given system architecture or 'any'
pkgs = []
for arch in (sysarch, 'any'):
pkgs.extend(pkgdb.find(repo=repo, arch=arch))
# write package database file
dbfilepath = _data_abspath("%s.db.tar.gz" % repo)
with open(dbfilepath, 'wb') as dbfile:
write_database_file(dbfile, pkgs)
# send the database file
return send_file(dbfilepath, mimetype='application/octet-stream')
def _get_package_file(repo, filename):
pkg = pkgdb.findone(repo=repo, filename=filename)
if not pkg:
abort(404)
return redirect(pkgdb.url(pkg))
def _get_package_signature_file(repo, sigfilename):
pkgfilename = sigfilename[:-4]
pkg = pkgdb.findone(repo=repo, filename=pkgfilename)
if not pkg or not pkg.pgpsig:
abort(404)
sigfile = io.BytesIO()
write_signature_file(sigfile, pkg)
sigfile.seek(0)
return send_file(sigfile,
attachment_filename=sigfilename,
as_attachment=True)
@app.route("/r/<repo>/<arch>/<filename>", methods=['GET'])
def get_file(repo, arch, filename):
"""Pacman repository interface."""
if filename.endswith(".pkg.tar.xz"):
return _get_package_file(repo, filename)
if filename.endswith(".pkg.tar.xz.sig"):
return _get_package_signature_file(repo, filename)
if filename.endswith(".db") or filename.endswith(".db.tar.gz"):
return _get_database_file(repo, arch)
abort(404)
@app.route("/p/<repo>/", methods=['GET'])
def get_package_list(repo):
"""Return all packages in a repository."""
filters = {}
filters['repo'] = repo
pkgs = pkgdb.find(**filters)
_json = list(map(_json_from_pkg, pkgs))
return json.dumps(_json)
@app.route("/p/<repo>/<arch>/", methods=['GET'])
def get_package_list_arch(repo, arch):
"""Return all packages in a repository with given architecture."""
filters = {}
filters['repo'] = repo
filters['arch'] = arch
pkgs = pkgdb.find(**filters)
_json = list(map(_json_from_pkg, pkgs))
return json.dumps(_json)
@app.route("/p/<repo>/<arch>/<name>", methods=['GET'])
def get_package(repo, arch, name):
"""Return a specific package."""
pkg = pkgdb.findone(repo=repo, arch=arch, name=name)
if not pkg:
abort(404)
return json.dumps(_json_from_pkg(pkg))
@app.route("/p/<repo>/<arch>/<name>", methods=['DELETE'])
def delete_package(repo, arch, name):
"""Delete a package from a repository."""
if not pkgdb.delete(repo=repo, arch=arch, name=name):
abort(404)
return ""
@app.route("/p/<repo>/", methods=['POST'])
def post_package_file(repo):
"""Upload and publish a package."""
pkgupload = request.files.get('package', None)
if not pkgupload or not pkgupload.filename.endswith(".pkg.tar.xz"):
abort(401)
sigupload = request.files.get('signature', None)
if sigupload and not sigupload.filename.endswith(".pkg.tar.xz.sig"):
abort(401)
def _save_upload(upload):
filepath = _data_abspath(secure_filename(upload.filename))
upload.save(filepath)
return filepath
pkgfilepath = _save_upload(pkgupload)
sigfilepath = _save_upload(sigupload) if sigupload else None
pkgfile = open(pkgfilepath, 'rb')
sigfile = open(sigfilepath, 'rb') if sigfilepath else None
pkg = pkgdb.publish(repo, pkgfile, sigfile)
pkgurl = url_for('get_package', repo=repo, arch=pkg.arch, name=pkg.name)
return redirect(pkgurl)
|
Python
| 0.000019
|
@@ -226,16 +226,30 @@
l import
+ LongProperty,
DateTim
@@ -526,71 +526,305 @@
%7D%0A%0A
-def _json_from_pkg(pkg):%0A return Package.store(_TO_JSON, pkg
+_FROM_QUERY = %7B%0A LongProperty: int,%0A DateTimeProperty: dateparser.parse,%0A %7D%0A%0Adef _json_from_pkg(pkg):%0A return Package.store(_TO_JSON, pkg)%0A%0Adef _filters_from_args(args):%0A filters = %7B key: args.getlist(key) for key in args.keys() %7D%0A return Package.convertdict(_FROM_QUERY, filters
)%0A%0A#
@@ -3278,34 +3278,64 @@
%22%0A filters =
-%7B%7D
+_filters_from_args(request.args)
%0A filters%5B're
@@ -3630,10 +3630,40 @@
s =
-%7B%7D
+_filters_from_args(request.args)
%0A
|
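The change above builds database filters straight from the query string via request.args.getlist. A self-contained Flask sketch of the same idea; the endpoint, sample data, and field names are invented for illustration:

from flask import Flask, jsonify, request

app = Flask(__name__)

PACKAGES = [
    {"name": "foo", "arch": "x86_64"},
    {"name": "bar", "arch": "any"},
]

@app.route("/p/")
def list_packages():
    # getlist() keeps repeated keys, e.g. ?arch=any&arch=x86_64
    filters = {key: request.args.getlist(key) for key in request.args.keys()}
    matches = [
        pkg for pkg in PACKAGES
        if all(str(pkg.get(key)) in values for key, values in filters.items())
    ]
    return jsonify(matches)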
b09f37d603da598d553d6b18ff231b25cde19d34
|
Create perms_required decorator, add some permissions stuff
|
iatidataquality/usermanagement.py
|
iatidataquality/usermanagement.py
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from flask import Flask, render_template, flash, request, Markup, \
session, redirect, url_for, escape, Response, abort, send_file, current_app
from flask.ext.login import (LoginManager, current_user, login_required,
login_user, logout_user, UserMixin, AnonymousUser,
confirm_login, fresh_login_required)
from flask.ext.principal import Principal, Identity, AnonymousIdentity, \
identity_changed, identity_loaded, Permission, RoleNeed, \
UserNeed
from collections import namedtuple
from functools import partial, wraps
from iatidataquality import app
from iatidataquality import db
from iatidq import dqusers
users = [{
'username': 'mark',
'password': '1234',
'name': 'Mark',
'permissions': [{
'permission_name': 'admin',
'permission_method': 'full'
}]
},
{
'username': 'fred',
'password': '1234',
'name': 'Fred',
'permissions': [{
'permission_name': 'tests',
'permission_method': 'edit',
'permission_value': '1'
},{
'permission_name': 'tests',
'permission_method': 'edit',
'permission_value': '2'
},{
'permission_name': 'tests',
'permission_method': 'edit',
'permission_value': '3'
}]
}]
for user in users:
dqusers.addUser(user)
the_user = dqusers.user_by_username(user['username'])
for permission in user['permissions']:
permission["user_id"]=the_user.id
dqusers.addUserPermission(permission)
principals = Principal(app)
login_manager = LoginManager()
login_manager.setup_app(app)
login_manager.login_view = "login"
@login_manager.user_loader
def load_user(id):
return dqusers.user(id)
TestNeed = namedtuple('test', ['method', 'value'])
EditTestNeed = partial(TestNeed, 'edit')
admin_permission = Permission(RoleNeed('admin'))
def check_perms(name, method=None, value=None):
if admin_permission.can():
return True
if (name == 'tests'):
if ((method=='edit') or (method=='delete')):
return EditTestPermission(value).can()
return False
def perms_required(name, method=None, value=None):
def wrap(f):
def wrapped_f(*args, **kwargs):
if name == 'tests':
if not check_perms(name, method, kwargs['id']):
flash('You must log in to access that page.', 'error')
return redirect(url_for('home'))
return f(*args, **kwargs)
return wrapped_f
return wrap
class EditTestPermission(Permission):
def __init__(self, test_id):
need = EditTestNeed(unicode(test_id))
super(EditTestPermission, self).__init__(need)
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
# Set the identity user object
identity.user = current_user
permissions = dqusers.userPermissions(identity.id)
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
def set_permissions(permission):
if (permission.permission_name=='tests' and permission.permission_method=='edit'):
identity.provides.add(EditTestNeed(unicode(permission.permission_value)))
if (permission.permission_name=='admin'):
identity.provides.add(RoleNeed(permission.permission_name))
for permission in permissions:
set_permissions(permission)
@app.route("/login/", methods=["GET", "POST"])
def login():
if request.method == "POST" and "username" in request.form:
user = dqusers.user_by_username(request.form["username"])
if (user and user.check_password(request.form["password"])):
remember = request.form.get("remember", "no") == "yes"
if login_user(user, remember=remember):
flash("Logged in!", "success")
identity_changed.send(current_app._get_current_object(),
identity=Identity(user.id))
return redirect(request.args.get("next") or url_for("home"))
else:
flash("Sorry, but you could not log in.", "error")
else:
flash(u"Invalid username or password.", "error")
return render_template("login.html")
@app.route('/logout/')
@login_required
def logout():
logout_user()
# Remove session keys set by Flask-Principal
for key in ('identity.name', 'identity.auth_type'):
session.pop(key, None)
# Tell Flask-Principal the user is anonymous
identity_changed.send(current_app._get_current_object(),
identity=AnonymousIdentity())
flash('Logged out', 'success')
return redirect(request.args.get('next') or '/')
|
Python
| 0
|
@@ -2190,16 +2190,90 @@
er(id)%0A%0A
+def role_permission(rolename):%0A return Permission(RoleNeed(rolename))%0A%0A
TestNeed
@@ -2356,23 +2356,32 @@
'edit')%0A
-admin_p
+%0Aclass EditTestP
ermissio
@@ -2381,19 +2381,17 @@
rmission
- =
+(
Permissi
@@ -2396,26 +2396,143 @@
sion
-(RoleNeed('admin')
+):%0A def __init__(self, test_id):%0A need = EditTestNeed(unicode(test_id))%0A super(EditTestPermission, self).__init__(need
)%0A%0Ad
@@ -2557,37 +2557,38 @@
e, method=None,
-value
+kwargs
=None):%0A if a
@@ -2587,16 +2587,37 @@
-if admin
+#check to see if %0A if role
_per
@@ -2623,16 +2623,25 @@
rmission
+('admin')
.can():%0A
@@ -2682,24 +2682,53 @@
= 'tests'):%0A
+ value = kwargs%5B'id'%5D%0A
if (
@@ -2856,24 +2856,29 @@
equired(name
+=None
, method=Non
@@ -2910,16 +2910,34 @@
rap(f):%0A
+ @wraps(f)%0A
@@ -2972,44 +2972,8 @@
s):%0A
- if name == 'tests':%0A
@@ -3023,21 +3023,11 @@
args
-%5B'id'%5D):%0A
+):%0A
@@ -3093,20 +3093,16 @@
error')%0A
-
@@ -3226,181 +3226,8 @@
ap%0A%0A
-class EditTestPermission(Permission):%0A def __init__(self, test_id):%0A need = EditTestNeed(unicode(test_id))%0A super(EditTestPermission, self).__init__(need)%0A%0A
@ide
|
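The diff above adds @wraps(f) inside perms_required so the wrapped view keeps its name for Flask's routing. A standalone sketch of such a decorator; the check callable here is a stand-in for the project's check_perms:

from functools import wraps

def perms_required(check):
    """Run the view only when check(**kwargs) passes."""
    def wrap(f):
        @wraps(f)  # preserve f.__name__ / f.__doc__ for Flask
        def wrapped_f(*args, **kwargs):
            if not check(**kwargs):
                raise PermissionError("not allowed")
            return f(*args, **kwargs)
        return wrapped_f
    return wrap

@perms_required(lambda **kw: kw.get("id") == 1)
def edit_test(id):
    return "edited %d" % id

print(edit_test(id=1))  # -> edited 1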
211aff9fd57775a87409e1eab50f8803a9efe9f7
|
add imports for chart and effects
|
gimp_be/draw/__init__.py
|
gimp_be/draw/__init__.py
|
from gimp_be.draw.draw import *
from gimp_be.draw.tree import *
from gimp_be.draw.polygon import *
|
Python
| 0
|
@@ -84,16 +84,83 @@
olygon import *%0A
+from gimp_be.draw.effects import *%0Afrom gimp_be.draw.chart import *
|
d362d3770b999293db854a08e9627cfb96557544
|
disable pagination
|
api/__init__.py
|
api/__init__.py
|
import os, bcrypt
from eve import Eve
from flask.ext.bootstrap import Bootstrap
from eve_docs import eve_docs
from eve.auth import BasicAuth
class BCryptAuth(BasicAuth):
def check_auth(self, username, password, allowed_roles, resource, method):
accounts = app.data.driver.db['accounts']
account = accounts.find_one({'username': username})
return (
account and
bcrypt.hashpw(password, account['password']) == account['password']
)
accounts = {
'public_methods': [],
'public_item_methods': [],
'schema': {
'username': {
'type': 'string',
'minlength': 5,
'required': True,
'unique': True
},
'password': {
'type': 'string',
'required': True
}
}
}
gitcommits = {
'datasource': {
'default_sort': [('datetime',1)],
},
'schema': {
'project': {
'type': 'string',
'minlength': 3,
'maxlength': 50,
'required': True,
},
'message': {
'type': 'string',
'minlength': 5,
'required': True,
},
'datetime': {
'type': 'datetime',
'required': True,
},
'sha1': {
'type': 'string',
'required': True,
},
'deletions': {
'type': 'integer',
'required': True,
},
'lines': {
'type': 'integer',
'required': True,
},
'insertions': {
'type': 'integer',
'required': True,
},
'files': {
'type': 'integer',
'required': True,
},
}
}
settings = {
#'SERVER_NAME': '127.0.0.1:5000', # dev
'SERVER_NAME': 'api.the-huck.com', # prod
'MONGO_HOST': 'localhost',
'MONGO_PORT': '27017',
#'MONGO_USERNAME': 'user',
#'MONGO_PASSWORD': 'user',
'MONGO_DBNAME': 'apieve',
'RESOURCE_METHODS': ['GET', 'POST', 'DELETE'],
'ITEM_METHODS': ['GET', 'PATCH', 'PUT', 'DELETE'],
'PUBLIC_METHODS': ['GET'],
'PUBLIC_ITEM_METHODS': ['GET'],
'CACHE_CONTROL': 'max-age=0',
'CACHE_EXPIRES': 0,
'DOMAIN': {
'accounts': accounts,
'gitcommits': gitcommits
}
}
app = Eve(auth=BCryptAuth, settings=settings)
Bootstrap(app)
app.register_blueprint(eve_docs, url_prefix='/docs')
|
Python
| 0.000002
|
@@ -1932,16 +1932,39 @@
ES': 0,%0A
+ 'PAGINATION': False,%0A
'DOMAI
|
248ad807e98aa379f24cb41b6fcf0af753c4f169
|
Test _assert_eq_nan with scalars and 0-D arrays
|
tests/test__test_utils.py
|
tests/test__test_utils.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
import numpy as np
import dask.array as da
import dask_ndmeasure._test_utils
nan = np.nan
@pytest.mark.parametrize("match, a, b", [
[True] + 2 * [np.random.randint(10, size=(15, 16))],
[True] + 2 * [da.random.randint(10, size=(15, 16), chunks=(5, 5))],
[True, np.array([2, nan]), np.array([2, nan])],
[False, np.array([2, nan]), np.array([3, nan])],
[False, np.array([2, nan]), np.array([2, 3])],
[True, np.array([2, 3]), da.from_array(np.array([2, 3]), chunks=1)],
[True, np.array([nan]), da.from_array(np.array([nan]), chunks=1)],
[False, np.array([2]), da.from_array(np.array([nan]), chunks=1)],
[False, np.array([nan]), da.from_array(np.array([2]), chunks=1)],
[True, np.array([2, nan]), da.from_array(np.array([2, nan]), chunks=1)],
[False, np.array([2, nan]), da.from_array(np.array([3, nan]), chunks=1)],
[False, np.array([2, nan]), da.from_array(np.array([2, 3]), chunks=1)],
])
def test_assert_eq_nan(match, a, b):
if match:
dask_ndmeasure._test_utils._assert_eq_nan(a, b)
else:
with pytest.raises(AssertionError):
dask_ndmeasure._test_utils._assert_eq_nan(a, b)
|
Python
| 0
|
@@ -194,16 +194,220 @@
, b%22, %5B%0A
+ %5BTrue%5D + 2 * %5Bnp.array(2)%5B()%5D%5D,%0A %5BTrue%5D + 2 * %5Bnp.array(nan)%5B()%5D%5D,%0A %5BTrue%5D + 2 * %5Bnp.array(2)%5D,%0A %5BTrue%5D + 2 * %5Bnp.array(nan)%5D,%0A %5BTrue%5D + %5Bnp.array(1.0), da.ones(tuple(), chunks=tuple())%5D,%0A
%5BTru
|
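The tests above exercise an equality helper that treats NaN == NaN as true, including for scalars and 0-d arrays. A minimal NumPy-only version, sketched here under the assumption of NumPy >= 1.19 for equal_nan= on array_equal (the project's _assert_eq_nan also handles dask arrays, which this omits):

import numpy as np

def assert_eq_nan(a, b):
    """Assert elementwise equality, with NaNs comparing equal."""
    a = np.asarray(a)  # accepts scalars and 0-d arrays too
    b = np.asarray(b)
    assert np.array_equal(a, b, equal_nan=True)

assert_eq_nan(np.array([2.0, np.nan]), np.array([2.0, np.nan]))  # passes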
6b481426ac7c33a755fa5de2f0dddc3f45210e60
|
use FixedEnergyRangeBuilder in test
|
tests/test_arsenic_fit.py
|
tests/test_arsenic_fit.py
|
"""
pytest-catchlog is required by this test
"""
from glob import glob
import logging
import os
import sys
import tempfile
from sklearn.linear_model import LinearRegression
from mrfitty.base import AdaptiveEnergyRangeBuilder, ReferenceSpectrum, Spectrum
from mrfitty.combination_fit import AllCombinationFitTask
logging.basicConfig(level=logging.DEBUG, filename='test_arsenic_fit.log')
log = logging.getLogger(name=__name__)
def test_arsenic_1(caplog, request):
"""
Test fits for known arsenic data and reference_spectra.
Expect to find PRM, data, and reference files in a directory called 'test_arsenic_fit'.
See also: http://stackoverflow.com/questions/29627341/pytest-where-to-store-expected-data.
:param request: pytest fixture with information about the path to this test file
:return:
"""
caplog.set_level(logging.INFO)
test_arsenic_fit_fp = request.module.__file__
log.info('test_arsenic_fit_fp: {}'.format(test_arsenic_fit_fp))
test_arsenic_fit_dir_path, _ = os.path.splitext(test_arsenic_fit_fp)
#reference_file_path_pattern = os.path.join(test_arsenic_fit_dir_path, 'reference', 'arsenate_*.e')
reference_file_path_pattern = os.path.join(test_arsenic_fit_dir_path, 'reference', '*.e')
data_file_path = os.path.join(test_arsenic_fit_dir_path, 'data', 'OTT3_55_spot0.e')
reference_spectrum_list = [
ReferenceSpectrum.read_file(file_path)
for file_path
in glob(reference_file_path_pattern)
]
log.info(reference_spectrum_list)
unknown_spectrum = Spectrum.read_file(data_file_path)
log.info(unknown_spectrum)
task = AllCombinationFitTask(
ls=LinearRegression,
energy_range_builder=AdaptiveEnergyRangeBuilder(),
reference_spectrum_list=reference_spectrum_list,
unknown_spectrum_list=[unknown_spectrum, ],
best_fits_plot_limit=1,
component_count_range=range(1, 3+1)
)
with tempfile.TemporaryDirectory() as plots_pdf_dp:
task.fit_all(plots_pdf_dp=plots_pdf_dp)
unknown_spectrum_fit = task.fit_table[unknown_spectrum]
assert unknown_spectrum_fit.best_fit.interpolant_incident_energy.shape == unknown_spectrum_fit.best_fit.fit_spectrum_b.shape
assert unknown_spectrum_fit.best_fit.interpolant_incident_energy.shape == unknown_spectrum_fit.best_fit.unknown_spectrum_b.shape
assert unknown_spectrum_fit.best_fit.interpolant_incident_energy.shape == unknown_spectrum_fit.best_fit.residuals.shape
assert 3 == len(unknown_spectrum_fit.best_fit.reference_spectra_seq)
def test_arsenic_2(caplog, request):
"""
    Test fits for a single reference against all reference_spectra.
Expect to find PRM, data, and reference files in a directory called 'test_arsenic_fit'.
See also: http://stackoverflow.com/questions/29627341/pytest-where-to-store-expected-data.
This test is not reliable. Fix it.
:param request: pytest fixture with information about the path to this test file
:return:
"""
caplog.set_level(logging.INFO)
test_arsenic_fit_fp = request.module.__file__
log.info('test_arsenic_fit_fp: {}'.format(test_arsenic_fit_fp))
test_arsenic_fit_dir_path, _ = os.path.splitext(test_arsenic_fit_fp)
#reference_file_path_pattern = os.path.join(test_arsenic_fit_dir_path, 'reference', 'arsenate_*.e')
reference_file_path_pattern = os.path.join(test_arsenic_fit_dir_path, 'reference', '*.e')
#data_file_path = os.path.join(test_arsenic_fit_dir_path, 'reference', 'arsenate_aqueous_avg_als_cal.e')
reference_spectrum_list = [
ReferenceSpectrum.read_file(file_path)
for file_path
in glob(reference_file_path_pattern)
]
log.info(reference_spectrum_list)
unknown_spectrum = reference_spectrum_list[0]
log.info(unknown_spectrum)
task = AllCombinationFitTask(
ls=LinearRegression,
energy_range_builder=AdaptiveEnergyRangeBuilder(),
reference_spectrum_list=reference_spectrum_list,
unknown_spectrum_list=[unknown_spectrum],
best_fits_plot_limit=1,
component_count_range=range(1, 3+1)
)
with tempfile.TemporaryDirectory() as plots_pdf_dp:
task.fit_all(plots_pdf_dp=plots_pdf_dp)
unknown_spectrum_fit = task.fit_table[unknown_spectrum]
best_fit_ref_count = len(unknown_spectrum_fit.best_fit.reference_spectra_seq)
assert 2 <= best_fit_ref_count <= 3
|
Python
| 0
|
@@ -93,19 +93,8 @@
os%0A
-import sys%0A
impo
@@ -209,16 +209,41 @@
Builder,
+ FixedEnergyRangeBuilder,
Referen
@@ -3941,32 +3941,29 @@
nge_builder=
-Adaptive
+Fixed
EnergyRangeB
@@ -3961,32 +3961,73 @@
rgyRangeBuilder(
+energy_start=11834.5, energy_stop=12096.0
),%0A refer
|
8533400ecf69aac64c6210cb9fca1dfe90d0e6b7
|
work around hanging issue on Windows (#27)
|
tests/test_code_format.py
|
tests/test_code_format.py
|
import os
import sys
import subprocess
def test_flake8():
"""Test source code for pyFlakes and PEP8 conformance"""
this_dir = os.path.dirname(os.path.abspath(__file__))
source_dir = os.path.join(this_dir, '..', 'osrf_pycommon')
cmd = ['flake8', source_dir, '--count']
if sys.version_info < (3,4):
# Unless Python3, skip files with new syntax, like `yield from`
cmd.append('--exclude=*async_execute_process_asyncio/impl.py')
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
print(stdout)
assert p.returncode == 0, \
"Command '{0}' returned non-zero exit code '{1}'".format(' '.join(cmd), p.returncode)
|
Python
| 0
|
@@ -279,16 +279,113 @@
count'%5D%0A
+ # work around for https://gitlab.com/pycqa/flake8/issues/179%0A cmd.extend(%5B'--jobs', '1'%5D)%0A
if s
|
63483418d2169cde88649a29754846d30c6cb5c4
|
Improve fuel.downloaders.basic unit tests
|
tests/test_downloaders.py
|
tests/test_downloaders.py
|
import os
from fuel.downloaders.base import download, default_manager
iris_url = ('https://archive.ics.uci.edu/ml/machine-learning-databases/' +
'iris/iris.data')
iris_first_line = '5.1,3.5,1.4,0.2,Iris-setosa\n'
def test_download_no_path():
download(iris_url)
with open('iris.data') as f:
first_line = f.readline()
assert first_line == iris_first_line
os.remove('iris.data')
def test_download_path_is_dir():
os.mkdir('tmp')
download(iris_url, 'tmp')
with open('tmp/iris.data') as f:
first_line = f.readline()
assert first_line == iris_first_line
os.remove('tmp/iris.data')
os.rmdir('tmp')
def test_download_path_is_file():
download(iris_url, 'iris_tmp.data')
with open('iris_tmp.data') as f:
first_line = f.readline()
assert first_line == iris_first_line
os.remove('iris_tmp.data')
def test_default_manager_save():
class DummyArgs:
pass
args = DummyArgs()
args.directory = '.'
args.clear = False
default_manager([iris_url], ['iris.data'])(args)
with open('iris.data') as f:
first_line = f.readline()
assert first_line == iris_first_line
os.remove('iris.data')
def test_default_manager_clear():
open('tmp.data', 'a').close()
class DummyArgs:
pass
args = DummyArgs()
args.directory = '.'
args.clear = True
default_manager([None], ['tmp.data'])(args)
assert not os.path.isfile('tmp.data')
|
Python
| 0
|
@@ -1,12 +1,27 @@
+import hashlib%0A
import os%0A%0Af
@@ -194,52 +194,81 @@
ris_
-first_line = '5.1,3.5,1.4,0.2,Iris-setosa%5Cn'
+hash = %226f608b71a7317216319b4d27b4d9bc84e6abd734eda7872b71a458569e2656c0%22
%0A%0A%0Ad
@@ -335,32 +335,37 @@
open('iris.data'
+, 'r'
) as f:%0A
@@ -368,74 +368,64 @@
-first_line = f.readline()%0A assert first_line == iris_first_line
+assert hashlib.sha256(f.read()).hexdigest() == iris_hash
%0A
@@ -554,32 +554,37 @@
('tmp/iris.data'
+, 'r'
) as f:%0A
@@ -587,74 +587,64 @@
-first_line = f.readline()%0A assert first_line == iris_first_line
+assert hashlib.sha256(f.read()).hexdigest() == iris_hash
%0A
@@ -792,24 +792,29 @@
is_tmp.data'
+, 'r'
) as f:%0A
@@ -821,74 +821,64 @@
-first_line = f.readline()%0A assert first_line == iris_first_line
+assert hashlib.sha256(f.read()).hexdigest() == iris_hash
%0A
@@ -1123,16 +1123,21 @@
is.data'
+, 'r'
) as f:%0A
@@ -1148,74 +1148,64 @@
-first_line = f.readline()%0A assert first_line == iris_first_line
+assert hashlib.sha256(f.read()).hexdigest() == iris_hash
%0A
|
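The updated test compares whole-file SHA-256 digests instead of first lines. A small sketch of hashing a downloaded file; note the binary mode, since hashlib works on bytes (the 'r' mode used in the diff would hand it str on Python 3):

import hashlib

def sha256_of(path, chunk_size=8192):
    """Stream a file through SHA-256 without loading it all into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:  # binary mode: hashing needs bytes
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# e.g. assert sha256_of("iris.data") == iris_hash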
169a8612eb06410a5ae7e110227f7bea010d2ba9
|
Make stdout and stderr into strings.
|
tests/test_ghostscript.py
|
tests/test_ghostscript.py
|
import subprocess
import unittest
class GhostscriptTest(unittest.TestCase):
def test_installed(self):
process = subprocess.Popen(
['gs', '--version'],
stdin=None,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
stdout, stderr = process.communicate()
self.assertEqual(process.returncode, 0)
self.assertEqual(stderr, "")
self.assertRegexpMatches(stdout, r'9\.\d\d')
|
Python
| 0.000493
|
@@ -398,22 +398,27 @@
rtEqual(
+str(
stderr
+)
, %22%22)%0A
@@ -448,22 +448,27 @@
Matches(
+str(
stdout
+)
, r'9%5C.%5C
|
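The fix above wraps the bytes returned by communicate() in str(); on Python 3 the usual approach is to decode them instead. A rough sketch, assuming Ghostscript is installed:

import subprocess

proc = subprocess.Popen(["gs", "--version"],
                        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
# communicate() returns bytes; decode before doing string comparisons
assert stderr.decode() == ""
print(stdout.decode().strip())  # e.g. a version string like 9.x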
62f5019c7603212e138a8f14c4968673aedbf8b3
|
Move prometheus metrics to /management/metrics
|
alerta/app/management/views.py
|
alerta/app/management/views.py
|
import time
import datetime
import logging
from flask import request, Response, url_for, jsonify, render_template
from flask.ext.cors import cross_origin
from alerta.app import app, db
from alerta.app.auth import auth_required
from alerta.app.switch import Switch, SwitchState
from alerta.app.metrics import Gauge, Counter, Timer
from alerta import build
from alerta.version import __version__
LOG = logging.getLogger(__name__)
switches = [
Switch('auto-refresh-allow', 'Allow consoles to auto-refresh alerts', SwitchState.ON),
# Switch('console-api-allow', 'Allow consoles to use the alert API', SwitchState.ON), # TODO(nsatterl)
# Switch('sender-api-allow', 'Allow alerts to be submitted via the API', SwitchState.ON), # TODO(nsatterl)
]
total_alert_gauge = Gauge('alerts', 'total', 'Total alerts', 'Total number of alerts in the database')
started = time.time() * 1000
@app.route('/management', methods=['OPTIONS', 'GET'])
@cross_origin()
def management():
endpoints = [
url_for('manifest'),
url_for('properties'),
url_for('switchboard'),
url_for('health_check'),
url_for('status')
]
return render_template('management/index.html', endpoints=endpoints)
@app.route('/management/manifest', methods=['OPTIONS', 'GET'])
@cross_origin()
@auth_required
def manifest():
manifest = {
"label": "Alerta",
"release": __version__,
"build": build.BUILD_NUMBER,
"date": build.BUILD_DATE,
"revision": build.BUILD_VCS_NUMBER,
"description": "The Guardian's Alerta monitoring system",
"built-by": build.BUILT_BY,
"built-on": build.HOSTNAME,
}
return jsonify(alerta=manifest)
@app.route('/management/properties', methods=['OPTIONS', 'GET'])
@cross_origin()
@auth_required
def properties():
properties = ''
for k, v in app.__dict__.items():
properties += '%s: %s\n' % (k, v)
for k, v in app.config.items():
properties += '%s: %s\n' % (k, v)
return Response(properties, content_type='text/plain')
@app.route('/management/switchboard', methods=['OPTIONS', 'GET', 'POST'])
@cross_origin()
@auth_required
def switchboard():
if request.method == 'POST':
for switch in Switch.get_all():
try:
value = request.form[switch.name]
switch.set_state(value)
LOG.warning('Switch %s set to %s', switch.name, value)
except KeyError:
pass
return render_template('management/switchboard.html', switches=switches)
else:
switch = request.args.get('switch', None)
if switch:
return render_template('management/switchboard.html',
switches=[Switch.get(switch)])
else:
return render_template('management/switchboard.html', switches=switches)
@app.route('/management/healthcheck', methods=['OPTIONS', 'GET'])
@cross_origin()
def health_check():
try:
heartbeats = db.get_heartbeats()
for heartbeat in heartbeats:
delta = datetime.datetime.utcnow() - heartbeat.receive_time
threshold = float(heartbeat.timeout) * 4
if delta.seconds > threshold:
return 'HEARTBEAT_STALE: %s' % heartbeat.origin , 503
except Exception as e:
return 'HEALTH_CHECK_FAILED: %s' % e, 503
return 'OK'
@app.route('/management/status', methods=['OPTIONS', 'GET'])
@cross_origin()
@auth_required
def status():
total_alert_gauge.set(db.get_count())
metrics = Gauge.get_gauges(format='json')
metrics.extend(Counter.get_counters(format='json'))
metrics.extend(Timer.get_timers(format='json'))
auto_refresh_allow = {
"group": "switch",
"name": "auto_refresh_allow",
"type": "text",
"title": "Alert console auto-refresh",
"description": "Allows auto-refresh of alert consoles to be turned off remotely",
"value": "ON" if Switch.get('auto-refresh-allow').is_on() else "OFF",
}
metrics.append(auto_refresh_allow)
now = int(time.time() * 1000)
return jsonify(application="alerta", version=__version__, time=now, uptime=int(now - started), metrics=metrics)
@app.route('/metrics', methods=['OPTIONS', 'GET'])
@cross_origin()
def prometheus_metrics():
total_alert_gauge.set(db.get_count())
output = Gauge.get_gauges(format='prometheus')
output += Counter.get_counters(format='prometheus')
output += Timer.get_timers(format='prometheus')
return Response(output, content_type='text/plain; version=0.0.4; charset=utf-8')
|
Python
| 0.000001
|
@@ -888,16 +888,17 @@
* 1000%0A%0A
+%0A
@app.rou
@@ -1142,24 +1142,63 @@
or('status')
+,%0A url_for('prometheus_metrics')
%0A %5D%0A r
@@ -4303,16 +4303,27 @@
route('/
+management/
metrics'
@@ -4364,24 +4364,118 @@
ss_origin()%0A
+# @auth_required # FIXME - prometheus only supports Authorization header with %22Bearer%22 token%0A
def promethe
|
ba37080645153d66a8ae1c8df10312806999f8ec
|
Add use of fmisid to tests.
|
tests/test_observation.py
|
tests/test_observation.py
|
import unittest
from datetime import datetime
from dateutil.tz import tzutc
from fmi import FMI
class TestObservations(unittest.TestCase):
def test_lappeenranta(self):
now = datetime.now(tz=tzutc())
f = FMI(place='Lappeenranta')
for point in f.observations():
assert point.time < now
assert isinstance(point.temperature, float)
|
Python
| 0
|
@@ -376,8 +376,153 @@
float)%0A
+%0A for point in f.observations(fmisid=101237):%0A assert point.time %3C now%0A assert isinstance(point.temperature, float)%0A
|
94f6fcc00ed8f6b1f920bfc8242b9efd34f42fa9
|
test get_regional_indicator_emoji on april fools
|
tests/test_reactor_bot.py
|
tests/test_reactor_bot.py
|
#!/usr/bin/env python3
import reactor_bot
import datetime
from freezegun import freeze_time
class TestReactorBot:
def test_extract_emoji(self):
lines_and_emojis = {
' M)-ystery meat': 'M',
'🐕 dog sandwiches': '🐕',
'3 blind mice': '3',
'🇺🇸 flags': '🇺🇸',
'<:python3:232720527448342530> python3!': '<:python3:232720527448342530>',
}
for input, output in lines_and_emojis.items():
assert reactor_bot.extract_emoji(input) == output
def test_emojify(self):
# custom emoji extraction is the only feature unique to emojify()
# so we'll test the other functionality in other tests
assert reactor_bot.emojify('<:python3:232720527448342530>') == ':python3:232720527448342530'
assert reactor_bot.emojify('asdfghjkl;') == 'asdfghjkl;'
def test_get_regional_indicator_emoji(self):
io_map = {
'A': '🇦',
'B': '🇧',
'C': '🇨',
'D': '🇩',
'E': '🇪',
'F': '🇫',
'G': '🇬',
'H': '🇭',
'I': '🇮',
'J': '🇯',
'K': '🇰',
'L': '🇱',
'M': '🇲',
'N': '🇳',
'O': '🇴',
'P': '🇵',
'Q': '🇶',
'R': '🇷',
'S': '🇸',
'T': '🇹',
'U': '🇺',
'V': '🇻',
'W': '🇼',
'X': '🇽',
'Y': '🇾',
'Z': '🇿'
}
for input, output in io_map.items():
assert reactor_bot.get_regional_indicator_emoji(input) == output
def test_get_digit_emoji(self):
io_map = {
'0': '0⃣',
'1': '1⃣',
'2': '2⃣',
'3': '3⃣',
'4': '4⃣',
'5': '5⃣',
'6': '6⃣',
'7': '7⃣',
'8': '8⃣',
'9': '9⃣',
}
for input, output in io_map.items():
assert reactor_bot.get_digit_emoji(input) == output
def test_april_fools(self):
with freeze_time("2017-10-31"):
assert not reactor_bot.april_fools()
with freeze_time("2018-04-01"):
assert reactor_bot.april_fools()
|
Python
| 0.000002
|
@@ -1162,24 +1162,179 @@
': '%F0%9F%87%BF'%0A%09%09%7D%0A%0A
+%09%09# one of these tests will fail on april fools%0A%09%09# (hint: it's %22B%22)%0A%09%09# unless we force the date to not be april fools%0A%09%09with freeze_time(%222018-01-01%22):%0A%09
%09%09for input,
@@ -1352,32 +1352,33 @@
io_map.items():%0A
+%09
%09%09%09assert reacto
@@ -1422,32 +1422,129 @@
put) == output%0A%0A
+%09%09with freeze_time(%222018-04-01%22):%0A%09%09%09assert reactor_bot.get_regional_indicator_emoji('B') == '%F0%9F%85%B1'%0A
%0A%09def test_get_d
|
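The test above pins the clock with freezegun, which the diff extends so the regional-indicator checks don't trip on April 1st. Minimal freezegun usage looks like this; april_fools here is a stand-in for the bot's own helper:

import datetime
from freezegun import freeze_time

def april_fools():
    today = datetime.date.today()
    return today.month == 4 and today.day == 1

with freeze_time("2018-04-01"):
    assert april_fools()      # clock is frozen inside the block
with freeze_time("2017-10-31"):
    assert not april_fools()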
9369f72c4fe9a544e24f10a1db976589dc013424
|
Add dependency on apache module
|
plinth/modules/sso/__init__.py
|
plinth/modules/sso/__init__.py
|
#
# This file is part of Plinth.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Plinth module to configure Single Sign On services.
"""
from plinth import actions
from django.utils.translation import ugettext_lazy as _
version = 1
is_essential = True
depends = ['security']
name = _('Single Sign On')
managed_packages = ['libapache2-mod-auth-pubtkt', 'openssl', 'python3-openssl']
def setup(helper, old_version=None):
"""Install the required packages"""
helper.install(managed_packages)
actions.superuser_run('auth-pubtkt', ['enable-mod'])
actions.superuser_run('auth-pubtkt', ['create-key-pair'])
|
Python
| 0
|
@@ -885,16 +885,26 @@
ecurity'
+, 'apache'
%5D%0A%0Aname
|
3de616fa0bfc8e075e4c7aa4a5e8e9108e168f7c
|
fix bugs
|
plstackapi/planetstack/urls.py
|
plstackapi/planetstack/urls.py
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
from plstackapi.planetstack.views.roles import RoleListCreate, RoleRetrieveUpdateDestroy
from plstackapi.planetstack.views.roles import SiteListCreate, SiteRetrieveUpdateDestroy
from plstackapi.planetstack.views.api_root import api_root
from plstackapi.planetstack.models import Site
from rest_framework import generics
admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'planetstack.views.home', name='home'),
# url(r'^planetstack/', include('planetstack.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^plstackapi/$', api_root),
url(r'^plstackapi/roles/$', RoleListCreate.as_view(), name='role-list'),
url(r'^plstackapi/roles/(?P<pk>[a-zA-Z0-9]+)/$', RoleRetrieveUpdateDestroy.as_view(), name='role-detail'),
url(r'^plstackapi/sites/$', SiteListCreate.as_view(), name='site-list'),
url(r'^plstackapi/sites/(?P<pk>[0-9]+)/$', SiteRetrieveUpdateDestroy.as_view(), name='site-detail'),
#url(r'^plstackapi/slices/$', views.SliceList.as_view(), name='slice-list'),
#url(r'^plstackapi/slices/(?P<pk>[0-9]+)/$', views.SliceDetail.as_view(), name='slice-detail'),
#url(r'^plstackapi/slivers/$', views.SliverList.as_view()),
#url(r'^plstackapi/slivers/(?P<pk>[0-9]+)/$', views.SliverDetail.as_view()),
#url(r'^plstackapi/nodes/$', views.NodeList.as_view(), name='node-list'),
#url(r'^plstackapi/nodes/(?P<pk>[0-9]+)/$', views.NodeDetail.as_view(), name='node-detail'),
#url(r'^plstackapi/deploymentnetworks/$', views.DeploymentNetworkList.as_view(), name='deploymentnetwork-list'),
#url(r'^plstackapi/deploymentnetworks/(?P<pk>[0-9]+)/$', views.DeploymentNetworkDetail.as_view(), name='deploymentnetwork-detail'),
#url(r'^plstackapi/sitedeploymentnetworks/$', views.SiteDeploymentNetworkList.as_view(), name='sitedeploymentnetwork-list'),
#url(r'^plstackapi/sitedeploymentnetworks/(?P<pk>[0-9]+)/$', views.SiteDeploymentNetworkDetail.as_view(), name='sitedeploymentnetwork-detail'),
#Adding in rest_framework urls
url(r'^plstackapi/', include('rest_framework.urls', namespace='rest_framework')),
)
|
Python
| 0.000001
|
@@ -246,35 +246,35 @@
anetstack.views.
-rol
+sit
es import SiteLi
|
99cbe9fc333525608842be098c87f842ac9e8906
|
Remove extra import
|
api/database.py
|
api/database.py
|
from pymongo import MongoClient
from gridfs import GridFS
from bson.objectid import ObjectId
from datetime import datetime
from os import environ
from const import RequestType
from flask import current_app
class MongoConnection(object):
def __init__(self, db=None):
if db:
self.db = db
else:
self.db = self.GetDb()
    # Creates the Mongo DB connection using the configured connection and db values
def GetDb(self):
client = MongoClient('data', 27017)
return client.aaf_db
#get the collection for the request type
def GetCollection(self, request_type):
if request_type == RequestType.ASSISTANCE:
return self.db.assistance_requests
elif request_type == RequestType.DONATION:
return self.db.donation_requests
else:
raise Exception('Invalid Request Collection')
def GetGridFS(self):
return GridFS(self.db, collection='request_documents')
class MongoInterface(object):
def _getObjectId(self, obj):
return str(obj)
def findDocuments(self, collection, query, sort=None):
return_value = { }
results = [ ]
#simple pagination. Can be costly with later pages in larger result sets
search_results = collection.find(query)
return search_results
def getDocument(self, collection, id):
doc = collection.find_one({'_id':ObjectId(id)})
if doc:
doc['_id'] = self._getObjectId(doc['_id'])
return doc
else:
return None
def insertDocument(self, collection, data):
result = collection.insert_one(data).inserted_id
return self._getObjectId(result)
def updateDocument(self, collection, data, id, **kwargs):
update_data = {'$set' : data}
if 'push_data' in kwargs:
update_data['$push'] = kwargs['push_data']
if 'pull_data' in kwargs:
update_data['$pull'] = kwargs['pull_data']
doc = collection.update({'_id':ObjectId(id)}, update_data)
return self._getObjectId(doc)
def getFile(self, collection, id):
file = collection.get(ObjectId(id))
return file.read()
def insertFile(self, collection, data):
file = collection.put(data.encode("UTF-8"))
return self._getObjectId(file)
"""
if __name__ == '__main__':
conn = MongoConnection()
interface = MongoInterface()
collection = conn.GetCollection(RequestType.ASSISTANCE)
print(interface.findDocuments(collection, {"user_id" : "10705332"}))
test_data = {"user_id" : "10705332", "user_name" : "Trevor Robinson", "value_1" : "test value", "value_2" : "val 2"}
id = interface.insertDocument(collection, test_data)
print(id)
doc = interface.getDocument(collection, id)
print(doc)
del doc['_id']
del doc["value_2"]
doc["value_1"] = "test value update"
doc["value_3"] = "new value"
print(doc)
result = interface.updateDocument(collection, doc, id)
print(result)
last_val = interface.getDocument(collection, id)
print(last_val)
input_file = open('./test.txt', 'rb')
fs = conn.GetGridFS()
file_id = interface.insertFile(fs, input_file)
print(file_id)
input_file.close()
output_file = open('./test_out.txt', 'wb')
output_file.write(interface.getFile(fs, file_id))
output_file.close()
"""
|
Python
| 0.000001
|
@@ -173,39 +173,8 @@
ype%0A
-from flask import current_app%0A%0A
%0Acla
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.