| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses, 3 values) | proba (float64, 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
f12dbc789d32d981795db3cbfb51cce9aece9efc
|
change focal length (add wood height)
|
fluxghost/utils/laser_pattern.py
|
fluxghost/utils/laser_pattern.py
|
#!/usr/bin/env python3
from math import pi, sin, cos
# import cv2
laser_on = False
def to_image(buffer_data, img_width, img_height):
int_data = list(buffer_data)
print(int_data[:10])
assert len(int_data) == img_width * img_height, "data length != width * height, %d != %d * %d" % (len(int_data), img_width, img_height)
image = [int_data[i * img_width: (i + 1) * img_width] for i in range(img_height)]
return image
def rxy(x, y):
return
def moveTo(x, y, offsetX, offsetY, rotation, ratio):
x -= offsetX
y -= offsetY
x2 = x * cos(rotation) + y * sin(rotation)
y2 = x * -sin(rotation) + y * cos(rotation)
return ["G1 F600 X" + str((x2) / ratio) + " Y" + str((y2) / ratio)]
def drawTo(x, y, offsetX, offsetY, rotation, ratio, slow=False):
x -= offsetX
y -= offsetY
x2 = x * cos(rotation) + y * sin(rotation)
y2 = x * -sin(rotation) + y * cos(rotation)
if slow:
return ["G1 F50 X" + str((x2) / ratio) + " Y" + str((y2) / ratio) + ";Draw to"]
else:
return ["G1 F200 X" + str((x2) / ratio) + " Y" + str((y2) / ratio) + ";Draw to"]
def turnOn():
global laser_on
laser_on = True
return ["G4 P1", "@X9L0"]
def turnOff():
global laser_on
laser_on = False
return ["G4 P1", "@X9L255"]
def turnHalf():
global laser_on
laser_on = False
return ["@X9L220"]
def laser_pattern(buffer_data, img_width, img_height, ratio):
gcode = []
# print(buffer_data)
gcode.append("@X5H2000")
gcode.append("M666 X-1.95 Y-0.4 Z-2.1 R97.4 H241.2")
gcode.append(";Flux image laser")
gcode.append(";Image size:%d * %d" % (img_width, img_height))
gcode.append("G28")
gcode.append(";G29")
gcode.append("G1 F3000 Z11")
pix = to_image(buffer_data, img_width, img_height)
# pix = cv2.imread('S.png')
# pix = cv2.cvtColor(pix, cv2.COLOR_BGR2GRAY)
# print pix.shape
# input()
offsetX = img_width / 2.
offsetY = img_height / 2.
rotation = pi / 4.
# ratio = 6.
# pixel_size = 100 / ratio
last_i = 0
gcode += turnOff()
# gcode += ["M104 S200"]
gcode += turnOff()
gcode += turnHalf()
#Align process
for k in range(3):
gcode += moveTo(0, 0, offsetX, offsetY, rotation, ratio)
gcode += ["G4 P300"]
gcode += moveTo(0, img_height, offsetX, offsetY, rotation, ratio)
gcode += ["G4 P300"]
gcode += moveTo(img_width, img_height, offsetX, offsetY, rotation, ratio)
gcode += ["G4 P300"]
gcode += moveTo(img_width, 0, offsetX, offsetY, rotation, ratio)
gcode += ["G4 P300"]
gcode += moveTo(0, 0, offsetX, offsetY, rotation, ratio)
gcode += ["G4 P300"]
#column iteration
for h in range(0, img_height):
#row iteration
itera = range(0, img_width)
final_x = img_width
if h % 2 == 1:
final_x = 0
itera = reversed(range(0, img_width))
for w in itera:
if pix[h][w] < 50:
if not laser_on:
last_i = w
gcode += moveTo(w, h, offsetX, offsetY, rotation, ratio)
gcode += turnOn()
else:
if laser_on:
if abs(w - last_i) < 2: # Single dot
pass
gcode += ["G4 P100"]
elif final_x > 0:
gcode += drawTo(w - 1 / 2, h, offsetX, offsetY, rotation, ratio, abs(w - last_i) < 10)
else:
gcode += drawTo(w + 1 / 2, h, offsetX, offsetY, rotation, ratio, abs(w - last_i) < 10)
gcode += turnOff()
if laser_on:
gcode += drawTo(final_x, h, offsetX, offsetY, rotation, ratio)
gcode += turnOff()
# gcode += ["M104 S0"]
gcode += ["G28"]
store = False
if store:
with open('./S.gcode', 'w') as f:
print("\n".join(gcode) + "\n", file=f)
# print >> f, "\n".join(gcode) + "\n"
else:
pass
return "\n".join(gcode) + "\n"
# laser_pattern('', 200, 352, 4)
|
Python
| 0
|
@@ -665,32 +665,36 @@
F600 X%22 + str((
+0 -
x2) / ratio) + %22
@@ -777,16 +777,16 @@
o, s
-low=Fals
+peed=Non
e):%0A
@@ -926,11 +926,12 @@
if s
-low
+peed
:%0A
@@ -949,18 +949,34 @@
n %5B%22G1 F
-50
+%22 + str(speed) + %22
X%22 + st
@@ -970,32 +970,36 @@
) + %22 X%22 + str((
+0 -
x2) / ratio) + %22
@@ -1081,16 +1081,20 @@
+ str((
+0 -
x2) / ra
@@ -1742,16 +1742,37 @@
%22;G29%22)%0A
+ focal_l = 11 + 3%0A
gcod
@@ -1791,18 +1791,36 @@
F3000 Z
-11
+%22 + str(focal_l) + %22
%22)%0A%0A
|
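Decoded from the escaped hunks above (a readable sketch, not the verbatim row data): the commit mirrors the X axis by emitting 0 - x2 in moveTo and in both drawTo branches, generalizes drawTo's boolean slow flag into a numeric speed parameter, and lifts the hard-coded focus height Z11 into a focal_l variable that adds the wood height. Call sites fall outside the shown hunks, so how speed is supplied is not visible here.

def drawTo(x, y, offsetX, offsetY, rotation, ratio, speed=None):
    x -= offsetX
    y -= offsetY
    x2 = x * cos(rotation) + y * sin(rotation)
    y2 = x * -sin(rotation) + y * cos(rotation)
    if speed:
        # feed rate now comes from the caller instead of a fixed F50
        return ["G1 F" + str(speed) + " X" + str((0 - x2) / ratio) + " Y" + str((y2) / ratio) + ";Draw to"]
    else:
        return ["G1 F200 X" + str((0 - x2) / ratio) + " Y" + str((y2) / ratio) + ";Draw to"]

# inside laser_pattern(), after gcode.append(";G29"):
focal_l = 11 + 3  # previous fixed height 11 plus the added wood height
gcode.append("G1 F3000 Z" + str(focal_l) + "")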
f37846159a379922954b76736719c9683fed7541
|
add HTTPError __str__
|
framework/exceptions/__init__.py
|
framework/exceptions/__init__.py
|
# -*- coding: utf-8 -*-
'''Custom exceptions for the framework.'''
import copy
import httplib as http
from flask import request
class FrameworkError(Exception):
"""Base class from which framework-related errors inherit."""
pass
class HTTPError(FrameworkError):
error_msgs = {
http.BAD_REQUEST: {
'message_short': 'Bad request',
'message_long': ('If this should not have occurred and the issue persists, '
'please report it to <a href="mailto:support@osf.io">support@osf.io</a>.'),
},
http.UNAUTHORIZED: {
'message_short': 'Unauthorized',
'message_long': 'You must <a href="/login/">log in</a> to access this resource.',
},
http.FORBIDDEN: {
'message_short': 'Forbidden',
'message_long': ('You do not have permission to perform this action. '
'If this should not have occurred and the issue persists, '
'please report it to <a href="mailto:support@osf.io">support@osf.io</a>.'),
},
http.NOT_FOUND: {
'message_short': 'Page not found',
'message_long': ('The requested resource could not be found. If this '
'should not have occurred and the issue persists, please report it '
'to <a href="mailto:support@osf.io">support@osf.io</a>.'),
},
http.GONE: {
'message_short': 'Resource deleted',
'message_long': ('The requested resource has been deleted. If this should '
'not have occurred and the issue persists, please report it to '
'<a href="mailto:support@osf.io">support@osf.io</a>.'),
},
http.SERVICE_UNAVAILABLE: {
'message_short': 'Service is currently unavailable',
'message_long': ('The requested service is unavailable. If this should '
'not have occurred and the issue persists, please report it to '
'<a href="mailto:support@osf.io">support@osf.io</a>.'),
},
}
def __init__(self, code, message=None, redirect_url=None, data=None):
super(HTTPError, self).__init__(message)
self.code = code
self.redirect_url = redirect_url
self.data = data or {}
try:
self.referrer = request.referrer
except RuntimeError:
self.referrer = None
def __repr__(self):
class_name = self.__class__.__name__
return '{ClassName}(code={code}, data={data})'.format(
ClassName=class_name,
code=self.code,
data=self.to_data(),
)
def to_data(self):
data = copy.deepcopy(self.data)
if self.code in self.error_msgs:
data = {
'message_short': self.error_msgs[self.code]['message_short'],
'message_long': self.error_msgs[self.code]['message_long']
}
else:
data['message_short'] = 'Unable to resolve'
data['message_long'] = ('OSF was unable to resolve your request. If this '
'issue persists, please report it to '
'<a href="mailto:support@osf.io">support@osf.io</a>.')
data.update(self.data)
data['code'] = self.code
data['referrer'] = self.referrer
return data
class PermissionsError(FrameworkError):
"""Raised if an action cannot be performed due to insufficient permissions
"""
pass
|
Python
| 0.000124
|
@@ -2641,16 +2641,66 @@
)%0A%0A
+ def __str__(self):%0A return repr(self)%0A%0A
def
|
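Decoded, the single hunk above inserts a __str__ that delegates to the existing __repr__, directly after it in HTTPError:

def __str__(self):
    return repr(self)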
c9d2c71970779afba7d277bcc5b89799b4a425a5
|
fix pygments bug that appeared in python3 migration
|
frontend/src/cloogle_pygments.py
|
frontend/src/cloogle_pygments.py
|
import urllib
import sys
import codecs
import pygments
import pygments.lexers
import pygments.formatters
from pygments.token import Literal, Name, Operator
_escape_html_table = {
ord('&'): u'&amp;',
ord('<'): u'&lt;',
ord('>'): u'&gt;',
ord('"'): u'&quot;',
ord("'"): u'&#39;',
}
CLEAN_SYNTAX_TOKENS = ['=', '=:', ':==', '|', '->', '(', ')', ':', '::', '::!',
'..', '_', '\\', '.', '#', '#!', '!', '\\\\', '<-', '<-:', '<-|', '&',
'|*|', '|*->*|', '|*->*->*|']
# This is blatantly stolen from the HTML formatter
# Original code has a BSD licence
class CloogleHtmlFormatter(pygments.formatters.HtmlFormatter):
def _format_lines(self, tokensource):
"""
Just format the tokens, without any wrapping tags.
Yield individual lines.
"""
nocls = self.noclasses
lsep = self.lineseparator
# for <span style=""> lookup only
getcls = self.ttype2class.get
c2s = self.class2style
escape_table = _escape_html_table
tagsfile = self.tagsfile
lspan = ''
line = []
for ttype, value in tokensource:
if nocls:
cclass = getcls(ttype)
while cclass is None:
ttype = ttype.parent
cclass = getcls(ttype)
cspan = cclass and '<span style="%s">' % c2s[cclass][0] or ''
else:
cls = self._get_css_classes(ttype)
cspan = cls and '<span class="%s">' % cls or ''
safe_value = value.translate(escape_table)
if ttype in [Name.Class, Name, Operator, Literal] \
and value not in CLEAN_SYNTAX_TOKENS:
value = u'<a href="/#%s">%s</a>' % (
urllib.parse.quote(value), safe_value)
else:
value = safe_value
parts = value.split('\n')
if tagsfile and ttype in Name:
filename, linenumber = self._lookup_ctag(value)
if linenumber:
base, filename = os.path.split(filename)
if base:
base += '/'
filename, extension = os.path.splitext(filename)
url = self.tagurlformat % {'path': base, 'fname': filename,
'fext': extension}
parts[0] = "<a href=\"%s#%s-%d\">%s" % \
(url, self.lineanchors, linenumber, parts[0])
parts[-1] = parts[-1] + "</a>"
# for all but the last line
for part in parts[:-1]:
if line:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, part,
(cspan and '</span>'), lsep))
else: # both are the same
line.extend((part, (lspan and '</span>'), lsep))
yield 1, ''.join(line)
line = []
elif part:
yield 1, ''.join(
(cspan, part, (cspan and '</span></a>'), lsep))
else:
yield 1, lsep
# for the last line
if line and parts[-1]:
if lspan != cspan:
line.extend(((lspan and '</span>'), cspan, parts[-1]))
lspan = cspan
else:
line.append(parts[-1])
elif parts[-1]:
line = [cspan, parts[-1]]
lspan = cspan
# else we neither have to open a new span nor set lspan
if line:
line.extend(((lspan and '</span>'), lsep))
yield 1, ''.join(line)
try:
with open(sys.argv[1], 'rb') as f:
inp = u''
for l in f:
inp += l.decode('latin1')
outp = pygments.highlight(
inp,
pygments.lexers.get_lexer_by_name('clean'),
CloogleHtmlFormatter(
full=False,
linenos=True,
linespans='line',
encoding='latin1',
hl_lines=[] if len(sys.argv) == 1 else [int(a) for a in sys.argv[2:]],
))
print(codecs.decode(outp, 'utf-8', 'ignore'))
except:
print('<p>Could not highlight file.</p>')
|
Python
| 0
|
@@ -6,16 +6,22 @@
t urllib
+.parse
%0Aimport
@@ -3797,17 +3797,14 @@
)%0A%0A%0A
+#
try:%0A
-
with
@@ -3834,28 +3834,24 @@
) as f:%0A
-
-
inp = u''%0A
@@ -3848,28 +3848,24 @@
p = u''%0A
-
-
for l in f:%0A
@@ -3872,20 +3872,16 @@
-
-
inp += l
@@ -3898,20 +3898,16 @@
atin1')%0A
-
outp = p
@@ -3933,21 +3933,13 @@
-
-
inp,%0A
-
@@ -3986,20 +3986,16 @@
'),%0A
-
-
CloogleH
@@ -4016,20 +4016,16 @@
-
full=Fal
@@ -4032,28 +4032,24 @@
se,%0A
-
-
linenos=True
@@ -4050,20 +4050,16 @@
s=True,%0A
-
@@ -4084,20 +4084,16 @@
-
-
encoding
@@ -4103,20 +4103,16 @@
atin1',%0A
-
@@ -4194,19 +4194,11 @@
-
-
))%0A
-
prin
@@ -4243,16 +4243,17 @@
'))%0A
+#
except:%0A
@@ -4248,16 +4248,17 @@
except:%0A
+#
prin
|
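Decoded, the hunks change import urllib to import urllib.parse (the formatter calls urllib.parse.quote, which fails on Python 3 unless the submodule is imported) and comment out the try/bare-except wrapper, dedenting its body so highlighting errors surface instead of being swallowed. A sketch of the resulting tail of the file:

import urllib.parse

# try:
with open(sys.argv[1], 'rb') as f:
    inp = u''
    for l in f:
        inp += l.decode('latin1')
outp = pygments.highlight(
    inp,
    pygments.lexers.get_lexer_by_name('clean'),
    CloogleHtmlFormatter(
        full=False,
        linenos=True,
        linespans='line',
        encoding='latin1',
        hl_lines=[] if len(sys.argv) == 1 else [int(a) for a in sys.argv[2:]],
    ))
print(codecs.decode(outp, 'utf-8', 'ignore'))
# except:
#     print('<p>Could not highlight file.</p>')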
a049ec4ebfa59e4f0ffc98f113f6e9f3cf7a336d
|
Fix checkpoint fullstack
|
karbor/tests/fullstack/test_checkpoints.py
|
karbor/tests/fullstack/test_checkpoints.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from karbor.common import constants
from karbor.tests.fullstack import karbor_base
from karbor.tests.fullstack import karbor_objects as objects
class CheckpointsTest(karbor_base.KarborBaseTest):
"""Test Checkpoints operation """
def setUp(self):
super(CheckpointsTest, self).setUp()
self.provider_id = self.provider_id_noop
def test_checkpoint_create(self):
self.skipTest('Requires cinder protection plugin adjustment')
volume = self.store(objects.Volume())
volume.create(1)
plan = self.store(objects.Plan())
plan.create(self.provider_id_os, [volume, ])
backups = self.cinder_client.backups.list()
before_num = len(backups)
checkpoint = self.store(objects.Checkpoint())
checkpoint.create(self.provider_id, plan.id)
backups = self.cinder_client.backups.list()
after_num = len(backups)
self.assertEqual(1, after_num - before_num)
def test_checkpoint_delete(self):
volume = self.store(objects.Volume())
volume.create(1)
plan = self.store(objects.Plan())
plan.create(self.provider_id, [volume, ])
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
before_num = len(checkpoints)
checkpoint = objects.Checkpoint()
checkpoint.create(self.provider_id, plan.id)
# sanity
checkpoint_item = self.karbor_client.checkpoints.get(self.provider_id,
checkpoint.id)
self.assertEqual(constants.CHECKPOINT_STATUS_AVAILABLE,
checkpoint_item.status)
checkpoint.close()
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
after_num = len(checkpoints)
self.assertEqual(before_num, after_num)
def test_checkpoint_list(self):
volume = self.store(objects.Volume())
volume.create(1)
plan = self.store(objects.Plan())
plan.create(self.provider_id, [volume, ])
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
before_num = len(checkpoints)
checkpoint = self.store(objects.Checkpoint())
checkpoint.create(self.provider_id, plan.id)
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
after_num = len(checkpoints)
self.assertEqual(1, after_num - before_num)
def test_checkpoint_get(self):
volume = self.store(objects.Volume())
volume.create(1)
plan = self.store(objects.Plan())
plan.create(self.provider_id, [volume, ])
checkpoint = self.store(objects.Checkpoint())
checkpoint.create(self.provider_id, plan.id)
# sanity
checkpoint_item = self.karbor_client.checkpoints.get(self.provider_id,
checkpoint.id)
self.assertEqual(constants.CHECKPOINT_STATUS_AVAILABLE,
checkpoint_item.status)
self.assertEqual(checkpoint.id, checkpoint_item.id)
def test_checkpoint_for_server_attached_volume(self):
"""Test checkpoint for server which has attached some volumes"""
volume = self.store(objects.Volume())
volume.create(1)
server = self.store(objects.Server())
server.create()
server.attach_volume(volume.id)
plan0 = self.store(objects.Plan())
plan0.create(self.provider_id, [server, ])
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
before_checkpoints_num = len(checkpoints)
backups = self.cinder_client.backups.list()
before_backups_num = len(backups)
checkpoint = self.store(objects.Checkpoint())
checkpoint.create(self.provider_id, plan0.id, timeout=900)
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
after_checkpoints_num = len(checkpoints)
self.assertEqual(1, after_checkpoints_num - before_checkpoints_num)
backups = self.cinder_client.backups.list()
after_backups_num = len(backups)
self.assertEqual(1, after_backups_num - before_backups_num)
plan1 = self.store(objects.Plan())
plan1.create(self.provider_id, [server, volume])
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
before_checkpoints_num = len(checkpoints)
backups = self.cinder_client.backups.list()
before_backups_num = len(backups)
checkpoint = self.store(objects.Checkpoint())
checkpoint.create(self.provider_id, plan1.id, timeout=720)
checkpoints = self.karbor_client.checkpoints.list(self.provider_id)
after_checkpoints_num = len(checkpoints)
self.assertEqual(1, after_checkpoints_num - before_checkpoints_num)
backups = self.cinder_client.backups.list()
after_backups_num = len(backups)
self.assertEqual(1, after_backups_num - before_backups_num)
server.detach_volume(volume.id)
|
Python
| 0.000143
|
@@ -3755,24 +3755,94 @@
volumes%22%22%22%0A
+ self.skipTest('Requires cinder protection plugin adjustment')%0A
volu
|
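Decoded, the hunk makes this test skip the same way test_checkpoint_create above already does, right after the docstring:

def test_checkpoint_for_server_attached_volume(self):
    """Test checkpoint for server which has attached some volumes"""
    self.skipTest('Requires cinder protection plugin adjustment')
    volume = self.store(objects.Volume())
    # ... rest of the test unchanged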
4eb90c33557fbc8ecd0ed0a88976ffe5c91607e3
|
get event date from segment U 'data_ocorrencia' position: 138-145
|
l10n_br_cnab_import/file_cnab240_parser.py
|
l10n_br_cnab_import/file_cnab240_parser.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Luis Felipe Mileo - mileo at kmee.com.br
# Fernando Marcato Rodrigues
# Copyright 2015 KMEE - www.kmee.com.br
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import tempfile
import datetime
from decimal import Decimal
from openerp.tools.translate import _
from openerp.exceptions import Warning as UserError
try:
from cnab240.tipos import Arquivo
import codecs
except:
raise Exception(_('Please install python lib cnab240'))
class Cnab240Parser(object):
"""Class for defining parser for OFX file format."""
@classmethod
def parser_for(cls, parser_name):
"""Used by the new_bank_statement_parser class factory. Return true if
the provided name is 'cnab240_so'.
"""
return parser_name == 'cnab240_so'
@staticmethod
def determine_bank(nome_impt):
if nome_impt == 'bradesco_pag_for':
from cnab240.bancos import bradescoPagFor
return bradescoPagFor
elif nome_impt == 'bradesco_cobranca_240':
from cnab240.bancos import bradesco
return bradesco
elif nome_impt == 'itau_cobranca_240':
from cnab240.bancos import itau
return itau
elif nome_impt == 'sicoob_240':
from cnab240.bancos import sicoob
return sicoob
else:
raise UserError(_('Modo de importação não encontrado.'))
def parse(self, data, banco_impt):
"""Launch the parsing itself."""
cnab240_file = tempfile.NamedTemporaryFile()
cnab240_file.seek(0)
cnab240_file.write(data)
cnab240_file.flush()
ret_file = codecs.open(cnab240_file.name, encoding='ascii')
# Nome_modo_impt is the name of the json folder. The bank code is invalid
# in this situation
arquivo = Arquivo((self.determine_bank(banco_impt)), arquivo=ret_file)
cnab240_file.close()
transacoes = []
total_amt = Decimal(0.00)
for lote in arquivo.lotes:
for evento in lote.eventos:
if evento.servico_segmento == 'T':
transacoes.append({
'name': evento.sacado_nome,
'date': datetime.datetime.strptime(
str(evento.vencimento_titulo), '%d%m%Y'),
'amount': evento.valor_titulo,
'ref': evento.numero_documento,
'label': evento.sacado_inscricao_numero, # cnpj
'transaction_id': evento.numero_documento,
# nosso numero (our number), alphanumeric
'unique_import_id': str(arquivo.header.arquivo_sequencia) + '-' + str(evento.numero_documento),
'servico_codigo_movimento': evento.servico_codigo_movimento,
'errors': evento.motivo_ocorrencia # 214-221
})
else:
# set amount from segment U, it has with juros
# Formula:
# amount = base_value + interest - (discount + rebate)
base_value = transacoes[-1]['amount']
interest = evento.titulo_acrescimos
discount = evento.titulo_desconto
rebate = evento.titulo_abatimento
if evento.servico_segmento == 'U':
transacoes[-1]['amount'] = base_value + interest - (discount + rebate)
total_amt += evento.titulo_liquido
vals_bank_statement = {
'name': '%s - %s' % (arquivo.header.nome_do_banco,
arquivo.header.arquivo_data_de_geracao),
'date': datetime.datetime.strptime(
str(arquivo.header.arquivo_data_de_geracao), '%d%m%Y'),
'balance_start': 0.00,
'balance_end_real': total_amt,
'currency_code': u'BRL', # currency code
'account_number': arquivo.header.cedente_conta,
'transactions': transacoes
}
return [vals_bank_statement]
def get_st_line_vals(self, line, *args, **kwargs):
"""This method must return a dict of vals that can be passed to create
method of statement line in order to record it. It is the
responsibility of every parser to give this dict of vals, so each one
can implement his own way of recording the lines.
:param: line: a dict of vals that represent a line of
result_row_list
:return: dict of values to give to the create method of statement
line
"""
return {
'name': line.get('name', ''),
'date': line.get('date', datetime.datetime.now().date()),
'amount': line.get('amount', 0.0),
'ref': line.get('ref', '/'),
'label': line.get('label', ''),
'transaction_id': line.get('transaction_id', '/'),
'commission_amount': line.get('commission_amount', 0.0),
'servico_codigo_movimento': line.get('servico_codigo_movimento', 0)
}
|
Python
| 0.999995
|
@@ -3793,16 +3793,36 @@
amount
+and data_ocorrencia
from seg
@@ -4317,32 +4317,242 @@
count + rebate)%0A
+ # replace vencimento with data_ocorrencia%0A transacoes%5B-1%5D%5B'date'%5D = datetime.datetime.strptime(%0A str(evento.data_ocorrencia), '%25d%25m%25Y')%0A
|
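Decoded, the hunks extend the segment-U comment to mention data_ocorrencia and, after the amount is recomputed, overwrite the transaction date (previously taken from vencimento_titulo) with the event's occurrence date:

if evento.servico_segmento == 'U':
    transacoes[-1]['amount'] = base_value + interest - (discount + rebate)
    # replace vencimento with data_ocorrencia
    transacoes[-1]['date'] = datetime.datetime.strptime(
        str(evento.data_ocorrencia), '%d%m%Y')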
4812f64b651ab64881510d38d4e35ce4ce22b04f
|
Fix method has same name as property
|
examples/speech_recognition/data/asr_dataset.py
|
examples/speech_recognition/data/asr_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import numpy as np
from fairseq.data import FairseqDataset
from . import data_utils
from .collaters import Seq2SeqCollater
class AsrDataset(FairseqDataset):
"""
A dataset representing speech and corresponding transcription.
Args:
aud_paths: (List[str]): A list of str with paths to audio files.
aud_durations_ms (List[int]): A list of int containing the durations of
audio files.
tgt (List[torch.LongTensor]): A list of LongTensors containing the indices
of target transcriptions.
tgt_dict (~fairseq.data.Dictionary): target vocabulary.
ids (List[str]): A list of utterance IDs.
speakers (List[str]): A list of speakers corresponding to utterances.
num_mel_bins (int): Number of triangular mel-frequency bins (default: 80)
frame_length (float): Frame length in milliseconds (default: 25.0)
frame_shift (float): Frame shift in milliseconds (default: 10.0)
"""
def __init__(
self, aud_paths, aud_durations_ms, tgt,
tgt_dict, ids, speakers,
num_mel_bins=80, frame_length=25.0, frame_shift=10.0
):
assert frame_length > 0
assert frame_shift > 0
assert all(x > frame_length for x in aud_durations_ms)
self.frame_sizes = [
int(1 + (d - frame_length) / frame_shift)
for d in aud_durations_ms
]
assert len(aud_paths) > 0
assert len(aud_paths) == len(aud_durations_ms)
assert len(aud_paths) == len(tgt)
assert len(aud_paths) == len(ids)
assert len(aud_paths) == len(speakers)
self.aud_paths = aud_paths
self.tgt_dict = tgt_dict
self.tgt = tgt
self.ids = ids
self.speakers = speakers
self.num_mel_bins = num_mel_bins
self.frame_length = frame_length
self.frame_shift = frame_shift
def __getitem__(self, index):
import torchaudio
import torchaudio.compliance.kaldi as kaldi
tgt_item = self.tgt[index] if self.tgt is not None else None
path = self.aud_paths[index]
if not os.path.exists(path):
raise FileNotFoundError("Audio file not found: {}".format(path))
sound, sample_rate = torchaudio.load_wav(path)
output = kaldi.fbank(
sound,
num_mel_bins=self.num_mel_bins,
frame_length=self.frame_length,
frame_shift=self.frame_shift
)
output_cmvn = data_utils.apply_mv_norm(output)
self.collater = Seq2SeqCollater(
0, 1, pad_index=self.tgt_dict.pad(),
eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
)
return {"id": index, "data": [output_cmvn.detach(), tgt_item]}
def __len__(self):
return len(self.aud_paths)
def collater(self, samples):
"""Merge a list of samples to form a mini-batch.
Args:
samples (List[int]): sample indices to collate
Returns:
dict: a mini-batch suitable for forwarding with a Model
"""
return self.collater.collate(samples)
def num_tokens(self, index):
return self.frame_sizes[index]
def size(self, index):
"""Return an example's size as a float or tuple. This value is used when
filtering a dataset with ``--max-positions``."""
return (
self.frame_sizes[index],
len(self.tgt[index]) if self.tgt is not None else 0,
)
def ordered_indices(self):
"""Return an ordered list of indices. Batches will be constructed based
on this order."""
return np.arange(len(self))
|
Python
| 0.000007
|
@@ -2711,24 +2711,28 @@
self.
+s2s_
collater = S
@@ -3290,16 +3290,20 @@
rn self.
+s2s_
collater
|
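Decoded, the two hunks rename the instance attribute so it no longer shadows the collater method; assigning self.collater inside __getitem__ replaced the method on the instance, which is the name collision the subject describes:

# in __getitem__:
self.s2s_collater = Seq2SeqCollater(
    0, 1, pad_index=self.tgt_dict.pad(),
    eos_index=self.tgt_dict.eos(), move_eos_to_beginning=True
)

# in collater():
return self.s2s_collater.collate(samples)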
08c29fcae3c622b0f47a0b73338b372ddcee42eb
|
support py2
|
utils/search.py
|
utils/search.py
|
#!/usr/bin/env python
"""
Filter tweet JSON based on a regular expression to apply to the text of the
tweet.
search.py <regex> file1
Or if you want a case insensitive match:
search.py -i <regex> file1
"""
import re
import sys
import json
import argparse
import fileinput
from twarc import json2csv
if len(sys.argv) == 1:
sys.exit("usage: search.py <regex> file1 file2")
parser = argparse.ArgumentParser(description="filter tweets by regex")
parser.add_argument('-i', '--ignore', dest='ignore', action='store_true',
help='ignore case')
parser.add_argument('regex')
parser.add_argument('files', metavar='FILE', nargs='*', default=['-'], help='files to read, if empty, stdin is used')
args = parser.parse_args()
flags = 0
if args.ignore:
flags = re.IGNORECASE
try:
regex = re.compile(args.regex, flags)
except Exception as e:
sys.exit("error: regex failed to compile: {}".format(e))
for line in fileinput.input(files=args.files):
tweet = json.loads(line)
text = json2csv.text(tweet)
if regex.search(text):
print(line, end='')
|
Python
| 0
|
@@ -213,16 +213,55 @@
1%0A%0A%22%22%22%0A%0A
+from __future__ import print_function%0A%0A
import r
|
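Decoded, the hunk adds one future import after the module docstring, so the keyword form print(line, end='') at the bottom of the script also runs on Python 2:

from __future__ import print_function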
57435f651d1d517e1ae60b7676dcfff1c1130eb8
|
add proper type cast()
|
examples/viewports_override_layer_attributes.py
|
examples/viewports_override_layer_attributes.py
|
# Copyright (c) 2022, Manfred Moitzi
# License: MIT License
from pathlib import Path
import ezdxf
from ezdxf.layouts import Paperspace
MESH_SIZE = 20
DIR = Path("~/Desktop/Outbox").expanduser()
if not DIR.exists():
DIR = Path(".")
COUNT = 7
LAYER_NAME = "Layer{}"
PAPER_WIDTH = 22
PAPER_HEIGHT = 17
MARGIN = 1
def create_modelspace_content(msp):
x1 = 0
x2 = 100
for index in range(COUNT):
y = index * 10
layer_name = LAYER_NAME.format(index)
# color by layer
# linetype by layer
# linewidth by layer
msp.add_line((x1, y), (x2, y), dxfattribs={"layer": layer_name})
def original(vp_handle, doc):
pass
def override_aci(vp_handle, doc):
for index in range(COUNT):
layer = doc.layers.get(LAYER_NAME.format(index))
override = layer.get_vp_overrides()
override.set_color(vp_handle, index + 1)
override.commit()
RGB = [
(206, 25, 230),
(11, 84, 244),
(237, 141, 18),
(87, 242, 246),
(137, 109, 186),
(246, 246, 145),
(126, 235, 61),
]
def override_rgb(vp_handle, doc):
for index in range(COUNT):
layer = doc.layers.get(LAYER_NAME.format(index))
override = layer.get_vp_overrides()
override.set_rgb(vp_handle, RGB[index])
override.commit()
LTYPES = [
"DASHED2",
"DOT2",
"DASHED2",
"DOT2",
"DASHED2",
"DOT2",
"DASHED2",
"DOT2",
]
def override_ltype(vp_handle, doc):
for index in range(COUNT):
layer = doc.layers.get(LAYER_NAME.format(index))
override = layer.get_vp_overrides()
override.set_linetype(vp_handle, LTYPES[index])
override.commit()
LW = [13, 18, 25, 35, 50, 70, 100, 140]
def override_lw(vp_handle, doc):
for index in range(COUNT):
layer = doc.layers.get(LAYER_NAME.format(index))
override = layer.get_vp_overrides()
override.set_lineweight(vp_handle, LW[index])
override.commit()
def create_viewports(paperspace: Paperspace):
# Define viewports in paper space:
# center, size=(width, height) defines the viewport in paper space.
# view_center_point and view_height defines the area in model space
# which is displayed in the viewport.
doc = paperspace.doc
vp_height = 15
vp_width = 3
cx = vp_width / 2
cy = (PAPER_HEIGHT - 2 * MARGIN) / 2
for func in (
original,
override_aci,
override_rgb,
override_ltype,
override_lw,
):
vp = paperspace.add_viewport(
center=(cx, cy),
size=(vp_width, vp_height),
view_center_point=(50, 30),
view_height=70,
)
func(vp.dxf.handle, doc)
cx += vp_width + MARGIN
def main():
def make(dxfversion, filename):
doc = ezdxf.new(dxfversion, setup=True)
doc.header["$LWDISPLAY"] = 1 # show linewidth in DXF viewer
msp = doc.modelspace()
vp_layer = doc.layers.add("VIEWPORTS")
# switch viewport layer off to hide the viewport border lines
vp_layer.off()
for index in range(COUNT):
doc.layers.add(LAYER_NAME.format(index))
create_modelspace_content(msp)
psp: Paperspace = doc.layout("Layout1") # type: ignore
psp.page_setup(
size=(PAPER_WIDTH, PAPER_HEIGHT),
margins=(MARGIN, MARGIN, MARGIN, MARGIN),
units="inch",
)
create_viewports(psp)
doc.set_modelspace_vport(60, (50, 30))
try:
doc.saveas(DIR / filename)
except IOError as e:
print(str(e))
make("R2000", "viewport_overrides_R2000.dxf")
make("R2007", "viewport_overrides_R2007.dxf")
make("R2018", "viewport_overrides_R2018.dxf")
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -53,16 +53,40 @@
License%0A
+from typing import cast%0A
from pat
@@ -3248,18 +3248,24 @@
psp
-:
+= cast(
Paperspa
@@ -3266,18 +3266,17 @@
perspace
- =
+,
doc.lay
@@ -3293,24 +3293,9 @@
t1%22)
- # type: ignore
+)
%0A
|
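Decoded, the hunks import cast and replace the annotated assignment plus # type: ignore with an explicit cast, expressing the same intent without suppressing the type checker:

from typing import cast

psp = cast(Paperspace, doc.layout("Layout1"))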
23302ebfe91545f7f88ba763753f558df8b224ce
|
allow insecure content
|
lg_common/src/lg_common/managed_browser.py
|
lg_common/src/lg_common/managed_browser.py
|
import sys
import rospy
import socket
import shutil
import os
from lg_common import ManagedApplication, ManagedWindow
from lg_common.tcp_relay import TCPRelay
from lg_common.msg import ApplicationState
from tornado.websocket import websocket_connect
DEFAULT_BINARY = '/usr/bin/google-chrome'
DEFAULT_ARGS = [
'--enable-gpu-rasterization',
'--no-first-run',
'--no-sandbox',
'--test-type', # only needed to ignore --no-sandbox's warning message
'--allow-file-access-from-files',
'--disable-default-apps',
'--disable-java',
'--disable-session-storage',
'--disable-translate',
'--touch-events=enabled',
'--disable-pinch',
'--overscroll-history-navigation=0',
'--disable-touch-editing',
'--v=1',
'--enable-webgl',
'--ignore-gpu-blacklist'
]
class ManagedBrowser(ManagedApplication):
def __init__(
self,
url=None,
slug=None,
kiosk=True,
geometry=None,
binary=DEFAULT_BINARY,
remote_debugging_port=None,
app=False,
shell=True,
command_line_args=[],
default_args_removal=[],
disk_cache_size=314572800,
log_level=0,
extensions=[],
log_stderr=False,
user_agent='',
pepper_flash_dir='/home/lg/inc/PepperFlash',
pnacl_dir='/home/lg/inc/pnacl',
**kwargs
):
# If no slug provided, attempt to use the node name.
if slug is None:
try:
slug = rospy.get_name().lstrip('/')
except Exception as e:
sys.stderr.write('Could not resolve slug for this browser!')
sys.stderr.write(' * Has your node been initialized?')
raise e
cmd = [binary]
if user_agent:
cmd.append('--user-agent={}'.format(user_agent))
# If no debug port provided, pick one.
if remote_debugging_port is None:
remote_debugging_port = ManagedBrowser.get_os_port()
self.debug_port = ManagedBrowser.get_os_port()
self.relay = TCPRelay(self.debug_port, remote_debugging_port)
if log_stderr:
cmd.append('--enable-logging=stderr')
else:
cmd.append('--enable-logging')
cmd.append('--remote-debugging-port={}'.format(self.debug_port))
cmd.append('--log-level={}'.format(log_level))
self.tmp_dir = '/tmp/lg_browser_{}'.format(slug)
self.clear_tmp_dir()
self.pepper_flash_dir = pepper_flash_dir
self.pnacl_dir = pnacl_dir
self.init_tmp_dir()
cmd.append('--user-data-dir={}'.format(self.tmp_dir))
cmd.append('--disk-cache-dir={}'.format(self.tmp_dir))
cmd.append('--crash-dumps-dir={}/crashes'.format(self.tmp_dir))
if extensions:
for extension in extensions:
if not os.path.isdir(extension):
extensions.remove(extension)
rospy.logwarn("Could not load extension from %s because dir does not exist" % extension)
if extensions:
cmd.append('--load-extension={}'.format(','.join(extensions)))
for _cmd in default_args_removal:
if _cmd in DEFAULT_ARGS:
DEFAULT_ARGS.remove(_cmd)
cmd.extend(DEFAULT_ARGS)
if command_line_args != []:
cmd.extend(command_line_args)
# All remaining kwargs are mapped to command line args.
# _ is replaced with -.
def consume_kwarg(item):
key, value = item
arg = '--{}'.format(key.replace('_', '-'))
if value is None:
return arg
if isinstance(value, bool):
arg += '=' + str(value).lower()
else:
arg += '=' + str(value)
return arg
args = map(consume_kwarg, kwargs.iteritems())
cmd.extend(args)
if app:
cmd.append('--app={}'.format(url))
else:
if kiosk:
cmd.append('--kiosk')
pass
if url is not None:
cmd.append(url)
# finishing command line and piping output to logger
rospy.logdebug("Starting cmd: %s" % cmd)
# Different versions of Chrome use different window instance names.
# Matching the tmp_dir should work for all of them.
w_instance = '\\({}\\)'.format(self.tmp_dir)
window = ManagedWindow(w_instance=w_instance, geometry=geometry, chrome_kiosk_workaround=kiosk)
rospy.logdebug("Command {}".format(cmd))
# clean up after thyself
rospy.on_shutdown(self.clear_tmp_dir)
super(ManagedBrowser, self).__init__(cmd=cmd, window=window)
def post_init(self):
super(ManagedBrowser, self).post_init()
self.add_respawn_handler(self.clear_tmp_dir)
self.add_respawn_handler(self.init_tmp_dir)
self.add_state_handler(self.control_relay)
def init_tmp_dir(self):
"""
Creates the tmp dir
then links in the path to Chrome components like PepperFlash
then replaces the path in the latest-component-updated-flash file
"""
try:
os.mkdir(self.tmp_dir)
os.mkdir(self.tmp_dir + '/PepperFlash')
except:
rospy.logerr("Error trying to make the tmp dir, could exist already")
# Link NaCl component. https://github.com/EndPointCorp/lg_ros_nodes/issues/357
try:
os.symlink(self.pnacl_dir, os.path.join(self.tmp_dir, 'pnacl'))
rospy.loginfo("Linked `pnacl` directory %s" % self.pnacl_dir)
except Exception, e:
rospy.logerr("Error linking pNaCl, %s" % e)
try:
os.symlink(self.pepper_flash_dir + '/flash_dir', "%s/PepperFlash/flash_dir" % self.tmp_dir)
with open("%s/latest-component-updated-flash" % self.pepper_flash_dir, "r") as f:
out = f.read()
with open("%s/PepperFlash/latest-component-updated-flash" % self.tmp_dir, "w") as f:
f.write(out.replace("${TMP_DIR}", self.tmp_dir))
except Exception, e:
rospy.logerr("Error copying pepper flash into the tmp dir, %s" % e)
def clear_tmp_dir(self):
"""
Clears out all temporary files and disk cache for this instance.
"""
try:
rospy.logdebug("Purging ManagedBrowser directory: %s" % self.tmp_dir)
shutil.rmtree(self.tmp_dir)
except OSError, e:
rospy.logdebug("Could not purge the %s directory because %s" % (self.tmp_dir, e))
@staticmethod
def get_os_port():
"""
Lets the OS assign a port number.
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
def send_debug_sock_msg(self, msg):
"""
Writes a string to the browser's debug web socket.
"""
rospy.warn(
'ManagedBrowser.send_debug_sock_msg() probably not yet working'
)
ws_url = 'ws://localhost:{}'.format(self.debug_port)
conn = yield websocket_connect(ws_url, connect_timeout=1)
conn.write_message(msg)
conn.close()
def control_relay(self, state):
if state == ApplicationState.STOPPED:
self.relay.stop()
elif state == ApplicationState.SUSPENDED:
self.relay.start()
elif state == ApplicationState.HIDDEN:
self.relay.start()
elif state == ApplicationState.VISIBLE:
self.relay.start()
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Python
| 0
|
@@ -699,16 +699,56 @@
ion=0',%0A
+ '--allow-running-insecure-content',%0A
'--d
|
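Decoded, the hunk inserts one Chrome flag into DEFAULT_ARGS, after '--overscroll-history-navigation=0',:

'--overscroll-history-navigation=0',
'--allow-running-insecure-content',
'--disable-touch-editing',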
4dc1552bbbdbfb060eb4559c5cded6a5c8e8fd02
|
Migrate kythe for Bazel 0.27 (#3769)
|
kythe/cxx/tools/fyi/testdata/compile_commands.bzl
|
kythe/cxx/tools/fyi/testdata/compile_commands.bzl
|
"""Rule for generating compile_commands.json.in with appropriate inlcude directories."""
load("@bazel_tools//tools/cpp:toolchain_utils.bzl", "find_cpp_toolchain")
_TEMPLATE = """ {{
"directory": "OUT_DIR",
"command": "clang++ -c {filename} -std=c++11 -Wall -Werror -I. -IBASE_DIR {system_includes}",
"file": "{filename}",
}}"""
def _compile_commands_impl(ctx):
system_includes = " ".join([
"-I{}".format(d)
for d in find_cpp_toolchain(ctx).built_in_include_directories
])
ctx.actions.write(
output = ctx.outputs.compile_commands,
content = "[\n{}]\n".format(",\n".join([
_TEMPLATE.format(filename = name, system_includes = system_includes)
for name in ctx.attr.filenames
])),
)
compile_commands = rule(
attrs = {
"filenames": attr.string_list(
mandatory = True,
allow_empty = False,
),
# Do not add references, temporary attribute for find_cpp_toolchain.
# See go/skylark-api-for-cc-toolchain for more details.
"_cc_toolchain": attr.label(
default = Label("@bazel_tools//tools/cpp:current_cc_toolchain"),
),
},
doc = "Generates a compile_commannds.json.in template file.",
outputs = {
"compile_commands": "compile_commands.json.in",
},
implementation = _compile_commands_impl,
)
|
Python
| 0
|
@@ -1385,10 +1385,71 @@
s_impl,%0A
+ toolchains = %5B%22@bazel_tools//tools/cpp:toolchain_type%22%5D,%0A
)%0A
|
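Decoded, the hunk declares the C++ toolchain type on the rule; this is presumably what the Bazel 0.27 migration named in the subject requires for find_cpp_toolchain to keep resolving:

compile_commands = rule(
    # attrs, doc, and outputs unchanged
    implementation = _compile_commands_impl,
    toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
)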
d003765047a8a6796e28531a28ba1364950ccd27
|
Remove request.POST save - incompatible with DRF v3.6.3.
|
lms/djangoapps/bulk_enroll/views.py
|
lms/djangoapps/bulk_enroll/views.py
|
"""
API views for Bulk Enrollment
"""
import json
from edx_rest_framework_extensions.authentication import JwtAuthentication
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from bulk_enroll.serializers import BulkEnrollmentSerializer
from enrollment.views import EnrollmentUserThrottle
from instructor.views.api import students_update_enrollment
from openedx.core.lib.api.authentication import OAuth2Authentication
from openedx.core.lib.api.permissions import IsStaff
from util.disable_rate_limit import can_disable_rate_limit
@can_disable_rate_limit
class BulkEnrollView(APIView):
"""
**Use Case**
Enroll multiple users in one or more courses.
**Example Request**
POST /api/bulk_enroll/v1/bulk_enroll/ {
"auto_enroll": true,
"email_students": true,
"action": "enroll",
"courses": "course-v1:edX+Demo+123,course-v1:edX+Demo2+456",
"identifiers": "brandon@example.com,yamilah@example.com"
}
**POST Parameters**
A POST request can include the following parameters.
* auto_enroll: When set to `true`, students will be enrolled as soon
as they register.
* email_students: When set to `true`, students will be sent email
notifications upon enrollment.
* action: Can either be set to "enroll" or "unenroll". This determines the behavior
**Response Values**
If the supplied course data is valid and the enrollments were
successful, an HTTP 200 "OK" response is returned.
The HTTP 200 response body contains a list of response data for each
enrollment. (See the `instructor.views.api.students_update_enrollment`
docstring for the specifics of the response data available for each
enrollment)
"""
authentication_classes = JwtAuthentication, OAuth2Authentication
permission_classes = IsStaff,
throttle_classes = EnrollmentUserThrottle,
def post(self, request):
serializer = BulkEnrollmentSerializer(data=request.data)
if serializer.is_valid():
request.POST = request.data
response_dict = {
'auto_enroll': serializer.data.get('auto_enroll'),
'email_students': serializer.data.get('email_students'),
'action': serializer.data.get('action'),
'courses': {}
}
for course in serializer.data.get('courses'):
response = students_update_enrollment(self.request, course_id=course)
response_dict['courses'][course] = json.loads(response.content)
return Response(data=response_dict, status=status.HTTP_200_OK)
else:
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
|
Python
| 0
|
@@ -2173,48 +2173,8 @@
():%0A
- request.POST = request.data%0A
|
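Decoded, the hunk removes the request.POST = request.data assignment from post(), which the subject flags as incompatible with DRF v3.6.3; the method then reads:

def post(self, request):
    serializer = BulkEnrollmentSerializer(data=request.data)
    if serializer.is_valid():
        response_dict = {...}  # built exactly as before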
cabae8d7732cca922e3fb56db205e41a20186aa3
|
Remove markdown
|
DataCleaning/data_cleaning.py
|
DataCleaning/data_cleaning.py
|
# -*- coding: utf-8 -*-
"""
Script for Importing data from MySQL database and cleaning
"""
import os
import pymysql
import pandas as pd
from bs4 import BeautifulSoup
from ftfy import fix_text
## Getting Data
# Changing directory
os.chdir("")
# Running the file containing MySQL information
execfile("connection_config.py")
# Connecting to MySQL
connection = pymysql.connect(host=hostname, user=usr, password=pwd, db=db, charset='utf8mb4', cursorclass=pymysql.cursors.DictCursor)
# Fetching data from the database
with connection.cursor() as cursor:
sql = "SELECT * FROM answers"
cursor.execute(sql)
result = cursor.fetchall()
# Closing connection
connection.close()
# Saving the data as a dataframe
data = pd.DataFrame(result)
# Saving the data to an excel file
#data.to_excel("answers.xlsx")
# Importing data from the excel file
#data = pd.read_excel("answers.xlsx")
## Data cleaning
data["body"] = data["body"].fillna("") # Filling missing values
# Cleaning html and fixing unicode
nrow = data.shape[0]
body = list()
for i in range(0, nrow):
body.append(BeautifulSoup(data["body"][i], "html"))
body[i] = body[i].get_text() # Remove html
body[i] = fix_text(body[i]) # Fix unicode characters
body = pd.Series(body)
# Strip whitespace
body_new = body.str.strip()
body_new = body_new.str.replace("[\s]{2,}", "")
# Cleaning special characters
body_new = body_new.str.replace("[\r\n\t$\xa0]", "")
body_new = body_new.str.replace("[\\\\]{1,}", " ")
# Putting the cleaned up data back in the dataframe
data["body"] = body_new
|
Python
| 0.000033
|
@@ -1391,16 +1391,83 @@
%22, %22%22)%0A%0A
+# Remove markdown%0Abody_new = body_new.str.replace(%22%5B$#~%5E*_%5D%22, %22%22)%0A%0A
# Cleani
@@ -1528,17 +1528,16 @@
%22%5B%5Cr%5Cn%5Ct
-$
%5Cxa0%5D%22,
|
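Decoded, the hunks insert a markdown-stripping pass after the whitespace cleanup and drop the now-redundant $ from the special-character class:

# Remove markdown
body_new = body_new.str.replace("[$#~^*_]", "")

# Cleaning special characters
body_new = body_new.str.replace("[\r\n\t\xa0]", "")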
f3ff33932180e177dfd87eada7cf4ba1e253e0b4
|
fix user view set add queryset
|
accounts/views.py
|
accounts/views.py
|
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import get_object_or_404
from django.views.generic import UpdateView, FormView
from django.views.generic.detail import DetailView
from django.db.models import Q
from accounts.models import TimtecUser
from accounts.forms import ProfileEditForm, AcceptTermsForm
from accounts.serializers import TimtecUserSerializer, TimtecUserAdminSerializer, GroupAdminSerializer, GroupSerializer
from braces.views import LoginRequiredMixin
from rest_framework import viewsets
from rest_framework import filters
from rest_framework import generics
from rest_framework.response import Response
from core.permissions import IsAdmin
from django.contrib.auth import update_session_auth_hash
class ProfileEditView(LoginRequiredMixin, UpdateView):
model = get_user_model()
form_class = ProfileEditForm
template_name = 'profile-edit.html'
def get_success_url(self):
url = self.request.GET.get('next', None)
if url:
return url
else:
return reverse_lazy('profile')
def get_object(self):
return self.request.user
def post(self, request, *args, **kwargs):
form_result = super(ProfileEditView, self)\
.post(request, *args, **kwargs)
update_session_auth_hash(self.request, self.request.user)
return form_result
class ProfileView(LoginRequiredMixin, DetailView):
model = get_user_model()
template_name = 'profile.html'
context_object_name = 'profile_user'
def get_object(self):
if hasattr(self, 'kwargs') and 'username' in self.kwargs:
try:
return get_object_or_404(self.model, username=self.kwargs['username'])
except:
return self.request.user
else:
return self.request.user
class TimtecUserViewSet(viewsets.ReadOnlyModelViewSet):
model = get_user_model()
lookup_field = 'id'
filter_fields = ('groups__name',)
filter_backends = (filters.DjangoFilterBackend, filters.OrderingFilter,)
serializer_class = TimtecUserSerializer
ordering = ('first_name', 'username',)
class TimtecUserAdminViewSet(viewsets.ModelViewSet):
model = get_user_model()
# lookup_field = 'id'
# filter_backends = (filters.OrderingFilter,)
permission_classes = (IsAdmin, )
serializer_class = TimtecUserAdminSerializer
ordering = ('first_name', 'username',)
queryset = TimtecUser.objects.all()
# search_fields = ('first_name', 'last_name', 'username', 'email')
def get_queryset(self):
page = self.request.query_params.get('page')
keyword = self.request.query_params.get('keyword')
admin = self.request.query_params.get('admin')
blocked = self.request.query_params.get('blocked')
queryset = super(TimtecUserAdminViewSet, self).get_queryset().order_by('first_name')
if keyword:
queryset = queryset.filter(Q(first_name__icontains=keyword) |
Q(last_name__icontains=keyword) |
Q(username__icontains=keyword) |
Q(email__icontains=keyword))
if admin == 'true':
queryset = queryset.filter(is_superuser=True)
if blocked == 'true':
queryset = queryset.filter(is_active=False)
if page:
paginator = Paginator(queryset, 50)
try:
queryset = paginator.page(page)
except PageNotAnInteger:
# If page is not an integer, deliver first page.
queryset = paginator.page(1)
except EmptyPage:
# If page is out of range (e.g. 9999),
# deliver last page of results.
queryset = paginator.page(paginator.num_pages)
return queryset
class GroupAdminViewSet(viewsets.ModelViewSet):
queryset = Group.objects.all()
model = Group
serializer_class = GroupAdminSerializer
permission_classes = (IsAdmin, )
def put(self, request, **kwargs):
User = get_user_model()
# Does a user need to be removed from a given group?
if request.data['action'] == 'remove':
group = Group.objects.get(id=request.data['id'])
group.user_set.remove(User.objects.get(id=request.data['user']['id']))
return Response(status=200)
# Does a user need to be added to a given group?
# The "add" action support multiple users
if request.data['action'] == 'add':
group = Group.objects.get(id=request.data['id'])
for user in request.data.get('users', None):
group.user_set.add(User.objects.get(id=user['id']))
return Response(status=200)
return Response(status=404)
class GroupViewSet(viewsets.ReadOnlyModelViewSet):
"""
API endpoint that allows groups to be viewed.
"""
queryset = Group.objects.all()
serializer_class = GroupSerializer
class UserSearchView(LoginRequiredMixin, generics.ListAPIView):
model = get_user_model()
serializer_class = TimtecUserSerializer
def get_queryset(self):
queryset = self.model.objects.all()
query = self.request.query_params.get('name', None)
if query is not None:
queryset = queryset.filter(Q(first_name__icontains=query) |
Q(last_name__icontains=query) |
Q(username__icontains=query) |
Q(email__icontains=query))
return queryset
class StudentSearchView(LoginRequiredMixin, generics.ListAPIView):
model = get_user_model()
serializer_class = TimtecUserSerializer
search_fields = ('first_name', 'last_name', 'username', 'email')
def get_queryset(self):
queryset = self.model.objects.all()
course = self.request.query_params.get('course', None)
classes = self.request.user.professor_classes.all()
if classes:
queryset = queryset.filter(classes__in=classes)
else:
# FIXME: if every student is in a class, this is useless.
if course is not None:
queryset = queryset.filter(studentcourse_set=course)
query = self.request.query_params.get('name', None)
if query is not None:
queryset = queryset.filter(Q(first_name__icontains=query) |
Q(last_name__icontains=query) |
Q(username__icontains=query) |
Q(email__icontains=query))
return queryset
class AcceptTermsView(FormView):
template_name = 'accept-terms.html'
form_class = AcceptTermsForm
success_url = reverse_lazy('courses')
def get_success_url(self):
next_url = self.request.POST.get('next', None)
if next_url:
return next_url
return reverse_lazy('courses')
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
self.request.user.accepted_terms = True
self.request.user.save()
return super(AcceptTermsView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(AcceptTermsView, self).get_context_data(**kwargs)
next_url = self.request.GET.get('next')
if next_url:
context['next_url'] = next_url
return context
|
Python
| 0.000058
|
@@ -2264,32 +2264,72 @@
cUserSerializer%0A
+ queryset = TimtecUser.objects.all()%0A
ordering = (
|
2557d93e6607fb61b9ba607ca85f0eb1c9d13871
|
Update the rfam problems part of export query
|
luigi/rnacentral/search/exporter.py
|
luigi/rnacentral/search/exporter.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import subprocess as sp
from datetime import date
from lxml import etree
from lxml.builder import E
from rnacentral.psql import PsqlWrapper
from .data import builder
XML_SCHEMA = 'http://www.ebi.ac.uk/ebisearch/XML4dbDumps.xsd'
BASE_SQL = """
SELECT
json_build_object(
'upi', rna.upi,
'taxid', xref.taxid,
'first_seen', array_agg(release1.timestamp),
'last_seen', array_agg(release2.timestamp),
'cross_references', array_agg(
json_build_object(
'name', acc."database",
'external_id', acc.external_id,
'optional_id', acc.optional_id,
'accession', acc.accession,
'non_coding_id', acc.non_coding_id,
'parent_accession', acc.parent_ac || '.' || acc.seq_version
)
),
'description', array_agg(pre.description),
'deleted', array_agg(xref.deleted),
'length', array_agg(rna.len),
'species', array_agg(acc.species),
'organelles', array_agg(acc.organelle),
'expert_dbs', array_agg(db.display_name),
'rna_type', array_agg(pre.rna_type),
'product', array_agg(acc.product),
'md5', array_agg(rna.md5),
'authors', array_agg(refs.authors),
'journals', array_agg(refs.location),
'pub_titles', array_agg(refs.title),
'pub_ids', array_agg(refs.id),
'pubmed_ids', array_agg(pubmed.ref_pubmed_id::varchar) || array_agg(refs.pmid),
'dois', array_agg(pubmed.doi) || array_agg(refs.doi),
'has_coordinates', array_agg(pre.has_coordinates),
'rfam_family_names', array_agg(models.short_name),
'rfam_ids', array_agg(hits.rfam_model_id),
'rfam_clans', array_agg(models.rfam_clan_id),
'rfam_status',
case
when cardinality((array_agg(pre.rfam_problems))) = 0 then '{{}}'
when (array_agg(pre.rfam_problems))[1] = '' then '{{}}'
when (array_agg(pre.rfam_problems))[1] is null then '{{}}'
else (array_agg(pre.rfam_problems))[1]::json
end,
'tax_strings', array_agg(acc.classification),
'functions', array_agg(acc.function),
'genes', array_agg(acc.gene),
'gene_synonyms', array_agg(acc.gene_synonym),
'common_name', array_agg(acc.common_name),
'notes', array_agg(acc.note),
'locus_tags', array_agg(acc.locus_tag),
'standard_names', array_agg(acc.standard_name),
'products', array_agg(acc.product),
'go_annotations', array_agg(
json_build_object(
'go_term_id', anno.ontology_term_id,
'qualifier', anno.qualifier,
'go_name', ont.name
)
)
)
FROM xref xref
JOIN rnc_accessions acc ON xref.ac = acc.accession
JOIN rnc_database db ON xref.dbid = db.id
JOIN rnc_release release1 ON xref.created = release1.id
JOIN rnc_release release2 ON xref.last = release2.id
JOIN rna rna ON xref.upi = rna.upi
JOIN rnc_rna_precomputed pre
ON
xref.upi = pre.upi
AND xref.taxid = pre.taxid
LEFT JOIN rnc_reference_map ref_map ON ref_map.accession = acc.accession
LEFT JOIN rnc_references refs ON refs.id = ref_map.reference_id
LEFT JOIN rfam_model_hits hits ON xref.upi = hits.upi
LEFT JOIN rfam_models models
ON
hits.rfam_model_id = models.rfam_model_id
LEFT JOIN go_term_annotations anno ON anno.rna_id = pre.id
LEFT JOIN go_term_publication_map go_map
ON
go_map.go_term_annotation_id = anno.go_term_annotation_id
LEFT JOIN ref_pubmed pubmed ON pubmed.ref_pubmed_id = go_map.ref_pubmed_id
LEFT JOIN ontology_terms ont
ON
ont.ontology_term_id = anno.ontology_term_id
WHERE
xref.deleted = 'N'
AND %s
GROUP BY rna.upi, xref.taxid
"""
SINGLE_SQL = BASE_SQL % "xref.upi = '{upi}' AND xref.taxid = {taxid}"
RANGE_SQL = BASE_SQL % "rna.id BETWEEN {min_id} AND {max_id}"
def export(db, query, **kwargs):
psql = PsqlWrapper(db)
for result in psql.copy_to_iterable(query, **kwargs):
try:
data = json.loads(result['json_build_object'])
yield builder(data)
except:
raise
def range(db, min_id, max_id):
"""
Generates a series of XML strings representing all entries in the given
range of ids.
"""
return export(db, RANGE_SQL, min_id=min_id, max_id=max_id)
def upi(db, upi, taxid):
"""
Will create a XmlEntry object for the given upi, taxid.
"""
results = export(db, SINGLE_SQL, upi=upi, taxid=taxid)
try:
return next(results)
except StopIteration:
raise ValueError("Found no entries for %s_%i" % (upi, taxid))
def write(handle, results):
"""
This will create the required root XML element and place all the given
XmlEntry objects as ElementTree.Element's in it. This then produces the
string representation of that document which can be saved.
"""
handle.write('<database>')
handle.write(etree.tostring(E.name('RNAcentral')))
handle.write(etree.tostring(E.description('a database for non-protein coding RNA sequences')))
handle.write(etree.tostring(E.release('1.0')))
handle.write(etree.tostring(E.release_date(date.today().strftime('%d/%m/%Y'))))
count = 0
handle.write('<entries>')
for result in results:
count += 1
handle.write(etree.tostring(result))
handle.write('</entries>')
if not count:
raise ValueError("No entries found")
handle.write(etree.tostring(E.entry_count(str(count))))
handle.write('</database>')
def validate(filename):
"""
Run xmllint validation on the given filename.
"""
cmd = ('xmllint', filename, '--schema', XML_SCHEMA, '--stream')
sp.check_call(cmd)
|
Python
| 0
|
@@ -2535,155 +2535,8 @@
%7D%7D'%0A
- when (array_agg(pre.rfam_problems))%5B1%5D = '' then '%7B%7B%7D%7D'%0A when (array_agg(pre.rfam_problems))%5B1%5D is null then '%7B%7B%7D%7D'%0A
|
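Decoded, the hunk deletes the two middle when branches (the = '' and is null guards), so the rfam_status CASE in the export query keeps only the empty-aggregate test:

'rfam_status',
case
    when cardinality((array_agg(pre.rfam_problems))) = 0 then '{{}}'
    else (array_agg(pre.rfam_problems))[1]::json
end,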
abcae974229dc28a60f78b706f7cd4070bc530fa
|
update doc
|
lib/ansible/modules/windows/win_feature.py
|
lib/ansible/modules/windows/win_feature.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Paul Durivage <paul.durivage@rackspace.com>, Trond Hindenes <trond@hindenes.com> and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_feature
version_added: "1.7"
short_description: Installs and uninstalls Windows Features
description:
- Installs or uninstalls Windows Roles or Features
options:
name:
description:
- Names of roles or features to install as a single feature or a comma-separated list of features
required: true
default: null
state:
description:
- State of the features or roles on the system
required: false
choices:
- present
- absent
default: present
restart:
description:
- Restarts the computer automatically when installation is complete, if restarting is required by the roles or features installed.
choices:
- yes
- no
default: null
include_sub_features:
description:
- Adds all subfeatures of the specified feature
choices:
- yes
- no
default: null
include_management_tools:
description:
- Adds the corresponding management tools to the specified feature
choices:
- yes
- no
default: null
source:
description:
- Specify a source to install the feature from
required: false
choices:
- {driveletter}:\sources\sxs
- \\{IP}\Share\sources\sxs
author:
- "Paul Durivage (@angstwad)"
- "Trond Hindenes (@trondhindenes)"
'''
EXAMPLES = '''
# This installs IIS.
# The names of features available for install can be run by running the following Powershell Command:
# PS C:\Users\Administrator> Import-Module ServerManager; Get-WindowsFeature
$ ansible -i hosts -m win_feature -a "name=Web-Server" all
$ ansible -i hosts -m win_feature -a "name=Web-Server,Web-Common-Http" all
# Playbook example
---
- name: Install IIS
hosts: all
gather_facts: false
tasks:
- name: Install IIS
win_feature:
name: "Web-Server"
state: present
restart: yes
include_sub_features: yes
include_management_tools: yes
'''
|
Python
| 0
|
@@ -2584,16 +2584,109 @@
tp%22 all%0A
+ansible -m %22win_feature%22 -a %22name=NET-Framework-Core source=C:/Temp/iso/sources/sxs%22 windows%0A
%0A%0A# Play
|
23b56303d3afa4764d7fa4f4d82eafbaf57d0341
|
Update the version number
|
ailib/__init__.py
|
ailib/__init__.py
|
# PyAI
# The MIT License
#
# Copyright (c) 2014,2015,2016,2017 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.2.dev0'
__all__ = ['ml',
'mdp',
'optimize',
'signal']
|
Python
| 0.000045
|
@@ -1603,17 +1603,17 @@
'0.2.dev
-0
+1
'%0A%0A__all
|
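The version markers documented above follow PEP 440; their promised ordering can be checked with the third-party packaging library (an assumption here, not a project dependency):

from packaging.version import Version

# dev releases sort before the final release they lead up to
assert Version('0.2.dev0') < Version('0.2.dev1') < Version('0.2')
print(Version('0.2.dev1').is_prerelease)  # True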
4fb02e4bdf4af30826a00dadc4883cd2d9922541
|
Fix type errors, move from partial to explicit connector function
|
aiosqlite/core.py
|
aiosqlite/core.py
|
# Copyright 2017 John Reese
# Licensed under the MIT license
import asyncio
import logging
import sqlite3
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from typing import Any, Callable, Iterable
__all__ = [
'connect',
'Connection',
'Cursor',
]
Log = logging.getLogger('aiosqlite')
class Cursor:
def __init__(
self,
conn: 'Connection',
cursor: sqlite3.Cursor,
) -> None:
self._conn = conn
self._cursor = cursor
class Connection:
def __init__(
self,
connector: Callable[[], sqlite3.Connection],
loop: asyncio.AbstractEventLoop,
executor: ThreadPoolExecutor,
) -> None:
self._conn: sqlite3.Connection = None
self._connector = connector
self._loop = loop
self._executor = executor
async def _execute(self, fn, *args, **kwargs):
"""Execute a function with the given arguments on the shared thread."""
pt = partial(fn, *args, **kwargs)
return await self._loop.run_in_executor(self._executor, pt)
async def _connect(self):
"""Connect to the actual sqlite database."""
if self._conn is None:
self._conn = await self._execute(self._connector)
async def __aenter__(self) -> 'Connection':
await self._connect()
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
await self.close()
self._conn = None
async def cursor(self) -> Cursor:
raise NotImplementedError('Not yet available in aiosqlite')
async def commit(self) -> None:
raise NotImplementedError('Not yet available in aiosqlite')
async def rollback(self) -> None:
raise NotImplementedError('Not yet available in aiosqlite')
async def close(self) -> None:
await self._execute(self._conn.close)
async def execute(
self,
sql: str,
parameters: Iterable[Any] = None,
) -> Cursor:
raise NotImplementedError('Not yet available in aiosqlite')
async def executemany(
self,
sql: str,
parameters: Iterable[Iterable[Any]] = None,
) -> Cursor:
raise NotImplementedError('Not yet available in aiosqlite')
async def executescript(
self,
sql_script: str,
) -> Cursor:
raise NotImplementedError('Not yet available in aiosqlite')
def connect(
database: str,
**kwargs: Any,
) -> Connection:
"""Create and return a connection proxy to the sqlite database."""
loop = asyncio.get_event_loop()
executor = ThreadPoolExecutor(1)
connector = partial(
sqlite3.connect,
database,
check_same_thread=False,
**kwargs,
)
return Connection(connector, loop, executor)
|
Python
| 0
|
@@ -2632,20 +2632,25 @@
utor(1)%0A
+%0A
+def
connecto
@@ -2650,27 +2650,41 @@
onnector
- = partial(
+() -%3E sqlite3.Connection:
%0A
@@ -2684,16 +2684,23 @@
+return
sqlite3.
@@ -2706,26 +2706,28 @@
.connect
-,%0A
+(%0A
database
@@ -2714,24 +2714,26 @@
(%0A
+
database,%0A
@@ -2726,24 +2726,28 @@
database,%0A
+
chec
@@ -2771,24 +2771,28 @@
se,%0A
+
**kwargs,%0A
@@ -2789,16 +2789,20 @@
kwargs,%0A
+
)%0A%0A
|
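At this commit only connect and close are implemented (cursor, commit and friends still raise NotImplementedError), so a usage sketch can only exercise the context-manager path; this assumes the package is importable as aiosqlite:

import asyncio
import aiosqlite

async def main():
    async with aiosqlite.connect(':memory:') as db:
        pass  # opened on the worker thread by __aenter__, closed by __aexit__

asyncio.run(main())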
d2cfc7f2fefcb9a317ab3cd18ebc8785fb764d9f
|
remove last bang
|
lib/exabgp/bgp/message/open/capability/refresh.py
|
lib/exabgp/bgp/message/open/capability/refresh.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
refresh.py
Created by Thomas Mangin on 2012-07-17.
Copyright (c) 2012 Exa Networks. All rights reserved.
"""
# =================================================================== RouteRefresh
class RouteRefresh (list):
def __str__ (self):
return "Route Refresh (unparsed)"
def extract (self):
return []
class CiscoRouteRefresh (list):
def __str__ (self):
return "Cisco Route Refresh (unparsed)"
def extract (self):
return []
|
Python
| 0.008047
|
@@ -1,26 +1,4 @@
-#!/usr/bin/env python%0A
# en
|
4fa4fb3f583e787da9594ac8a714a22981842c71
|
remove now-bogus test
|
pydoctor/test/test_commandline.py
|
pydoctor/test/test_commandline.py
|
from pydoctor import driver
import sys, cStringIO
def geterrtext(*options):
options = list(options)
se = sys.stderr
f = cStringIO.StringIO()
print options
sys.stderr = f
try:
try:
driver.main(options)
except SystemExit:
pass
else:
assert False, "did not fail"
finally:
sys.stderr = se
return f.getvalue()
def test_invalid_option():
err = geterrtext('--no-such-option')
assert 'no such option' in err
def test_no_do_nothing():
err = geterrtext()
assert "this invocation isn't going to do anything" in err
def test_cannot_advance_blank_system():
err = geterrtext('--make-html')
assert 'forget an --add-package?' in err
def test_invalid_systemclasses():
err = geterrtext('--system-class')
assert 'requires an argument' in err
err = geterrtext('--system-class=notdotted')
assert 'dotted name' in err
err = geterrtext('--system-class=no-such-module.System')
assert 'could not import module' in err
err = geterrtext('--system-class=pydoctor.model.Class')
assert 'is not a subclass' in err
def test_projectbasedir():
"""
The --project-base-dir option should set the projectbasedirectory attribute
on the options object.
"""
value = "projbasedirvalue"
options, args = driver.parse_args([
"--project-base-dir", value])
assert options.projectbasedirectory == value
|
Python
| 0.000005
|
@@ -508,121 +508,8 @@
rr%0A%0A
-def test_no_do_nothing():%0A err = geterrtext()%0A assert %22this invocation isn't going to do anything%22 in err%0A%0A
def
|
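The geterrtext helper above is a reusable pattern: swap out sys.stderr, swallow the SystemExit that option parsing raises, and return whatever was printed. A Python 3 sketch of the same idea (cStringIO is Python 2 only):

import io
import sys

def capture_stderr(fn, *args):
    old, sys.stderr = sys.stderr, io.StringIO()
    try:
        try:
            fn(*args)
        except SystemExit:
            pass  # option parsers exit on bad input; only the message matters here
        return sys.stderr.getvalue()
    finally:
        sys.stderr = old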
158e1ee867abac18eaabe366b4f453b3e3dcaa51
|
Add sort_by to Grondwatermonster.
|
pydov/search/grondwatermonster.py
|
pydov/search/grondwatermonster.py
|
# -*- coding: utf-8 -*-
"""Module containing the search classes to retrieve DOV groundwater samples."""
import pandas as pd
from pydov.types.fields import _WfsInjectedField
from .abstract import AbstractSearch
from ..types.grondwatermonster import GrondwaterMonster
from ..util import owsutil
class GrondwaterMonsterSearch(AbstractSearch):
"""Search class to retrieve information about groundwater samples
(GrondwaterMonster).
"""
__wfs_schema = None
__wfs_namespace = None
__md_metadata = None
__fc_featurecatalogue = None
__xsd_schemas = None
def __init__(self, objecttype=GrondwaterMonster):
"""Initialisation.
Parameters
----------
        objecttype : subclass of pydov.types.abstract.AbstractDovType
            Reference to a class representing the GrondwaterMonster type.
            Optional: defaults to the GrondwaterMonster type containing the
            fields described in the documentation.
"""
super(GrondwaterMonsterSearch,
self).__init__('gw_meetnetten:grondwatermonsters', objecttype)
def _init_namespace(self):
"""Initialise the WFS namespace associated with the layer."""
if GrondwaterMonsterSearch.__wfs_namespace is None:
GrondwaterMonsterSearch.__wfs_namespace = self._get_namespace()
def _init_fields(self):
"""Initialise the fields and their metadata available in this search
class."""
if self._fields is None:
if GrondwaterMonsterSearch.__wfs_schema is None:
GrondwaterMonsterSearch.__wfs_schema = self._get_schema()
if GrondwaterMonsterSearch.__md_metadata is None:
GrondwaterMonsterSearch.__md_metadata = \
self._get_remote_metadata()
if GrondwaterMonsterSearch.__fc_featurecatalogue is None:
csw_url = self._get_csw_base_url()
fc_uuid = owsutil.get_featurecatalogue_uuid(
GrondwaterMonsterSearch.__md_metadata)
GrondwaterMonsterSearch.__fc_featurecatalogue = \
owsutil.get_remote_featurecatalogue(csw_url, fc_uuid)
if GrondwaterMonsterSearch.__xsd_schemas is None:
GrondwaterMonsterSearch.__xsd_schemas = \
self._get_remote_xsd_schemas()
fields = self._build_fields(
GrondwaterMonsterSearch.__wfs_schema,
GrondwaterMonsterSearch.__fc_featurecatalogue,
GrondwaterMonsterSearch.__xsd_schemas)
for field in fields.values():
if field['name'] not in self._type.get_field_names(
include_wfs_injected=True):
self._type.fields.append(
_WfsInjectedField(name=field['name'],
datatype=field['type']))
self._fields = self._build_fields(
GrondwaterMonsterSearch.__wfs_schema,
GrondwaterMonsterSearch.__fc_featurecatalogue,
GrondwaterMonsterSearch.__xsd_schemas)
def search(self, location=None, query=None, return_fields=None,
max_features=None):
"""Search for groundwater samples (GrondwaterMonsterSearch). Provide
`location` and/or `query`. When `return_fields` is None,
all fields are returned.
Parameters
----------
location : pydov.util.location.AbstractLocationFilter or \
owslib.fes.BinaryLogicOpType<AbstractLocationFilter> or \
owslib.fes.UnaryLogicOpType<AbstractLocationFilter>
Location filter limiting the features to retrieve. Can either be a
single instance of a subclass of AbstractLocationFilter, or a
combination using And, Or, Not of AbstractLocationFilters.
query : owslib.fes.OgcExpression
OGC filter expression to use for searching. This can contain any
combination of filter elements defined in owslib.fes. The query
should use the fields provided in `get_fields()`. Note that not
all fields are currently supported as a search parameter.
return_fields : list<str> or tuple<str> or set<str>
A list of fields to be returned in the output data. This should
be a subset of the fields provided in `get_fields()`. Note that
not all fields are currently supported as return fields.
max_features : int
Limit the maximum number of features to request.
Returns
-------
pandas.core.frame.DataFrame
DataFrame containing the output of the search query.
Raises
------
pydov.util.errors.InvalidSearchParameterError
When not one of `location` or `query` is provided.
pydov.util.errors.InvalidFieldError
When at least one of the fields in `return_fields` is unknown.
When a field that is only accessible as return field is used as
a query parameter.
When a field that can only be used as a query parameter is used as
a return field.
pydov.util.errors.FeatureOverflowError
When the number of features to be returned is equal to the
maxFeatures limit of the WFS server.
AttributeError
When the argument supplied as return_fields is not a list,
tuple or set.
"""
self._pre_search_validation(location, query, return_fields)
fts = self._search(location=location, query=query,
return_fields=return_fields,
max_features=max_features)
gw_filters = self._type.from_wfs(fts, self.__wfs_namespace)
df = pd.DataFrame(
data=self._type.to_df_array(gw_filters, return_fields),
columns=self._type.get_field_names(return_fields))
return df
|
Python
| 0
|
@@ -3167,16 +3167,45 @@
ry=None,
+ sort_by=None,%0A
return_
@@ -3216,31 +3216,16 @@
ds=None,
-%0A
max_fea
@@ -4367,16 +4367,105 @@
ameter.%0A
+ sort_by : owslib.fes.SortBy, optional%0A List of properties to sort by.%0A
@@ -5797,16 +5797,25 @@
, query,
+ sort_by,
return_
@@ -5820,16 +5820,66 @@
n_fields
+,%0A max_features
)%0A%0A
@@ -5931,16 +5931,33 @@
y=query,
+ sort_by=sort_by,
%0A
|
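The diff above threads a sort_by argument through to _search; the expected value is an owslib sort clause. A hedged sketch of the call shape (the field name 'datum' is illustrative, not a documented pydov field):

from owslib.fes import SortBy, SortProperty

sort_by = SortBy([SortProperty('datum', 'DESC')])
# df = GrondwaterMonsterSearch().search(query=..., sort_by=sort_by)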
2d8a3b4dfbd9cfb6c5368c1b4a04e2bd07e97f18
|
Add Big O time complexity
|
pygorithm/data_structures/heap.py
|
pygorithm/data_structures/heap.py
|
# Author: ALLSTON MICKEY
# Contributed: OMKAR PATHAK
# Created On: 11th August 2017
from queue import Queue
# min-heap implementation as priority queue
class Heap(Queue):
    def parent_idx(self, idx):
        return (idx - 1) // 2  # children sit at 2*i + 1 and 2*i + 2, so the parent of i is (i - 1) // 2
def left_child_idx(self, idx):
return (idx * 2) + 1
def right_child_idx(self, idx):
return (idx * 2) + 2
def insert(self, data):
super(Heap, self).enqueue(data)
if self.rear >= 1: # heap may need to be fixed
self.heapify_up()
def heapify_up(self):
'''
Start at the end of the tree (first enqueued item).
Compare the rear item to its parent, swap if
the parent is larger than the child (min-heap property).
Repeat until the min-heap property is met.
'''
child = self.rear
parent = self.parent_idx(child)
        while child > 0 and self.queue[child] < self.queue[parent]:
# Swap (sift up) and update child:parent relation
self.queue[child], self.queue[parent] = self.queue[parent], self.queue[child]
child = parent
parent = self.parent_idx(child)
def pop(self):
''' Removes the lowest value element (highest priority) from the heap '''
min = self.dequeue()
if self.rear >= 1: # heap may need to be fixed
self.heapify_down()
return min
def favorite(self, parent):
''' Determines which child has the highest priority by 3 cases '''
left = self.left_child_idx(parent)
right = self.right_child_idx(parent)
if left <= self.rear and right <= self.rear: # case 1: both nodes exist
if self.queue[left] <= self.queue[right]:
return left
else:
return right
elif left <= self.rear: # case 2: only left exists
return left
else: # case 3: no children (if left doesn't exist, neither can the right)
return None
def heapify_down(self):
'''
Select the root and sift down until min-heap property is met.
While a favorite child exists, and that child is smaller
than the parent, swap them (sift down).
'''
cur = ROOT = 0 # start at the root
fav = self.favorite(cur) # determine favorite child
        while fav is not None:  # favorite() returns None once cur is a leaf
            if self.queue[cur] > self.queue[fav]:
                # Swap (sift down) and update parent:favorite relation
                self.queue[cur], self.queue[fav] = self.queue[fav], self.queue[cur]
                cur = fav
                fav = self.favorite(cur)
            else:
                return
def get_code(self):
''' returns the code for the current class '''
import inspect
return inspect.getsource(Heap)
|
Python
| 0.000056
|
@@ -586,11 +586,10 @@
ee (
-fir
+la
st e
@@ -773,16 +773,191 @@
is met.%0A
+ %0A Best Case: O(1), item is inserted at correct position, no swaps needed%0A Worst Case: O(logn), item needs to be swapped throughout all levels of tree%0A
@@ -1402,16 +1402,25 @@
priority
+, at root
) from t
@@ -2387,16 +2387,183 @@
t down).
+%0A%0A Best Case: O(1), item is inserted at correct position, no swaps needed%0A Worst Case: O(logn), item needs to be swapped throughout all levels of tree
%0A
@@ -2995,32 +2995,32 @@
else:%0A
-
@@ -3027,16 +3027,123 @@
return%0A%0A
+ def time_complexities(self):%0A return '''%5BInsert & Pop%5D Best Case: O(1), Worst Case: O(logn)'''%0A%0A
def
|
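As a cross-check of the min-heap ordering the class above implements (and the O(log n) sift costs the diff documents), the stdlib heapq yields the same pop order:

import heapq

h = []
for v in [54, 26, 93, 17, 77]:
    heapq.heappush(h, v)  # O(log n) sift up per insert
print([heapq.heappop(h) for _ in range(5)])  # [17, 26, 54, 77, 93]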
4f46ed9ae0e75afe49da3bcb8f9b91cd6ce78544
|
Remove cached class.
|
pymatgen/entries/compatibility.py
|
pymatgen/entries/compatibility.py
|
#!/usr/bin/env python
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
from __future__ import division
__author__ = "Shyue Ping Ong, Anubhav Jain"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Mar 19, 2012"
import os
import ConfigParser
from collections import defaultdict
from pymatgen.core.composition import Composition
from pymatgen.entries.post_processors_abc import EntryPostProcessor
from pymatgen.io.vaspio_set import VaspInputSet
from pymatgen.util.decorators import cached_class
@cached_class
class Compatibility(EntryPostProcessor):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. This is a base class from which other specific compatibility
schemes are implemented.
"""
def __init__(self, input_set_name, compat_type):
"""
Args:
input_set_name:
The name of the input set to use. Can be either
MaterialsProject or MITMatgen.
compat_type:
Two options, GGA or Advanced. GGA means all GGA+U entries are
excluded. Advanced means mixing scheme is implemented to make
entries compatible with each other, but entries which are
supposed to be done in GGA+U will have the equivalent GGA
entries excluded. For example, Fe oxides should have a U value
under the Advanced scheme. A GGA Fe oxide run will therefore be
excluded under the scheme.
"""
self.compat_type = compat_type
self.input_set_name = input_set_name
self.input_set = VaspInputSet(input_set_name)
module_dir = os.path.dirname(os.path.abspath(__file__))
self._config = ConfigParser.SafeConfigParser()
self._config.optionxform = str
self._config.readfp(open(os.path.join(module_dir,
"Compatibility.cfg")))
u_corrections = {}
for el in self.input_set.incar_settings["LDAUU"].keys():
name = "{}{}UCorrections{}".format(input_set_name, compat_type, el)
if name in self._config.sections():
corr = dict(self._config.items(name))
u_corrections[el] = {k: float(v) for k, v in corr.items()}
cpd_energies = dict(self._config.items("{}{}CompoundEnergies"
.format(input_set_name,
compat_type)))
self.u_corrections = u_corrections
self.cpd_energies = {k: float(v) for k, v in cpd_energies.items()}
self.valid_potcars = set(self.input_set.potcar_settings.values())
self.u_settings = self.input_set.incar_settings["LDAUU"]
if compat_type == "GGA":
self.u_corrections = {}
self.u_settings = {}
def requires_hubbard(self, comp):
"""
        Check if a particular composition requires U parameters to be set.
Args:
comp:
Composition
Returns:
True if hubbard U parameter required. False otherwise.
"""
comp = Composition(comp)
elements = sorted([el for el in comp.elements if comp[el] > 0],
key=lambda el: el.X)
most_electroneg = elements[-1].symbol
usettings = self.u_settings.get(most_electroneg, {})
return any([usettings.get(el.symbol, 0) for el in comp.elements])
def process_entry(self, entry):
"""
Process a single entry with the chosen Compatibility scheme.
Args:
entry:
A ComputedEntry object.
Returns:
An adjusted entry if entry is compatible, otherwise None is
returned.
"""
if entry.parameters.get("run_type", "GGA") == "HF":
return None
cpdenergies = self.cpd_energies
calc_u = entry.parameters["hubbards"]
calc_u = defaultdict(int) if calc_u is None else calc_u
comp = entry.composition
#Check that POTCARs are valid
rform = comp.reduced_formula
if rform not in cpdenergies:
psp_settings = set([sym.split(" ")[1]
for sym in entry.parameters["potcar_symbols"]])
if not self.valid_potcars.issuperset(psp_settings):
return None
#correct all compounds that are wrong, e.g. O2 molecule
if rform in cpdenergies:
entry.structureid = -comp.keys()[0].Z
entry.correction = cpdenergies[rform] * comp.num_atoms \
- entry.uncorrected_energy
else:
elements = sorted([el for el in comp.elements if comp[el] > 0],
key=lambda el: el.X)
most_electroneg = elements[-1].symbol
correction = 0
ucorr = self.u_corrections.get(most_electroneg, {})
usettings = self.u_settings.get(most_electroneg, {})
for el in comp.elements:
sym = el.symbol
#Check for bad U values
if calc_u.get(sym, 0) != usettings.get(sym, 0):
return None
if sym in ucorr:
correction += float(ucorr[sym]) * comp[el]
entry.correction = correction
return entry
def process_entries(self, entries):
"""
Process a sequence of entries with the chosen Compatibility scheme.
Args:
entries - A sequence of entries.
Returns:
An list of adjusted entries. Entries in the original list which
are not compatible are excluded.
"""
return filter(None, map(self.process_entry, entries))
@property
def corrected_compound_formulas(self):
return self.cpd_energies.keys()
def __str__(self):
return "{} {} Compatibility".format(self.input_set_name,
self.compat_type)
class MaterialsProjectCompatibility(Compatibility):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MaterialsProject parameters (see pymatgen.io.vaspio_set
MaterialsProjectVaspInputSet). Using this compatibility scheme on runs with
different parameters is not valid.
"""
def __init__(self, compat_type="Advanced"):
"""
Args:
compat_type:
Two options, GGA or Advanced. GGA means all GGA+U entries are
excluded. Advanced means mixing scheme is implemented to make
entries compatible with each other, but entries which are
supposed to be done in GGA+U will have the equivalent GGA
entries excluded. For example, Fe oxides should have a U value
under the Advanced scheme. A GGA Fe oxide run will therefore be
excluded under the scheme.
"""
Compatibility.__init__(self, "MaterialsProject", compat_type)
class MITCompatibility(MaterialsProjectCompatibility):
"""
This class implements the GGA/GGA+U mixing scheme, which allows mixing of
entries. Note that this should only be used for VASP calculations using the
MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
this compatibility scheme on runs with different parameters is not valid.
"""
def __init__(self, compat_type="Advanced"):
"""
Args:
compat_type:
Two options, GGA or Advanced. GGA means all GGA+U entries are
excluded. Advanced means mixing scheme is implemented to make
entries compatible with each other, but entries which are
supposed to be done in GGA+U will have the equivalent GGA
entries excluded. For example, Fe oxides should have a U value
under the Advanced scheme. A GGA Fe oxide run will therefore be
excluded under the scheme.
"""
Compatibility.__init__(self, "MITMatgen", compat_type)
|
Python
| 0
|
@@ -648,22 +648,8 @@
s%0A%0A%0A
-@cached_class%0A
clas
|
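The constructor above is mostly ConfigParser plumbing: read a '{input_set}{compat_type}UCorrections{el}' section and coerce its values to floats. A Python 3 sketch of that step with an invented section (the correction values are illustrative, not real Materials Project numbers):

import configparser

cfg = configparser.ConfigParser()
cfg.optionxform = str  # keep element symbols case-sensitive, as the class does
cfg.read_string('[MITMatgenAdvancedUCorrectionsO]\nFe = 1.7\nMn = 1.6\n')
u_corr = {k: float(v) for k, v in cfg.items('MITMatgenAdvancedUCorrectionsO')}
print(u_corr)  # {'Fe': 1.7, 'Mn': 1.6}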
4dfdc6cd8b7b716eaab31cf7e489ecbdb92a1116
|
Use the record's __class__ rather than self.model, which isn't set in BaseEndpoint.
|
ajax/endpoints.py
|
ajax/endpoints.py
|
from django.core import serializers
from django.core.exceptions import ValidationError
from django.db import models
from django.utils import simplejson as json
from django.utils.encoding import smart_str
from django.utils.translation import ugettext_lazy as _
from django.db.models.fields import FieldDoesNotExist
from ajax.decorators import require_pk
from ajax.exceptions import AJAXError, AlreadyRegistered, NotRegistered, \
PrimaryKeyMissing
class BaseEndpoint(object):
def _encode_data(self, data):
"""Encode a ``QuerySet`` to a Python dict.
Handles converting a ``QuerySet`` (or something that looks like one) to
        a more vanilla version of a list of dicts without the extra
inspection-related cruft.
"""
data = serializers.serialize("python", data)
ret = []
for d in data:
tmp = d['fields']
tmp['pk'] = d['pk']
ret.append(tmp)
return ret
def _encode_record(self, record):
"""Encode a record to a dict.
This will take a Django model, encode it to a normal Python dict, and
then inspect the data for instances of ``ForeignKey`` and convert
those to a dict of the related record.
"""
data = self._encode_data([record])[0]
for field, val in data.iteritems():
try:
f = self.model._meta.get_field(field)
if isinstance(f, models.ForeignKey):
try:
row = f.rel.to.objects.get(pk=val)
new_value = self._encode_record(row)
except f.rel.to.DoesNotExist:
new_value = {} # If it's not there add empty dict.
elif isinstance(f, models.BooleanField):
# If someone could explain to me why the fuck the Python
# serializer appears to serialize BooleanField to a string
# with "True" or "False" in it, please let me know.
if val == "True":
new_value = True
else:
new_value = False
else:
new_value = val
data[smart_str(field)] = new_value
except FieldDoesNotExist:
pass
return data
class BaseModelFormEndpoint(BaseEndpoint):
def __init__(self, application, model, method, pk):
self.application = application
self.model = model
self.method = method
self.pk = pk
class ModelEndpoint(BaseModelFormEndpoint):
def create(self, request):
record = self.model(**self._extract_data(request))
if self.can_create(request.user, record):
return self._encode_record(self._save(record))
else:
raise AJAXError(403, _("Access to endpoint is forbidden"))
def _save(self, record):
try:
record.full_clean()
record.save()
return record
except ValidationError, e:
raise AJAXError(400, _("Could not save model."), errors=e.message_dict)
@require_pk
def update(self, request):
record = self._get_record()
if self.can_update(request.user, record):
for key, val in self._extract_data(request).iteritems():
setattr(record, key, val)
return self._encode_record(self._save(record))
else:
raise AJAXError(403, _("Access to endpoint is forbidden"))
@require_pk
def delete(self, request):
record = self._get_record()
if self.can_delete(request.user, record):
record.delete()
return {'pk': int(self.pk)}
else:
raise AJAXError(403, _("Access to endpoint is forbidden"))
@require_pk
def get(self, request):
record = self._get_record()
if self.can_get(request.user, record):
return self._encode_record(record)
else:
raise AJAXError(403, _("Access to endpoint is forbidden"))
def _extract_data(self, request):
"""Extract data from POST.
Handles extracting a vanilla Python dict of values that are present
in the given model. This also handles instances of ``ForeignKey`` and
will convert those to the appropriate object instances from the
database. In other words, it will see that user is a ``ForeignKey`` to
Django's ``User`` class, assume the value is an appropriate pk, and
load up that record.
"""
data = {}
for field, val in request.POST.iteritems():
try:
f = self.model._meta.get_field(field)
if isinstance(f, models.ForeignKey):
data[smart_str(field)] = f.rel.to.objects.get(pk=val)
else:
data[smart_str(field)] = val
except FieldDoesNotExist:
pass
return data
def _get_record(self):
"""Fetch a given record.
Handles fetching a record from the database along with throwing an
        appropriate instance of ``AJAXError``.
"""
if not self.pk:
raise AJAXError(400, _('Invalid request for record.'))
try:
return self.model.objects.get(pk=self.pk)
except self.model.DoesNotExist:
raise AJAXError(404, _('%s with id of "%s" not found.') % (
self.model.__name__, self.pk))
def can_get(self, user, record):
return True
def _user_is_active_or_staff(self, user, record):
return ((user.is_authenticated() and user.is_active) or user.is_staff)
can_create = _user_is_active_or_staff
can_update = _user_is_active_or_staff
can_delete = _user_is_active_or_staff
def authenticate(self, request, application, method):
"""Authenticate the AJAX request.
By default any request to fetch a model is allowed for any user,
including anonymous users. All other methods minimally require that
the user is already logged in.
Most likely you will want to lock down who can edit and delete various
models. To do this, just override this method in your child class.
"""
if request.user.is_authenticated():
return True
return False
class FormEndpoint(BaseModelFormEndpoint):
"""AJAX endpoint for processing Django forms.
The models and forms are processed in pretty much the same manner, only a
form class is used rather than a model class.
"""
def create(self, request):
form = self.model(request.POST)
if form.is_valid():
model = form.save()
if hasattr(model, 'save'):
# This is a model form so we save it and return the model.
model.save()
return self._encode_record(model)
else:
return model # Assume this is a dict to encode.
else:
return self._encode_data(form.errors)
def update(self, request):
raise AJAXError(404, _("Endpoint does not exist."))
delete = update
get = update
class Endpoints(object):
def __init__(self):
self._registry = {}
def register(self, model, endpoint):
if model in self._registry:
raise AlreadyRegistered()
self._registry[model] = endpoint
def unregister(self, model):
if model not in self._registry:
raise NotRegistered()
del self._registry[model]
def load(self, model_name, application, method, pk):
for model in self._registry:
if model.__name__.lower() == model_name:
return self._registry[model](application, model, method, pk)
raise NotRegistered()
|
Python
| 0
|
@@ -1371,34 +1371,40 @@
f =
-self.model
+record.__class__
._meta.get_f
|
579904c318031ed049f697f89109bd6909b68eba
|
Revise docstring
|
alg_merge_sort.py
|
alg_merge_sort.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
def _merge_recur(x_list, y_list):
"""Merge two sorted lists by recusions."""
if len(x_list) == 0:
return y_list
if len(y_list) == 0:
return x_list
if x_list[0] <= y_list[0]:
return [x_list[0]] + _merge_recur(x_list[1:], y_list)
else:
return [y_list[0]] + _merge_recur(x_list, y_list[1:])
def _merge_iter(x_list, y_list):
"""Merge two sorted lists by iteration."""
z_list = []
x_pos = 0
y_pos = 0
for z_pos in xrange(len(x_list) + len(y_list)):
if x_pos < len(x_list) and y_pos < len(y_list):
if x_list[x_pos] <= y_list[y_pos]:
z_list.append(x_list[x_pos])
x_pos += 1
else:
z_list.append(y_list[y_pos])
y_pos += 1
elif x_pos < len(x_list) and y_pos >= len(y_list):
z_list.append(x_list[x_pos])
x_pos += 1
elif x_pos >= len(x_list) and y_pos < len(y_list):
z_list.append(y_list[y_pos])
y_pos += 1
else:
pass
return z_list
def merge_sort(a_list, merge):
"""Merge sort by divide and conquer algorithm.
Time complexity: O(n*logn).
"""
if len(a_list) == 1:
return a_list
else:
mid = len(a_list) // 2
return merge(merge_sort(a_list[:mid], merge),
merge_sort(a_list[mid:], merge))
def main():
import time
a_list = [54, 26, 93, 17, 77, 31, 44, 55, 20]
start_time = time.time()
print(merge_sort(a_list, _merge_recur))
    print('Run time of merge sort with recursion: {}'
.format(time.time() - start_time))
start_time = time.time()
print(merge_sort(a_list, _merge_iter))
print('Run time of merge sort with iterations: {}'
.format(time.time() - start_time))
if __name__ == '__main__':
main()
|
Python
| 0.000009
|
@@ -171,17 +171,17 @@
ists by
-r
+R
ecusions
@@ -512,25 +512,25 @@
ists by
-i
+I
teration
.%22%22%22%0A
@@ -521,16 +521,45 @@
teration
+ (i.e. Two Fingers Algorithm)
.%22%22%22%0A
@@ -1271,17 +1271,17 @@
sort by
-d
+D
ivide an
@@ -1286,17 +1286,17 @@
and
-c
+C
onquer
-a
+A
lgor
|
919ceccc688b20091f2feabc52bd43c9d2e53d71
|
move to python3
|
src/xii/builtin/components/node/attributes/network/node_network.py
|
src/xii/builtin/components/node/attributes/network/node_network.py
|
import libvirt
import xml.etree.ElementTree as etree
from time import sleep
from xii import error, need
from xii.validator import String, Or, Dict, RequiredKey, Ip
from xii.components.node import NodeAttribute
class NetworkAttribute(NodeAttribute, need.NeedLibvirt):
atype = "network"
defaults = "default"
keys = Or([
String("default"),
Dict([
RequiredKey("source", String("default")),
RequiredKey("ip", Ip("192.168.124.87"))
])
])
def network_name(self):
if self._need_ipv4():
return self.settings("source")
return self.settings()
def start(self):
network = self._get_delayed_network(self.network_name())
if network.isActive():
return
self.say("starting network...")
network.create()
self.success("network started!")
def after_start(self):
network = self._get_delayed_network(self.network_name())
if self._need_ipv4():
mac = self._get_mac_address()
self._remove_mac(network, mac, self.settings("ip"))
self._announce_static_ip(network, mac, self.settings("ip"))
def stop(self):
network = self._get_delayed_network(self.network_name())
if self._need_ipv4():
mac = self._get_mac_address()
self._remove_mac(network, mac, self.settings("ip"))
def spawn(self):
network = self._get_delayed_network(self.network_name())
if not network:
raise error.NotFound("Network {} for domain "
"{}".format(self.network_name(), self.component_entity()))
if not network.isActive():
self.start()
self.add_xml('devices', self._gen_xml())
def _gen_xml(self):
xml = self.template('network.xml')
return xml.safe_substitute({'network': self.network_name()})
def _get_mac_address(self):
def _uses_network(iface):
return iface.find("source").attrib["network"] == self.network_name()
node = self.get_domain(self.component_entity())
desc = etree.fromstring(node.XMLDesc())
ifaces = filter(_uses_network, desc.findall("devices/interface"))
if len(ifaces) == 0:
raise error.NotFound("Could not find domain interface")
# FIXME: Add multiple interface support
# When multiple interfaces using the same network
mac = ifaces[0].find("mac")
if mac is None:
raise error.NotFound("Could not find interface mac address")
return mac.attrib["address"]
def _need_ipv4(self):
return isinstance(self.settings(), dict)
def _announce_static_ip(self, network, mac, ip):
command = libvirt.VIR_NETWORK_UPDATE_COMMAND_ADD_LAST
flags = (libvirt.VIR_NETWORK_UPDATE_AFFECT_CONFIG |
libvirt.VIR_NETWORK_UPDATE_AFFECT_LIVE)
section = libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST
xml = "<host mac='{}' name='{}' ip='{}' />".format(mac, "xii-" + self.component_entity(), ip)
try:
network.update(command, section, -1, xml, flags)
except libvirt.libvirtError:
return False
return True
def _remove_mac(self, network, mac, ip):
command = libvirt.VIR_NETWORK_UPDATE_COMMAND_DELETE
flags = (libvirt.VIR_NETWORK_UPDATE_AFFECT_CONFIG |
libvirt.VIR_NETWORK_UPDATE_AFFECT_LIVE)
section = libvirt.VIR_NETWORK_SECTION_IP_DHCP_HOST
xml = "<host mac='{}' name='{}' ip='{}' />".format(mac, "xii-" + self.component_entity(), ip)
try:
network.update(command, section, -1, xml, flags)
except libvirt.libvirtError:
return False
return True
def _get_delayed_network(self, name):
network = self.get_network(name, raise_exception=False)
if not network:
if not self.has_component("network", name):
raise error.NotFound("Could not find network ({})"
.format(name))
# wait for network to become ready
for _ in range(self.global_get("global/retry_network", 20)):
network = self.get_network(name, raise_exception=False)
if network:
return network
sleep(self.global_get("global/wait", 3))
raise error.ExecError("Network {} has not become ready in "
"time. Giving up".format(name))
return network
|
Python
| 0.000031
|
@@ -2193,16 +2193,21 @@
aces =
+list(
filter(_
@@ -2254,16 +2254,17 @@
rface%22))
+)
%0A%0A
|
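The two-character diff above is the classic Python 3 pitfall: filter() now returns a lazy iterator, so the later len(ifaces) call would raise. In isolation:

ifaces = ['eth0', 'virbr0', 'eth1']
lazy = filter(lambda name: name.startswith('eth'), ifaces)
# len(lazy) raises TypeError on Python 3; materialise the iterator first
matches = list(lazy)
print(len(matches))  # 2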
c94f24160e1d9189f13ea0029cccb815fb68d3da
|
Update postag.py
|
pythainlp/wangchanberta/postag.py
|
pythainlp/wangchanberta/postag.py
|
from typing import Dict, List, Tuple, Union
import re
from transformers import (
CamembertTokenizer,
AutoTokenizer,
pipeline,
)
_model_name = "wangchanberta-base-att-spm-uncased"
_tokenizer = CamembertTokenizer.from_pretrained(
f'airesearch/{_model_name}',
revision='main')
if _model_name == "wangchanberta-base-att-spm-uncased":
_tokenizer.additional_special_tokens = ['<s>NOTUSED', '</s>NOTUSED', '<_>']
class PosTagTransformers:
def __init__(self,
corpus: str = "lst20",
grouped_word: bool = False
) -> None:
self.corpus = corpus
self.grouped_word = grouped_word
self.load()
def load(self):
self.classify_tokens = pipeline(
task='ner',
tokenizer=_tokenizer,
model = f'airesearch/{_model_name}',
revision = f'finetuned@{self.corpus}-pos',
ignore_labels=[],
grouped_entities=self.grouped_word
)
def tag(
self, text: str, corpus: str = "lst20", grouped_word: bool = False
) -> List[Tuple[str, str]]:
if (corpus != self.corpus and corpus in ['lst20']) or grouped_word != self.grouped_word:
self.grouped_word = grouped_word
self.corpus = corpus
self.load()
text = re.sub(" ", "<_>", text)
self.json_pos = self.classify_tokens(text)
self.output = ""
if grouped_word:
self.sent_pos = [(i['word'].replace("<_>", " "), i['entity_group']) for i in self.json_pos]
else:
self.sent_pos = [(i['word'].replace("<_>", " ").replace('▁',''), i['entity']) for i in self.json_pos if i['word'] != '▁']
return self.sent_pos
_corpus = "lst20"
_grouped_word = False
_postag = PosTagTransformers(corpus=_corpus, grouped_word = _grouped_word)
def pos_tag(
text: str, corpus: str = "lst20", grouped_word: bool = False
) -> List[Tuple[str, str]]:
global _grouped_word, _postag
if corpus not in ["lst20"]:
raise NotImplementedError()
if _grouped_word != grouped_word:
_postag = PosTagTransformers(
corpus=corpus,
grouped_word = grouped_word
)
_grouped_word = grouped_word
return _postag.tag(text)
|
Python
| 0
|
@@ -2274,14 +2274,59 @@
tag.tag(text
+, corpus = corpus,grouped_word = grouped_word
)%0A
|
2c5a345aa7e21045d6a76225dc192f14b62db4f6
|
fix review permission manage command
|
symposion/reviews/management/commands/create_review_permissions.py
|
symposion/reviews/management/commands/create_review_permissions.py
|
from django.core.management.base import BaseCommand
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from symposion.proposals.models import ProposalSection
class Command(BaseCommand):
def handle(self, *args, **options):
ct, created = ContentType.objects.get_or_create(
model="",
app_label="reviews",
defaults={"name": "reviews"}
)
for ps in ProposalSection.objects.all():
for action in ["review", "manage"]:
perm, created = Permission.objects.get_or_create(
codename="can_%s_%s" % (action, ps.section.slug),
content_type__pk=ct.id,
defaults={"name": "Can %s %s" % (action, ps), "content_type": ct}
)
print perm
|
Python
| 0
|
@@ -292,25 +292,16 @@
ct
-, created
= Conte
@@ -322,42 +322,14 @@
.get
-_or_create
(%0A
- model=%22%22,%0A
@@ -347,16 +347,26 @@
_label=%22
+symposion_
reviews%22
@@ -383,37 +383,35 @@
-defaults=%7B%22name%22: %22reviews%22%7D%0A
+ model=%22review%22%0A
|
f056d43facbb3f5ae0248e590bf09832b41ea9a0
|
fix assertion: converting numpy array to bool does not default to 'True'.
|
data_access/read_spectrum_fits.py
|
data_access/read_spectrum_fits.py
|
import csv
import itertools
import os.path
import numpy as np
import pyfits
import astropy.table as table
import common_settings
from pixel_flags import PixelFlags
from data_access.qso_data import QSORecord, QSOData
settings = common_settings.Settings()
QSO_FILE = settings.get_qso_metadata_fits()
# read header names for the QSO table
QSO_FIELDS_FILE = settings.get_qso_metadata_fields()
with open(QSO_FIELDS_FILE, mode='rb') as f:
QSO_fields = list(csv.reader(f))[0]
QSO_fields_dict = dict(zip(QSO_fields, itertools.count()))
PLATE_DIR_DEFAULT = settings.get_plate_dir_list()
# remove all pixels with AND bits:
AND_MASK = np.bitwise_not(np.uint32(0))
# remove pixels with the following OR bits:
# the first 13 bits do not mask many pixels so we might as well include them.
# the AND mask of 'bright sky' does not always block sky lines.
OR_MASK = PixelFlags.string_to_int(
'NOPLUG|BADTRACE|BADFLAT|BADARC|MANYBADCOLUMNS|MANYREJECTED|LARGESHIFT|BADSKYFIBER|' +
'NEARWHOPPER|WHOPPER|SMEARIMAGE|SMEARHIGHSN|SMEARMEDSN|' +
'BRIGHTSKY')
def generate_qso_details():
"""
iterate over the QSO table, yielding a dictionary containing the values for each QSO
"""
data = pyfits.getdata(QSO_FILE)
for obj in data:
yield obj
def get_fits_partial_path(qso_rec):
"""
Returns a relative path for the plate file within a data version directory
:rtype : basestring
"""
filename = "spPlate-%s-%s.fits" % \
(str(qso_rec.plate).zfill(4), qso_rec.mjd)
return os.path.join(str(qso_rec.plate), filename)
def find_fits_file(plate_dir_list, fits_partial_path):
"""
Returns a path
:rtype : basestring
"""
for plate_dir in plate_dir_list:
fits_path = os.path.join(plate_dir, fits_partial_path)
if os.path.exists(fits_path):
return fits_path
return None
def enum_spectra(qso_record_table, plate_dir_list=PLATE_DIR_DEFAULT, pre_sort=True, flag_stats=None):
"""
yields a QSO object from the fits files corresponding to the appropriate qso_record
:type qso_record_table: table.Table
:rtype: list[QSOData]
"""
last_fits_partial_path = None
# sort by plate to avoid reopening files too many times
if pre_sort:
qso_record_table.sort(['plate'])
for i in qso_record_table:
qso_rec = QSORecord.from_row(i)
fits_partial_path = get_fits_partial_path(qso_rec)
# skip reading headers and getting data objects if the filename hasn't changed
if fits_partial_path != last_fits_partial_path:
fits_full_path = find_fits_file(plate_dir_list, fits_partial_path)
if not fits_full_path:
print "Missing file:", fits_partial_path
continue
# get header
hdu_list = pyfits.open(fits_full_path)
hdu0_header = hdu_list[0].header
hdu1_header = hdu_list[1].header
l1 = hdu1_header["NAXIS1"]
c0 = hdu0_header["COEFF0"]
c1 = hdu0_header["COEFF1"]
l = hdu0_header["NAXIS1"]
assert l1 == l, "flux and ivar dimensions must be equal"
# wavelength grid
counter = np.arange(0, l)
o_grid = 10 ** (c0 + c1 * counter)
# get flux_data
flux_data = hdu_list[0].data
ivar_data = hdu_list[1].data
and_mask_data = hdu_list[2].data
or_mask_data = hdu_list[3].data
last_fits_partial_path = fits_partial_path
assert flux_data and ivar_data and and_mask_data and or_mask_data and o_grid
# return requested spectrum
ar_flux = flux_data[qso_rec.fiberID - 1]
ar_ivar = ivar_data[qso_rec.fiberID - 1]
assert ar_flux.size == ar_ivar.size
current_and_mask_data = np.asarray(and_mask_data[qso_rec.fiberID - 1])
current_or_mask_data = np.asarray(or_mask_data[qso_rec.fiberID - 1])
ar_effective_mask = np.logical_or(current_and_mask_data & AND_MASK,
current_or_mask_data & OR_MASK)
if flag_stats is not None:
for bit in xrange(0, 32):
flag_stats.flag_count[bit, 0] += (current_and_mask_data & 1).sum()
flag_stats.flag_count[bit, 1] += (current_or_mask_data & 1).sum()
current_and_mask_data >>= 1
current_or_mask_data >>= 1
flag_stats.pixel_count += current_and_mask_data.size
# temporary: set ivar to 0 for all bad pixels
ar_ivar[ar_effective_mask != 0] = 0
yield QSOData(qso_rec, o_grid, ar_flux, ar_ivar)
|
Python
| 0.000004
|
@@ -3559,67 +3559,171 @@
ata
-and ivar_data and and_mask_data and or_mask_data and o_grid
+is not None%0A assert ivar_data is not None%0A assert and_mask_data is not None%0A assert or_mask_data is not None%0A assert o_grid is not None
%0A
|
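The assertion the diff above unpacks fails for a reason worth seeing once: a multi-element numpy array refuses to collapse to a single bool, so 'a and b' raises instead of testing presence. The per-array 'is not None' checks sidestep that:

import numpy as np

a = np.array([1.0, 2.0])
try:
    bool(a)  # what 'assert flux_data and ivar_data' implicitly attempts
except ValueError as exc:
    print(exc)  # the truth value of an array with more than one element is ambiguous
print(a is not None)  # True: the identity check the diff switches to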
5cb0d875cfe480a30a58aaa7aed71bca20b3aa9b
|
Allow giving explicit lexer name.
|
scripts/find_error.py
|
scripts/find_error.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Lexing error finder
~~~~~~~~~~~~~~~~~~~
For the source files given on the command line, display
the text where Error tokens are being generated, along
with some context.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os
try:
import pygments
except ImportError:
# try parent path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from pygments.lexer import RegexLexer
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
from pygments.token import Error, Text, _TokenType
class DebuggingRegexLexer(RegexLexer):
"""Make the state stack, position and current match instance attributes."""
def get_tokens_unprocessed(self, text, stack=('root',)):
"""
Split ``text`` into (tokentype, text) pairs.
        ``stack`` is the initial stack (default: ``['root']``)
"""
self.pos = 0
tokendefs = self._tokens
self.statestack = list(stack)
statetokens = tokendefs[self.statestack[-1]]
while 1:
for rexmatch, action, new_state in statetokens:
self.m = m = rexmatch(text, self.pos)
if m:
if type(action) is _TokenType:
yield self.pos, action, m.group()
else:
for item in action(self, m):
yield item
self.pos = m.end()
if new_state is not None:
# state transition
if isinstance(new_state, tuple):
for state in new_state:
if state == '#pop':
self.statestack.pop()
elif state == '#push':
self.statestack.append(self.statestack[-1])
else:
self.statestack.append(state)
elif isinstance(new_state, int):
# pop
del self.statestack[new_state:]
elif new_state == '#push':
self.statestack.append(self.statestack[-1])
else:
assert False, 'wrong state def: %r' % new_state
statetokens = tokendefs[self.statestack[-1]]
break
else:
try:
if text[self.pos] == '\n':
# at EOL, reset state to 'root'
self.pos += 1
self.statestack = ['root']
statetokens = tokendefs['root']
yield self.pos, Text, u'\n'
continue
yield self.pos, Error, text[self.pos]
self.pos += 1
except IndexError:
break
def main(fn):
try:
lx = get_lexer_for_filename(os.path.basename(fn))
except ValueError:
try:
name, rest = fn.split('_', 1)
lx = get_lexer_by_name(name)
except ValueError:
raise AssertionError('no lexer found for file %r' % fn)
debug_lexer = False
# does not work for e.g. ExtendedRegexLexers
if lx.__class__.__bases__ == (RegexLexer,):
lx.__class__.__bases__ = (DebuggingRegexLexer,)
debug_lexer = True
lno = 1
text = file(fn, 'U').read()
text = text.strip('\n') + '\n'
text = text.decode('latin1')
ntext = []
states = []
def show_token(tok):
reprs = map(repr, tok)
print ' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0],
if debug_lexer:
print ' ' + ' ' * (29-len(reprs[0])) + repr(states[i]),
print
for type, val in lx.get_tokens(text):
lno += val.count('\n')
if type == Error:
print 'Error parsing', fn, 'on line', lno
print 'Previous tokens' + (debug_lexer and ' and states' or '') + ':'
for i in range(len(ntext) - num, len(ntext)):
show_token(ntext[i])
print 'Error token:'
l = len(repr(val))
print ' ' + repr(val),
if debug_lexer:
print ' ' * (60-l) + repr(lx.statestack),
print
print
return 1
ntext.append((type,val))
if debug_lexer:
states.append(lx.statestack[:])
if showall:
for tok in ntext:
show_token(tok)
return 0
num = 10
showall = False
if __name__ == '__main__':
import getopt
opts, args = getopt.getopt(sys.argv[1:], 'n:a')
for opt, val in opts:
if opt == '-n':
num = int(val)
elif opt == '-a':
showall = True
ret = 0
for f in args:
ret += main(f)
sys.exit(bool(ret))
|
Python
| 0
|
@@ -3113,20 +3113,114 @@
n(fn
-):%0A try:%0A
+, lexer=None):%0A if lexer is not None:%0A lx = get_lexer_by_name(lexer)%0A else:%0A try:%0A
@@ -3273,16 +3273,20 @@
me(fn))%0A
+
exce
@@ -3300,32 +3300,36 @@
eError:%0A
+
try:%0A
@@ -3329,16 +3329,20 @@
+
+
name, re
@@ -3367,32 +3367,36 @@
1)%0A
+
lx = get_lexer_b
@@ -3408,32 +3408,36 @@
e(name)%0A
+
+
except ValueErro
@@ -3431,32 +3431,36 @@
ept ValueError:%0A
+
rais
@@ -4882,16 +4882,29 @@
= False
+%0Alexer = None
%0A%0Aif __n
@@ -4990,16 +4990,18 @@
1:%5D, 'n:
+l:
a')%0A
@@ -5122,24 +5122,74 @@
wall = True%0A
+ elif opt == '-l':%0A lexer = val%0A
ret = 0%0A
@@ -5207,16 +5207,16 @@
n args:%0A
-
@@ -5228,16 +5228,23 @@
= main(f
+, lexer
)%0A sy
|
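With the -l option added in the diff, the lexer is looked up by alias rather than by filename; assuming Pygments is installed, the lookup itself is just:

from pygments.lexers import get_lexer_by_name

lx = get_lexer_by_name('python')
print(lx.name)  # Python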
c8398415bf82a1f68c7654c8d4992661587fccf7
|
Update pathlib-recursive-rmdir.py
|
python/pathlib-recursive-rmdir.py
|
python/pathlib-recursive-rmdir.py
|
import pathlib
# path: pathlib.Path - directory to remove
def removeDirectory(path):
for i in path.glob('*'):
if i.is_dir():
removeDirectory(i)
else:
i.unlink()
path.rmdir()
|
Python
| 0.000001
|
@@ -178,16 +178,123 @@
nlink()%0A
+ # NOTE: can replace above lines with %60removeConents(path)%60 scrap form pathlib-recursive-remove-contents %0A
path.r
|
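The hand-rolled walk above mirrors what the standard library already offers; for comparison, the one-call equivalent (the path is a placeholder):

import shutil

shutil.rmtree('/tmp/example-dir', ignore_errors=True)  # files, subdirs, then the dir itself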
e718615eac8e964b46ded826e68fe1087ee33f09
|
set DEBUG to False (oops)
|
frontend/fifoci/settings/base.py
|
frontend/fifoci/settings/base.py
|
# This file is part of the FifoCI project.
# Copyright (c) 2014 Pierre Bourdon <delroth@dolphin-emu.org>
# Licensing information: see $REPO_ROOT/LICENSE
"""
Django settings for fifoci project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# We don't use sessions for anything but the Django admin in this application.
# 32 random bytes are enough, though this means cookies will get invalidated on
# application restart.
SECRET_KEY = os.urandom(32)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'fifoci',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.media",
"django.core.context_processors.static",
"django.core.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"fifoci.context_processors.recent_changes",
)
ROOT_URLCONF = 'fifoci.urls'
WSGI_APPLICATION = 'fifoci.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'fifoci',
'USER': 'fifoci',
}
}
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_STORAGE = 'django.contrib.staticfiles.storage.ManifestStaticFilesStorage'
# Media files (used to store DFFs, PNGs)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
Python
| 0.000014
|
@@ -1035,19 +1035,20 @@
DEBUG =
-Tru
+Fals
e%0ATEMPLA
@@ -1058,19 +1058,20 @@
DEBUG =
-Tru
+Fals
e%0A%0AALLOW
|
85581374377d76e6dfd76e81477c1598c0944959
|
Add in/out filetypes to searchsettings (python)
|
python/pysearch/searchsettings.py
|
python/pysearch/searchsettings.py
|
# -*- coding: utf-8 -*-
################################################################################
#
# searchsettings.py
#
# class SearchSettings: encapsulates search settings
#
################################################################################
import re
class SearchSettings(object):
"""a class to encapsulate search settings for a particular search session"""
_extension_set_names = set('''in_extensions out_extensions
in_archiveextensions out_archiveextensions'''.split())
_pattern_set_names = set('''in_dirpatterns out_dirpatterns
in_filepatterns out_filepatterns
in_archivefilepatterns out_archivefilepatterns
in_linesafterpatterns out_linesafterpatterns
in_linesbeforepatterns out_linesbeforepatterns
linesaftertopatterns linesafteruntilpatterns
searchpatterns'''.split())
_props_with_defaults = {
'archivesonly': False,
'debug': False,
'firstmatch': False,
'excludehidden': True,
'linesafter': 0,
'linesbefore': 0,
'listdirs': False,
'listfiles': False,
'listlines': False,
'maxlinelength': 150,
'multilinesearch': False,
'printresults': True,
'printusage': False,
'printversion': False,
'recursive': True,
'searcharchives': False,
'uniquelines': False,
'verbose': False,
}
def __init__(self):
self.startpath = ''
for name in self._extension_set_names:
self.__dict__[name] = set()
for name in self._pattern_set_names:
self.__dict__[name] = set()
self.__dict__.update(self._props_with_defaults)
def add_exts(self, exts, ext_set_name):
if type(exts) is list:
self.__dict__[ext_set_name] = self.__dict__[ext_set_name].union(exts)
elif type(exts) in set([str, unicode]):
ext_set = set([ext for ext in exts.split(',') if ext])
self.__dict__[ext_set_name] = self.__dict__[ext_set_name].union(ext_set)
def add_patterns(self, patterns, pattern_set_name):
assert pattern_set_name in self._pattern_set_names
compile_flag = re.S | re.U
if type(patterns) is list:
pattern_set = set([re.compile(p, compile_flag) for p in patterns])
self.__dict__[pattern_set_name] = self.__dict__[pattern_set_name].union(pattern_set)
elif type(patterns) in set([str, unicode]):
self.__dict__[pattern_set_name].add(re.compile(patterns, compile_flag))
else:
print 'ERROR: patterns is an unknown type'
def set_property(self, name, val):
self.__dict__[name] = val
# some trues trigger others
if type(val) is bool and val:
if name == 'archivesonly':
self.searcharchives = True
elif name == 'debug':
self.verbose = True
def set_properties(self, propdict):
for p in propdict:
self.set_property(p, propdict[p])
def __str__(self):
all_props = set(['startpath']) | self._extension_set_names | \
self._pattern_set_names | set(self._props_with_defaults.keys())
print_dict = {}
s = '{0}('.format(self.__class__.__name__)
for p in sorted(all_props):
val = self.__dict__[p]
if type(val) == set:
if p in self._pattern_set_names:
print_dict[p] = str([x.pattern for x in val])
else:
print_dict[p] = str(list(val))
elif type(val) in set([str, unicode]):
if val:
print_dict[p] = '"{0}"'.format(val)
else:
print_dict[p] = '""'
else:
print_dict[p] = '{0!s}'.format(val)
next_elem = 0
for p in sorted(print_dict.keys()):
if next_elem:
s += ', '
s += '{0}: {1}'.format(p, print_dict[p])
next_elem += 1
s += ')'
return s
|
Python
| 0
|
@@ -267,16 +267,47 @@
mport re
+%0Afrom filetypes import FileType
%0A%0Aclass
@@ -560,24 +560,60 @@
''.split())%0A
+# TODO: move filetypes to own group%0A
_pattern
@@ -720,32 +720,91 @@
ut_filepatterns%0A
+ in_filetypes out_filetypes%0A
@@ -2919,16 +2919,541 @@
type'%0A%0A
+ def add_filetypes(self, filetypes, filetype_set_name):%0A filetype_set = set()%0A if type(filetypes) is list:%0A filetype_set = set(%5BFileType.from_name(ft) for tf in filetypes%5D)%0A elif type(filetypes) in set(%5Bstr, unicode%5D):%0A filetype_set = set(%5BFileType.from_name(ft) for ft in filetypes.split(',') if ft%5D)%0A else:%0A raise Exception('ERROR: patterns is an unknown type')%0A self.__dict__%5Bfiletype_set_name%5D = self.__dict__%5Bfiletype_set_name%5D.union(filetype_set)%0A%0A
def
|
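The comprehension added in the diff above contains a slip (it loops 'for tf in filetypes' while calling from_name(ft)); the working shape of that list-or-comma-string normalisation, with a stand-in parser instead of FileType:

def to_set(value, parse=str.strip):
    # accept either a list or a comma-separated string, as add_exts does
    if isinstance(value, list):
        return {parse(v) for v in value}
    return {parse(v) for v in value.split(',') if v}

print(to_set('py, rb,'))     # {'py', 'rb'}
print(to_set(['py', 'rb']))  # {'py', 'rb'}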
5a84b0161a5bc31b2da5bf6a5458c2c1a3ea9aa9
|
Define value for DEBUG setting based on environment
|
jarbas/settings.py
|
jarbas/settings.py
|
"""
Django settings for Jarbas project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
import re
import socket
from decouple import Csv, config
from dj_database_url import parse
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
ENVIRONMENT = config('ENVIRONMENT', default='development')
LOG_LEVEL = config('LOG_LEVEL', default='debug')
ALLOWED_HOSTS = config('ALLOWED_HOSTS', default='*', cast=Csv())
INTERNAL_IPS = ('127.0.0.1',)
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'test_without_migrations',
'corsheaders',
'rest_framework',
'jarbas.core.app.CoreConfig',
'jarbas.chamber_of_deputies.app.ChamberOfDeputiesConfig',
'jarbas.layers',
'jarbas.dashboard',
'django.contrib.admin',
'django_extensions',
'debug_toolbar',
]
MIDDLEWARE = [
'django.middleware.cache.UpdateCacheMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
]
if ENVIRONMENT == 'production':
MIDDLEWARE.insert(2, 'whitenoise.middleware.WhiteNoiseMiddleware')
ROOT_URLCONF = 'jarbas.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'jarbas.core.context_processors.google_analytics',
],
},
},
]
WSGI_APPLICATION = 'jarbas.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
default_db = 'sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3')
DATABASES = {
'default': config('DATABASE_URL', default=default_db, cast=parse)
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'pt_BR'
TIME_ZONE = 'America/Sao_Paulo'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
CORE_STATIC_DIR = os.path.join(BASE_DIR, 'jarbas', 'core', 'static')
STATICFILES_STORAGE = config('STATICFILES_STORAGE', default='')
# Django REST Framework
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
'rest_framework.permissions.DjangoModelPermissionsOrAnonReadOnly'
],
'PAGE_SIZE': 7,
'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.LimitOffsetPagination'
}
# Google
GOOGLE_ANALYTICS = config('GOOGLE_ANALYTICS', default='')
GOOGLE_STREET_VIEW_API_KEY = config('GOOGLE_STREET_VIEW_API_KEY', default='')
# Twitter
TWITTER_CONSUMER_KEY = config('TWITTER_CONSUMER_KEY', default='')
TWITTER_CONSUMER_SECRET = config('TWITTER_CONSUMER_SECRET', default='')
TWITTER_ACCESS_TOKEN = config('TWITTER_ACCESS_TOKEN', default='')
TWITTER_ACCESS_SECRET = config('TWITTER_ACCESS_SECRET', default='')
# Server headers
USE_X_FORWARDED_HOST = config('USE_X_FORWARDED_HOST', default=False, cast=bool)
SECURE_PROXY_SSL_HEADER = config('SECURE_PROXY_SSL_HEADER', default='', cast=lambda x: tuple(Csv()(x)) or None)
CORS_ORIGIN_ALLOW_ALL = True
CORS_URLS_REGEX = r'^/api/.*$'
# Cache
default_cache = 'django.core.cache.backends.dummy.DummyCache'
CACHES = {
'default': {
'BACKEND': config('CACHE_BACKEND', default=default_cache),
'LOCATION': config('CACHE_LOCATION', default=None),
'TIMEOUT': 60 * 60 * 6
}
}
# Django Debug Toolbar
# Set internal IP dynamically
INTERNAL_IP = socket.gethostbyname(socket.gethostname())
INTERNAL_IPS = (re.sub(r'\d$', '1', INTERNAL_IP),)
# Queue
CELERY_BROKER_URL = config('CELERY_BROKER_URL', default='amqp://guest:guest@localhost//')
# Set home
HOMES_REDIRECTS_TO = '/dashboard/chamber_of_deputies/reimbursement/'
|
Python
| 0.000002
|
@@ -897,16 +897,52 @@
'debug')
+%0ADEBUG = ENVIRONMENT != 'production'
%0A%0AALLOWE
|
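The one-line diff above derives Django's DEBUG flag from the existing ENVIRONMENT setting instead of introducing a separate DEBUG environment variable. A minimal sketch of the same pattern with python-decouple, using the names from the settings file above:

from decouple import config

ENVIRONMENT = config('ENVIRONMENT', default='development')
# Debug everywhere except production; development and staging get
# DEBUG = True without any extra configuration.
DEBUG = ENVIRONMENT != 'production'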
7668331e7cc4f5e2a310fcddcb3f90af4c18bb30
|
add Python 3 compatibility imports to capabilities.py
|
python/pywatchman/capabilities.py
|
python/pywatchman/capabilities.py
|
# Copyright 2015 Facebook, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name Facebook nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def parse_version(vstr):
res = 0
for n in vstr.split('.'):
res = res * 1000
res = res + int(n)
return res
cap_versions = {
"cmd-watch-del-all": "3.1.1",
"cmd-watch-project": "3.1",
"relative_root": "3.3",
"term-dirname": "3.1",
"term-idirname": "3.1",
"wildmatch": "3.7",
}
def check(version, name):
if name in cap_versions:
return version >= parse_version(cap_versions[name])
return False
def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers['version'])
vers['capabilities'] = {}
for name in opts['optional']:
vers['capabilities'][name] = check(parsed_version, name)
failed = False
for name in opts['required']:
have = check(parsed_version, name)
vers['capabilities'][name] = have
if not have:
vers['error'] = 'client required capability `' + name + \
'` is not supported by this server'
return vers
|
Python
| 0
|
@@ -1523,16 +1523,148 @@
AMAGE.%0A%0A
+from __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A# no unicode literals%0A%0A
import r
|
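The inserted block is the standard Python 2/3 compatibility preamble: absolute_import disables implicit relative imports, division makes / true division, and print_function turns print into a function. The "# no unicode literals" comment signals that unicode_literals is omitted on purpose, presumably because the module manages bytes versus text itself. A tiny illustration of what the division import changes under Python 2:

from __future__ import division

# Without the import, Python 2 evaluates 3 / 2 as floor division (1).
# With it, 3 / 2 == 1.5 on both interpreters; use // for floors.
assert 3 / 2 == 1.5
assert 3 // 2 == 1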
67a748197266c30f0ab8977b55bde9df7fe6a96d
|
Fix loguniform range in tune tutorial (#28131)
|
python/ray/tune/tests/tutorial.py
|
python/ray/tune/tests/tutorial.py
|
# flake8: noqa
# Original Code: https://github.com/pytorch/examples/blob/master/mnist/main.py
# fmt: off
# __tutorial_imports_begin__
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import torch.nn.functional as F
from ray import air, tune
from ray.tune.schedulers import ASHAScheduler
# __tutorial_imports_end__
# fmt: on
# fmt: off
# __model_def_begin__
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
# In this example, we don't change the model architecture
        # for simplicity.
self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
self.fc = nn.Linear(192, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 3))
x = x.view(-1, 192)
x = self.fc(x)
return F.log_softmax(x, dim=1)
# __model_def_end__
# fmt: on
# fmt: off
# __train_def_begin__
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
def train(model, optimizer, train_loader):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.train()
for batch_idx, (data, target) in enumerate(train_loader):
# We set this just for the example to run quickly.
if batch_idx * len(data) > EPOCH_SIZE:
return
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
def test(model, data_loader):
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.eval()
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(data_loader):
# We set this just for the example to run quickly.
if batch_idx * len(data) > TEST_SIZE:
break
data, target = data.to(device), target.to(device)
outputs = model(data)
_, predicted = torch.max(outputs.data, 1)
total += target.size(0)
correct += (predicted == target).sum().item()
return correct / total
# __train_def_end__
# __train_func_begin__
def train_mnist(config):
# Data Setup
mnist_transforms = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))])
train_loader = DataLoader(
datasets.MNIST("~/data", train=True, download=True, transform=mnist_transforms),
batch_size=64,
shuffle=True)
test_loader = DataLoader(
datasets.MNIST("~/data", train=False, transform=mnist_transforms),
batch_size=64,
shuffle=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = ConvNet()
model.to(device)
optimizer = optim.SGD(
model.parameters(), lr=config["lr"], momentum=config["momentum"])
for i in range(10):
train(model, optimizer, train_loader)
acc = test(model, test_loader)
# Send the current training result back to Tune
tune.report(mean_accuracy=acc)
if i % 5 == 0:
# This saves the model to the trial directory
torch.save(model.state_dict(), "./model.pth")
# __train_func_end__
# fmt: on
# __eval_func_begin__
search_space = {
"lr": tune.sample_from(lambda spec: 10 ** (-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9),
}
# Uncomment this to enable distributed execution
# `ray.init(address="auto")`
# Download the dataset first
datasets.MNIST("~/data", train=True, download=True)
tuner = tune.Tuner(
train_mnist,
param_space=search_space,
)
results = tuner.fit()
# __eval_func_end__
# __plot_begin__
dfs = {result.log_dir: result.metrics_dataframe for result in results}
[d.mean_accuracy.plot() for d in dfs.values()]
# __plot_end__
# __run_scheduler_begin__
tuner = tune.Tuner(
train_mnist,
tune_config=tune.TuneConfig(
num_samples=20,
scheduler=ASHAScheduler(metric="mean_accuracy", mode="max"),
),
param_space=search_space,
)
results = tuner.fit()
# Obtain a trial dataframe from all run trials of this `tune.run` call.
dfs = {result.log_dir: result.metrics_dataframe for result in results}
# __run_scheduler_end__
# fmt: off
# __plot_scheduler_begin__
# Plot by epoch
ax = None # This plots everything on the same plot
for d in dfs.values():
ax = d.mean_accuracy.plot(ax=ax, legend=False)
# __plot_scheduler_end__
# fmt: on
# __run_searchalg_begin__
from hyperopt import hp
from ray.tune.search.hyperopt import HyperOptSearch
space = {
"lr": hp.loguniform("lr", 1e-10, 0.1),
"momentum": hp.uniform("momentum", 0.1, 0.9),
}
hyperopt_search = HyperOptSearch(space, metric="mean_accuracy", mode="max")
tuner = tune.Tuner(
train_mnist,
tune_config=tune.TuneConfig(
num_samples=10,
search_alg=hyperopt_search,
),
)
results = tuner.fit()
# To enable GPUs, use this instead:
# analysis = tune.run(
# train_mnist, config=search_space, resources_per_trial={'gpu': 1})
# __run_searchalg_end__
# __run_analysis_begin__
import os
logdir = results.get_best_result("mean_accuracy", mode="max").log_dir
state_dict = torch.load(os.path.join(logdir, "model.pth"))
model = ConvNet()
model.load_state_dict(state_dict)
# __run_analysis_end__
from ray.tune.examples.mnist_pytorch_trainable import TrainMNIST
# __trainable_run_begin__
search_space = {
"lr": tune.sample_from(lambda spec: 10 ** (-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9),
}
tuner = tune.Tuner(
TrainMNIST,
run_config=air.RunConfig(stop={"training_iteration": 10}),
param_space=search_space,
)
results = tuner.fit()
# __trainable_run_end__
|
Python
| 0
|
@@ -4760,17 +4760,14 @@
r%22,
-1e
-10,
-0.
+-
1),%0A
|
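The character-level hunk above is easy to misread: it rewrites hp.loguniform("lr", 1e-10, 0.1) as hp.loguniform("lr", -10, -1). The distinction matters because hyperopt's hp.loguniform(label, low, high) draws exp(uniform(low, high)), meaning the bounds are natural-log exponents rather than raw values. The old call therefore sampled learning rates only in roughly [e**1e-10, e**0.1], about [1.0, 1.105], while the corrected bounds span approximately [4.5e-5, 0.37], a sensible range similar in spirit to the base-10 sample_from expression used elsewhere in the file. A quick check, assuming hyperopt's pyll sampler:

import numpy as np
from hyperopt import hp
from hyperopt.pyll.stochastic import sample

space = {"lr": hp.loguniform("lr", -10, -1)}
draws = [sample(space)["lr"] for _ in range(5)]
# Every draw lands in [e**-10, e**-1], roughly [4.5e-5, 0.37].
assert all(np.exp(-10) <= lr <= np.exp(-1) for lr in draws)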
97545d3055a0e0044723e0cb4b7ffd7803d1dbd5
|
Make class constants.
|
qipipe/staging/airc_collection.py
|
qipipe/staging/airc_collection.py
|
import re
from .staging_error import StagingError
__all__ = ['with_name']
EXTENT = {}
"""A name => collection dictionary for all supported AIRC collections."""
def collection_with_name(name):
"""
@param name: the OHSU QIN collection name
@return: the corresponding AIRC collection
"""
return EXTENT[name]
class AIRCCollection(object):
"""The AIRC Study characteristics."""
def __init__(self, collection, subject_pattern, session_pattern, dicom_pattern):
"""
@param collection: the collection name
@param subject_pattern: the subject directory name match regular expression pattern
@param session_pattern: the session directory name match regular expression pattern
@param dicom_pattern: the DICOM directory name match glob pattern
"""
self.collection = collection
self.subject_pattern = subject_pattern
self.session_pattern = session_pattern
self.dicom_pattern = dicom_pattern
EXTENT[collection] = self
def path2subject_number(self, path):
"""
@param path: the directory path
@return: the subject number
"""
match = re.search(self.subject_pattern, path)
if not match:
raise StagingError("The directory path %s does not match the subject pattern %s" % (path, self.subject_pattern))
return int(match.group(1))
def path2session_number(self, path):
"""
@param path: the directory path
@return: the session number
"""
return int(re.search(self.session_pattern, path).group(1))
BREAST = AIRCCollection('Breast', 'BreastChemo(\d+)', 'Visit(\d+)', '*concat*/*')
SARCOMA = AIRCCollection('Sarcoma', 'Subj_(\d+)', '(?:Visit_|S\d+V)(\d+)', '*concat*/*')
|
Python
| 0
|
@@ -1614,16 +1614,42 @@
up(1))%0A%0A
+class AIRCCollection:%0A
BREAST =
@@ -1722,17 +1722,54 @@
at*/*')%0A
-%0A
+ %22%22%22The Breast collection.%22%22%22%0A%0A
SARCOMA
@@ -1828,28 +1828,62 @@
%7CS%5Cd+V)(%5Cd+)', '*concat*/*')
+%0A %22%22%22The Sarcoma collection.%22%22%22
|
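A subtlety in the hunk above: it wraps the two module-level constants in a second "class AIRCCollection:" statement. This works because a class body executes before the class name is rebound; inside the body, AIRCCollection still refers to the original class, so the constructor calls succeed, but afterwards the module-level name points at the new constants holder rather than the original class. A self-contained illustration of the same scoping quirk:

class Widget(object):
    def __init__(self, name):
        self.name = name

class Widget:
    # This body runs *before* the name 'Widget' is rebound, so the
    # call below still reaches the original Widget defined above.
    SMALL = Widget('small')

assert Widget.SMALL.name == 'small'
assert not isinstance(Widget.SMALL, Widget)  # 'Widget' now names the holder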
b3163923a6a17cbfc64ee912c5f351402c3ac367
|
version bump
|
amnet/__init__.py
|
amnet/__init__.py
|
from amnet.amn import *
import amnet.atoms
import amnet.smt
import amnet.util
import amnet.lyap
__version__ = "0.0.4"
|
Python
| 0.000001
|
@@ -111,9 +111,9 @@
%220.
-0.4
+2.0
%22%0A
|
ed263e083dcb49bdd3f2d6cc707008cb5fe8ce1b
|
remove account.reconcile before deleting an account.move
|
account_statement_ext/account.py
|
account_statement_ext/account.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Joel Grand-Guillaume
# Copyright 2011-2012 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv.orm import Model
from openerp.osv import fields
class account_move(Model):
_inherit = 'account.move'
def unlink(self, cr, uid, ids, context=None):
"""
Delete the reconciliation when we delete the moves. This
        allows an easier way of cancelling the bank statement.
"""
for move in self.browse(cr, uid, ids, context=context):
for move_line in move.line_id:
if move_line.reconcile_id:
move_line.reconcile_id.unlink(context=context)
return super(account_move, self).unlink(cr, uid, ids, context=context)
|
Python
| 0
|
@@ -1258,32 +1258,129 @@
nt.%0A %22%22%22%0A
+ reconcile_to_delete = %5B%5D%0A reconcile_obj = self.pool.get('account.move.reconcile')%0A
for move
@@ -1517,58 +1517,123 @@
id:%0A
- move_line.reconcile_id.unlink(
+%09%09%09%09%09reconcile_to_delete.append(move_line.reconcile_id.id)%0A reconcile_obj.unlink(cr,uid,reconcile_to_delete,
cont
|
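Applied to the unlink override above, the hunk collects the reconcile IDs first and removes them with a single batched ORM call instead of unlinking each browse record inside the loop. (Note that the replacement line arrives tab-indented, the %09 sequences, inside a space-indented file; mixed indentation like this is fragile in Python.) The post-commit method, reconstructed as a uniformly space-indented sketch:

def unlink(self, cr, uid, ids, context=None):
    """
    Delete the reconciliations before the moves themselves; this
    allows an easier way of cancelling the bank statement.
    """
    reconcile_to_delete = []
    reconcile_obj = self.pool.get('account.move.reconcile')
    for move in self.browse(cr, uid, ids, context=context):
        for move_line in move.line_id:
            if move_line.reconcile_id:
                reconcile_to_delete.append(move_line.reconcile_id.id)
    reconcile_obj.unlink(cr, uid, reconcile_to_delete, context=context)
    return super(account_move, self).unlink(cr, uid, ids, context=context)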
9e0c521cc003c9c9096845dd01db2a535aa2c659
|
Add supports_chunked_encoding attribute to GoogleStorageDriver class.
|
libcloud/storage/drivers/google_storage.py
|
libcloud/storage/drivers/google_storage.py
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import httplib
import urllib
import copy
import base64
import hmac
from hashlib import sha1
from email.utils import formatdate
from libcloud.common.base import ConnectionUserAndKey
from libcloud.storage.drivers.s3 import S3StorageDriver, S3Response
from libcloud.storage.drivers.s3 import S3RawResponse
SIGNATURE_IDENTIFIER = 'GOOG1'
# Docs are a lie. Actual namespace returned is different from the one listed in
# the docs.
AUTH_HOST = 'commondatastorage.googleapis.com'
API_VERSION = '2006-03-01'
NAMESPACE = 'http://doc.s3.amazonaws.com/%s' % (API_VERSION)
class GoogleStorageConnection(ConnectionUserAndKey):
"""
    Represents a single connection to the Google storage API endpoint.
"""
host = AUTH_HOST
responseCls = S3Response
rawResponseCls = S3RawResponse
def add_default_headers(self, headers):
date = formatdate(usegmt=True)
headers['Date'] = date
return headers
def pre_connect_hook(self, params, headers):
signature = self._get_aws_auth_param(method=self.method,
headers=headers,
params=params,
expires=None,
secret_key=self.key,
path=self.action)
headers['Authorization'] = '%s %s:%s' % (SIGNATURE_IDENTIFIER,
self.user_id, signature)
return params, headers
def _get_aws_auth_param(self, method, headers, params, expires,
secret_key, path='/'):
# TODO: Refactor and re-use in S3 driver
"""
Signature = URL-Encode( Base64( HMAC-SHA1( YourSecretAccessKeyID,
UTF-8-Encoding-Of( StringToSign ) ) ) );
StringToSign = HTTP-VERB + "\n" +
Content-MD5 + "\n" +
Content-Type + "\n" +
Date + "\n" +
CanonicalizedHeaders +
CanonicalizedResource;
"""
special_header_keys = ['content-md5', 'content-type', 'date']
special_header_values = {}
extension_header_values = {}
headers_copy = copy.deepcopy(headers)
for key, value in headers_copy.iteritems():
if key.lower() in special_header_keys:
if key.lower() == 'date':
value = value.strip()
else:
value = value.lower().strip()
special_header_values[key.lower()] = value
elif key.lower().startswith('x-goog-'):
extension_header_values[key.lower()] = value.strip()
if not 'content-md5' in special_header_values:
special_header_values['content-md5'] = ''
if not 'content-type' in special_header_values:
special_header_values['content-type'] = ''
keys_sorted = special_header_values.keys()
keys_sorted.sort()
buf = [method]
for key in keys_sorted:
value = special_header_values[key]
buf.append(value)
string_to_sign = '\n'.join(buf)
keys_sorted = extension_header_values.keys()
keys_sorted.sort()
extension_header_string = []
for key in keys_sorted:
value = extension_header_values[key]
extension_header_string.append('%s:%s' % (key, value))
extension_header_string = '\n'.join(extension_header_string)
values_to_sign = []
for value in [string_to_sign, extension_header_string, path]:
if value:
values_to_sign.append(value)
string_to_sign = '\n'.join(values_to_sign)
b64_hmac = base64.b64encode(
hmac.new(secret_key, string_to_sign, digestmod=sha1).digest()
)
return b64_hmac
class GoogleStorageDriver(S3StorageDriver):
name = 'Google Storage'
connectionCls = GoogleStorageConnection
hash_type = 'md5'
namespace = NAMESPACE
|
Python
| 0.00015
|
@@ -4833,8 +4833,46 @@
MESPACE%0A
+ supports_chunked_encoding = False%0A
|
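The diff here only adds supports_chunked_encoding = False to the driver class; the more instructive part of the record is the signing helper above, which follows the AWS v2-style scheme its docstring quotes: build StringToSign from the HTTP verb, the canonical headers, the sorted x-goog- extension headers, and the path, then HMAC-SHA1 it with the secret key and base64-encode the digest. The cryptographic core, isolated (a sketch; key and payload are made-up values):

import base64
import hmac
from hashlib import sha1

secret_key = b'example-secret'  # hypothetical credentials
string_to_sign = b'GET\n\n\nMon, 01 Jan 2024 00:00:00 GMT\n/bucket/key'
signature = base64.b64encode(
    hmac.new(secret_key, string_to_sign, digestmod=sha1).digest())
# The header then becomes: 'Authorization: GOOG1 <access-id>:<signature>'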
55373ddcf68641ec0654b58b8471c01e749366f9
|
Use a single redis client object for the module
|
tinman/handlers/redis.py
|
tinman/handlers/redis.py
|
"""The RedisRequestHandler uses tornado-redis to support Redis. It will
auto-establish a single redis connection when initializing the connection.
"""
import logging
import tornadoredis
from tornado import web
LOGGER = logging.getLogger(__name__)
class RedisRequestHandler(web.RequestHandler):
"""This request handler will connect to Redis on initialize if the
connection is not previously set.
"""
REDIS_CLIENT = 'redis'
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
REDIS_DB = 0
def prepare(self):
super(RedisRequestHandler, self).prepare()
if not self._has_redis_client:
self._set_redis_client(self._connect_to_redis())
@property
def _has_redis_client(self):
return hasattr(self.application.tinman, self.REDIS_CLIENT)
def _connect_to_redis(self):
LOGGER.debug('Connecting to redis: %r', self._redis_connection_settings)
client = tornadoredis.Client(**self._redis_connection_settings)
client.connect()
return client
@property
def _redis_connection_settings(self):
return {'host': self._redis_settings.get('host', self.REDIS_HOST),
'port': self._redis_settings.get('port', self.REDIS_PORT),
'selected_db': self._redis_settings.get('db', self.REDIS_DB)}
@property
def _redis_settings(self):
LOGGER.debug('Redis settings')
return self.application.settings.get('redis', dict())
def _set_redis_client(self, client):
setattr(self.application.tinman, self.REDIS_CLIENT, client)
@property
def redis_client(self):
LOGGER.debug('Returning redis client')
return getattr(self.application.tinman, self.REDIS_CLIENT, None)
|
Python
| 0
|
@@ -243,16 +243,37 @@
ame__)%0A%0A
+redis_client = None%0A%0A
%0Aclass R
@@ -434,35 +434,8 @@
%22%22%22%0A
- REDIS_CLIENT = 'redis'%0A
@@ -511,112 +511,212 @@
def
-prepare(self):%0A super(RedisRequestHandler, self).prepare()%0A if not
+_connect_to_redis(self):%0A %22%22%22Connect to a Redis server returning the handle to the redis%0A connection.%0A%0A :rtype: tornadoredis.Redis%0A%0A %22%22%22%0A settings =
self._
-has_
redis_
-client:
+settings
%0A
@@ -724,75 +724,154 @@
- self._set_
+LOGGER.debug('Connecting to redis: %25r', settings)%0A client = tornado
redis
-_c
+.C
lient(
-self._connect_to_redis())%0A%0A @property
+**settings)%0A client.connect()%0A return client%0A
%0A
@@ -876,19 +876,19 @@
def _
-has
+new
_redis_c
@@ -912,171 +912,197 @@
-return hasattr(self.application.tinman, self.REDIS_CLIENT)%0A%0A def _connect_to_redis(self):%0A LOGGER.debug('C
+%22%22%22Create a new redis client and assign it to the module level handle.%0A%0A %22%22%22%0A global redis_client%0A redis_client = self._c
onnect
-ing to
+_to_
redis
-: %25r', self._redis_connection
+()%0A%0A @property%0A def _redis
_set
@@ -1098,33 +1098,39 @@
_redis_settings
-)
+(self):
%0A client
@@ -1126,71 +1126,77 @@
-client = tornadoredis.Client(**self._redis_connection_set
+%22%22%22Return the Redis settings from configuration as a dict, defaul
ting
-s)
%0A
@@ -1204,96 +1204,219 @@
-client.connect()%0A return client%0A%0A @property%0A def _redis_connec
+to localhost:6379:0 if it's not set in configuration. The dict format%0A is set to be passed as kwargs into the Client object.%0A%0A :rtype: dict%0A%0A %22%22%22%0A settings = self.applica
tion
-_
+.
settings
(sel
@@ -1403,39 +1403,53 @@
ication.settings
-(self):
+.get('redis', dict())
%0A return
@@ -1453,36 +1453,24 @@
rn %7B'host':
-self._redis_
settings.get
@@ -1520,28 +1520,16 @@
'port':
-self._redis_
settings
@@ -1590,28 +1590,16 @@
ed_db':
-self._redis_
settings
@@ -1626,38 +1626,24 @@
S_DB)%7D%0A%0A
- @property%0A
def
_redis_s
@@ -1630,39 +1630,31 @@
)%7D%0A%0A def
-_redis_settings
+prepare
(self):%0A
@@ -1661,209 +1661,376 @@
-LOGGER.debug('Redis settings')%0A return self.application.settings.get('redis', dict())%0A%0A def _set_redis_client(self, client):%0A setattr(self.application.tinman, self.REDIS_CLIENT,
+%22%22%22Prepare RedisRequestHandler requests, ensuring that there is a%0A connected tornadoredis.Client object.%0A%0A %22%22%22%0A global redis_client%0A super(RedisRequestHandler, self).prepare()%0A if redis_client is None or not redis_client.connection.connected:%0A LOGGER.info('Creating new Redis instance')%0A self._new_redis_
client
+(
)%0A%0A
@@ -2082,31 +2082,40 @@
-LOGGER.debug('Returning
+%22%22%22Return a handle to the active
red
@@ -2123,18 +2123,18 @@
s client
-')
+.%0A
%0A
@@ -2138,70 +2138,100 @@
-return getattr(self.application.tinman, self.REDIS_CLIENT, None)%0A
+:rtype: tornadoredis.Redis%0A%0A %22%22%22%0A global redis_client%0A return redis_client
%0A
|
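The long hunk above replaces the per-application attribute (a client stored on application.tinman under REDIS_CLIENT) with a module-level singleton that every handler shares and that is rebuilt whenever the connection drops. Condensed to its essentials, the post-commit handler behaves roughly like this (a sketch, not the verbatim result of the diff):

import logging

import tornadoredis
from tornado import web

LOGGER = logging.getLogger(__name__)

redis_client = None  # single client shared by all handler instances


class RedisRequestHandler(web.RequestHandler):

    def prepare(self):
        global redis_client
        super(RedisRequestHandler, self).prepare()
        # Reconnect lazily when the shared client is missing or dropped.
        if redis_client is None or not redis_client.connection.connected:
            LOGGER.info('Creating new Redis instance')
            redis_client = tornadoredis.Client(**self._redis_settings)
            redis_client.connect()

    @property
    def _redis_settings(self):
        # Defaults to localhost:6379, db 0 when unset in configuration.
        settings = self.application.settings.get('redis', dict())
        return {'host': settings.get('host', 'localhost'),
                'port': settings.get('port', 6379),
                'selected_db': settings.get('db', 0)}

    @property
    def redis_client(self):
        return redis_client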
10766f6f0e66ed3fbe5c6ec23d81926076129163
|
delete test code
|
jellyblog/views.py
|
jellyblog/views.py
|
from django.shortcuts import render, get_object_or_404, redirect
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Category, Document
def sorted_category():
return_category = list(Category.objects.all())
childList = []
for category in return_category:
if (category.parent.id == 1):
continue
else:
parent_index = return_category.index(category.parent)
return_category[parent_index].children.append(category)
childList.append(category)
for child in childList:
return_category.remove(child)
return return_category
categoryList = sorted_category()
def index(request):
return redirect('/page/1')
def index_with_page(request, page):
document_list = list(Document.objects.all())
document_list.reverse()
paginator = Paginator(document_list, 4)
documents = get_documents(paginator, page)
context = {'documents': documents, 'category_list': categoryList, 'page_range': get_page_number_range(
paginator, documents)}
return render(request, 'jellyblog/index.html', context)
def category_detail(request, category_id):
return redirect("/category/" + category_id + "/page/1")
def category_with_page(request, category_id, page):
selectedCategory = Category.objects.get(id=category_id)
document_list = []
if (selectedCategory.parent == categoryList[0]):
children = Category.objects.all().filter(parent=selectedCategory.id)
for child in children:
document_list += Document.objects.all().filter(category_id=child.id)
document_list += Document.objects.all().filter(category=category_id)
document_list.sort(key=id, reverse=True)
paginator = Paginator(document_list, 4)
documents = get_documents(paginator, page)
context = {'documents': documents, 'category_list': categoryList, 'category_id': category_id,
'page_range': get_page_number_range(paginator, documents)}
return render(request, 'jellyblog/category.html', context)
def get_documents(paginator, page):
try:
documents = paginator.page(page)
except PageNotAnInteger:
documents = paginator.page(1)
except EmptyPage:
documents = paginator.page(paginator.num_pages)
return documents
def get_page_number_range(paginator, page):
print(paginator.num_pages)
if (paginator.num_pages < 11):
return range(1, paginator.num_pages+1)
elif (page.number - 5 > 1):
if (page.number + 4 < paginator.num_pages):
return range(page.number - 5, page.number + 5)
else:
return range(page.number - 5, paginator.num_pages + 1)
else:
return range(1,11)
def detail(request, document_id):
document = get_object_or_404(Document, pk=document_id)
document.view_count += 1
document.save()
return render(request, 'jellyblog/detail.html', {'document': document, 'category_list': categoryList})
|
Python
| 0.000001
|
@@ -2367,39 +2367,8 @@
e):%0A
- print(paginator.num_pages)%0A
|
eb07abeaaa13952b862377c231e81db060bbf1a4
|
set subgraph_retries config in the retries test (#3076)
|
tests/integration_tests/tests/agentless_tests/test_task_retries.py
|
tests/integration_tests/tests/agentless_tests/test_task_retries.py
|
########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
import uuid
import pytest
from integration_tests import AgentlessTestCase
from integration_tests.tests.utils import get_resource as resource
INFINITY = -1
@pytest.mark.usefixtures('testmockoperations_plugin')
class TaskRetriesTest(AgentlessTestCase):
def setUp(self):
super(TaskRetriesTest, self).setUp()
self.events = []
def configure(self, retries, retry_interval):
self.client.manager.put_config('task_retries', retries)
self.client.manager.put_config('task_retry_interval', retry_interval)
def test_retries_and_retry_interval(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_1.yaml',
retries=2,
retry_interval=3,
expected_interval=3,
expected_retries=2,
invocations_type='failure_invocation')
def test_infinite_retries(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_2.yaml',
retries=INFINITY,
retry_interval=1,
expected_interval=1,
# see blueprint
expected_retries=5,
invocations_type='failure_invocation')
def test_retries_ignore_total(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_3.yaml',
retries=0,
retry_interval=0,
expected_interval=0,
            # see blueprint (get_state does ignore total_retries)
expected_retries=3,
invocations_type='host_get_state_invocation')
def test_non_recoverable_error(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_4.yaml',
retries=-1,
retry_interval=1,
expected_interval=1,
expected_retries=0,
invocations_type='failure_invocation',
expect_failure=True)
def test_recoverable_error(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_5.yaml',
retries=1,
retry_interval=1000,
# blueprint overrides retry_interval
expected_interval=1,
expected_retries=1,
invocations_type='failure_invocation')
def test_operation_retry_in_operation_mapping(self):
self._test_retries_and_retry_interval_impl(
blueprint='dsl/workflow_task_retries_6.yaml',
# setting global values to something really big
retries=1000,
retry_interval=1000,
# blueprint operation mapping overrides retry_interval
# and retries
expected_interval=1,
expected_retries=2,
invocations_type='failure_invocation')
def test_operation_retry(self):
self.configure(retries=5, retry_interval=5)
deployment_id = 'd{0}'.format(uuid.uuid4())
self.deploy_application(
resource('dsl/test-operation-retry-blueprint.yaml'),
deployment_id=deployment_id)
invocations = self.get_runtime_property(deployment_id,
'retry_invocations')[0]
self.assertEqual(4, invocations)
# 1 asserting event messages reflect that the task has been rescheduled
# 2 asserting that task events contain current_retries/total_retries
        # only asserting that the properties exist and nothing logical
# because this is already covered in the local workflow tests
def assertion():
events = self.client.events.list(deployment_id=deployment_id,
include_logs=True)
self.assertGreater(len(events), 0)
self.assertTrue(any(
'Task rescheduled' in event['message']
for event in events))
self.assertTrue(any(
'Retrying operation' in event['message']
for event in events))
# We're looking only at the events from the create operation
retry_events = [
event
for event in events
if event['operation'] == 'cloudify.interfaces.lifecycle.create'
]
# Note: sorting by timestamp and event_type to guarantee
# that tasks will be correctly ordered
# even if they have the same timestamp
event_type_sort_order = {
'sending_task': 0,
'task_started': 1,
'task_rescheduled': 2,
'task_succeeded': 3
}
retry_events = [e for e in retry_events if 'event_type' in e]
retry_events = sorted(
retry_events,
key=lambda e: (
e['timestamp'],
event_type_sort_order[e['event_type']],
),
)
self.assertEqual([e['event_type'] for e in retry_events],
['sending_task', 'task_rescheduled'] * 3 +
['sending_task', 'task_succeeded'])
self.assertTrue(retry_events[2]['message'].endswith('[retry 1/5]'))
self.assertTrue(retry_events[3]['message'].endswith('[retry 1/5]'))
self.assertTrue(retry_events[4]['message'].endswith('[retry 2/5]'))
self.assertTrue(retry_events[5]['message'].endswith('[retry 2/5]'))
self.assertTrue(retry_events[6]['message'].endswith('[retry 3/5]'))
self.assertTrue(retry_events[7]['message'].endswith('[retry 3/5]'))
# events are async so we may have to wait some
self.do_assertions(assertion, timeout=10)
def _test_retries_and_retry_interval_impl(self,
blueprint,
retries,
retry_interval,
expected_interval,
expected_retries,
invocations_type,
expect_failure=False):
self.configure(retries=retries, retry_interval=retry_interval)
deployment_id = 'd{0}'.format(uuid.uuid4())
if expect_failure:
self.assertRaises(RuntimeError, self.deploy_application,
dsl_path=resource(blueprint),
deployment_id=deployment_id)
else:
self.deploy_application(
resource(blueprint),
deployment_id=deployment_id)
invocations = self.get_runtime_property(deployment_id,
invocations_type)[0]
self.assertEqual(expected_retries + 1, len(invocations))
for i in range(len(invocations) - 1):
self.assertLessEqual(expected_interval,
invocations[i + 1] - invocations[i])
|
Python
| 0
|
@@ -1091,32 +1091,94 @@
ries', retries)%0A
+ self.client.manager.put_config('subgraph_retries', 0)%0A
self.cli
|
9326d6e602234cfe9d0224cc697f273b8036e64c
|
allow article previews for admin users
|
source/articles/views.py
|
source/articles/views.py
|
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView
from .models import Article
from source.base.utils import paginate
from taggit.models import Tag
# Current iteration does not use this in nav, but leaving dict
# in place for feed, url imports until we make a permanent call
SECTION_MAP = {
'articles': {
'name': 'Features',
'slug': 'articles',
'article_types': ['project', 'tool', 'how-to', 'interview', 'roundtable', 'roundup', 'event', 'update'],
},
}
# Current iteration only has *one* articles section, but this map is in place
# in case we split out into multiple sections that need parent categories
CATEGORY_MAP = {
'project': {
'name': 'Project',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'tool': {
'name': 'Tool',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'how-to': {
'name': 'How-to',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'interview': {
'name': 'Interview',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'roundtable': {
'name': 'Roundtable',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'roundup': {
'name': 'Roundup',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'event': {
'name': 'Event',
'parent_name': 'Features',
'parent_slug': 'articles',
},
'update': {
'name': 'Update',
'parent_name': 'Features',
'parent_slug': 'articles',
},
}
class ArticleList(ListView):
model = Article
def dispatch(self, *args, **kwargs):
self.section = kwargs.get('section', None)
self.category = kwargs.get('category', None)
self.tags = None
self.tag_slugs = kwargs.get('tag_slugs', None)
self.tag_slug_list = []
return super(ArticleList, self).dispatch(*args, **kwargs)
def get_queryset(self):
queryset = Article.live_objects.prefetch_related('authors', 'people', 'organizations')
if self.section:
queryset = queryset.filter(article_type__in=SECTION_MAP[self.section]['article_types'])
elif self.category:
queryset = queryset.filter(article_type=self.category)
elif self.tag_slugs:
self.tag_slug_list = self.tag_slugs.split('+')
# need to fail if any item in slug list references nonexistent tag
self.tags = [get_object_or_404(Tag, slug=tag_slug) for tag_slug in self.tag_slug_list]
for tag_slug in self.tag_slug_list:
queryset = queryset.filter(tags__slug=tag_slug)
return queryset
def get_section_links(self, context):
if self.section:
context.update({
'section': SECTION_MAP[self.section],
'active_nav': SECTION_MAP[self.section]['slug'],
'rss_link': reverse('article_list_by_section_feed', kwargs={'section': self.section}),
})
elif self.category:
context.update({
'category': CATEGORY_MAP[self.category]['name'],
'section': SECTION_MAP[CATEGORY_MAP[self.category]['parent_slug']],
'active_nav': CATEGORY_MAP[self.category]['parent_slug'],
'rss_link': reverse('article_list_by_category_feed', kwargs={'category': self.category}),
})
elif self.tags:
context.update({
'section': SECTION_MAP['articles'],
'active_nav': SECTION_MAP['articles']['slug'],
'tags':self.tags,
'rss_link': reverse('article_list_by_tag_feed', kwargs={'tag_slugs': self.tag_slugs}),
})
else:
context.update({
'rss_link': reverse('homepage_feed'),
})
return ''
def paginate_list(self, context):
page, paginator = paginate(self.request, self.object_list, 20)
context.update({
'page': page,
'paginator': paginator
})
return ''
def get_standard_context(self, context):
self.get_section_links(context)
self.paginate_list(context)
return ''
def get_context_data(self, **kwargs):
context = super(ArticleList, self).get_context_data(**kwargs)
self.get_standard_context(context)
return context
class ArticleDetail(DetailView):
model = Article
def get_queryset(self):
queryset = Article.live_objects.prefetch_related('articleblock_set', 'authors', 'people', 'organizations', 'code')
return queryset
|
Python
| 0
|
@@ -4729,39 +4729,480 @@
-queryset = Article.live_objects
+if self.request.user.is_staff:%0A # simple method for allowing article preview for editors,%0A # bypassing %60live_objects%60 check on detail view. List pages%0A # populate with public articles only, but admin user can hit%0A # %22view on site%22 button to see article even if it's not live yet%0A queryset = Article.objects.all()%0A else:%0A queryset = Article.live_objects.all()%0A %0A queryset = queryset
.pre
|
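The replacement above branches the detail queryset on request.user.is_staff so editors can follow the admin's "view on site" link to unpublished drafts, while list pages keep serving only published articles through live_objects. The same idea in compact form, following the view's own names:

class ArticleDetail(DetailView):
    model = Article

    def get_queryset(self):
        # Staff bypass the published-only manager so "view on site"
        # works for drafts; everyone else sees live articles only.
        if self.request.user.is_staff:
            queryset = Article.objects.all()
        else:
            queryset = Article.live_objects.all()
        return queryset.prefetch_related(
            'articleblock_set', 'authors', 'people', 'organizations', 'code')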
65972062c03133a79ff77d90f8a11fd16f7d16ff
|
Correct column name
|
luigi/tasks/rfam/pgload_go_term_mapping.py
|
luigi/tasks/rfam/pgload_go_term_mapping.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from tasks.utils.pgloader import PGLoader
from tasks.go_terms.pgload_go_terms import PGLoadGoTerms
from .go_term_mapping_csv import RfamGoTermsCSV
from .pgload_families import RfamPGLoadFamilies
CONTROL_FILE = """LOAD CSV
FROM '{filename}' WITH ENCODING ISO-8859-14
HAVING FIELDS
(
go_term_id,
rfam_model_id
)
INTO {db_url}
TARGET COLUMNS
(
go_term_id,
name
)
SET
search_path = '{search_path}'
WITH
skip header = 1,
fields escaped by double-quote,
fields terminated by ','
BEFORE LOAD DO
$$
create table if not exists load_rfam_go_terms (
go_term_id character varying(10) COLLATE pg_catalog."default" NOT NULL,
rfam_model_id character varying(20) COLLATE pg_catalog."default" NOT NULL
);
$$,
$$
truncate table load_rfam_go_terms;
$$
AFTER LOAD DO
$$ insert into rfam_go_terms (
go_term_id,
rfam_model_id
) (
select
go_term_id,
rfam_model_id
from load_rfam_go_terms
)
ON CONFLICT (go_term_id, rfam_model_id) DO UPDATE SET
go_term_id = excluded.go_term_id,
rfam_model_id = excluded.rfam_model_id
;
$$,
$$
drop table load_rfam_go_terms;
$$
;
"""
class RfamPGLoadGoTerms(PGLoader): # pylint: disable=R0904
"""
This will run pgloader on the Rfam go term mapping CSV file. The importing
will update any existing mappings and will not produce duplicates.
"""
def requires(self):
return [
RfamGoTermsCSV(),
PGLoadGoTerms(),
RfamPGLoadFamilies(),
]
def control_file(self):
filename = RfamGoTermsCSV().output().fn
return CONTROL_FILE.format(
filename=filename,
db_url=self.db_url(table='load_rfam_go_terms'),
search_path=self.db_search_path(),
)
|
Python
| 0
|
@@ -982,20 +982,29 @@
id,%0A
-name
+rfam_model_id
%0A)%0ASET%0A
|
3b20bbe9c0d647ec59fc30e7bb0b786ad3b77609
|
Add utf-8 encoding
|
afl_scraper/spiders/afltables.py
|
afl_scraper/spiders/afltables.py
|
import scrapy
import os
import fnmatch
from lxml import etree
AFL_STAT_HEADER = ["#", "S_ON", "S_OFF", "T_NM", "T_NS", "P_NM", "KI", "MK", "HB", "DI", "GL", "BH",
"HO", "TK", "RB", "IF", "CL", "CG", "FF", "FA", "BR", "CP", "UP", "CM",
"MI", "1%", "BO", "GA", "%P"]
AFL_SUB_ON = " ↑"
AFL_SUB_OFF = " ↓"
class AflTablesSpider(scrapy.Spider):
name = "afl_tables"
allowed_domains = ["afltables.com"]
start_urls = map(lambda x: "http://afltables.com/afl/seas/" + str(x) + ".html", range(1997, 2016))
download_delay = 3
def parse(self, response):
for href in response.xpath('//a[contains(@href, "/stats/games")]/@href'):
url = response.urljoin(href.extract())
yield scrapy.Request(url, callback=self.parse_game_stats)
def parse_game_stats(self, response):
season = str(response.xpath('//a[contains(@href, "/seas")]/text()').extract()[0]).replace("Games", "").strip()
round_num = str(response.xpath("//table[1]/tr[1]/td[2]/text()").extract()[0]).strip()
team_one = str(response.xpath("//table[1]/tr[2]/td[1]/a/text()").extract()[0])
team_two = str(response.xpath("//table[1]/tr[3]/td[1]/a/text()").extract()[0])
filename = "data/" + season + "/" + round_num + "/" + team_one + "_vs_" + team_two + ".html"
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
with open (filename, 'wb') as f:
f.write(response.body)
return None
def convert_html_to_psv(path):
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.html'):
htmlpath = os.path.join(root, filename)
print("Parsing: " + htmlpath)
psvdata = get_match_stats_psv(htmlpath)
psvfilename = os.path.splitext(filename)[0] + ".psv"
psvfile = os.path.join(root, psvfilename)
print("Writing: " + psvfile)
with open(psvfile, 'wb') as fpsv:
fpsv.write(psvdata)
def get_match_stats_psv(html):
parser = etree.HTMLParser()
tree = etree.parse(html, parser)
team_one_name = tree.xpath("//table[1]/tr[2]/td[1]/a/text()")[0]
team_one_score = float(tree.xpath("//table[1]/tr[2]/td[5]/b/text()")[0])
team_two_name = tree.xpath("//table[1]/tr[3]/td[1]/a/text()")[0]
team_two_score = float(tree.xpath("//table[1]/tr[3]/td[5]/b/text()")[0])
total_score = team_one_score + team_two_score
team_one_norm_score = team_one_score / total_score
team_two_norm_score = team_two_score / total_score
team_one_stats = get_team_stats(team_one_name, team_one_norm_score, tree.xpath("//tbody")[0])
team_two_stats = get_team_stats(team_two_name, team_two_norm_score, tree.xpath("//tbody")[1])
data = ["|".join(AFL_STAT_HEADER)]
for stat in team_one_stats:
data.append("|".join(stat))
for stat in team_two_stats:
data.append("|".join(stat))
return "\n".join(data)
def get_team_stats(team_name, normalized_score, tbody):
team_stats = []
for tr in tbody.xpath("tr"):
p_stats = {}
tds = tr.xpath("td")
p_stats["#"] = tds[0].text.encode('utf-8').replace(AFL_SUB_ON, "").replace(AFL_SUB_OFF, "").strip()
p_stats["S_ON"] = str((tds[0].text.encode('utf-8').find(AFL_SUB_ON) != -1).numerator)
p_stats["S_OFF"] = str((tds[0].text.encode('utf-8').find(AFL_SUB_OFF) != -1).numerator)
p_stats["T_NM"] = team_name
p_stats["T_NS"] = str(normalized_score)
p_stats["P_NM"] = tds[1].xpath("a")[0].text
for i in range(2, 25):
header_index = i + 4;
header = AFL_STAT_HEADER[header_index]
p_stats[header] = tds[i].text.strip() if tds[i].text.strip() else "0"
stats = map(lambda x: p_stats[x], AFL_STAT_HEADER)
team_stats.append(stats)
return team_stats
|
Python
| 0.002015
|
@@ -1,12 +1,37 @@
+# -*- coding: utf-8 -*-%0A%0A
import scrap
@@ -557,9 +557,9 @@
201
-6
+7
))%0A
|
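Two things happen in the small hunk above: it prepends the coding declaration, and it quietly bumps the scraped season range's end from 2016 to 2017. The declaration is the substantive fix: the module embeds non-ASCII arrow characters (AFL_SUB_ON / AFL_SUB_OFF), and Python 2 assumes ASCII source without it, refusing to compile the file. A minimal reproduction of the pattern:

# -*- coding: utf-8 -*-
# The original file writes the arrows as literal UTF-8 characters;
# without the declaration, Python 2 then fails at compile time with
#   SyntaxError: Non-ASCII character '\xe2' in file ...
# (Python 3 defaults to UTF-8 source, so the line is a no-op there.)
AFL_SUB_ON = u" \u2191"   # up arrow, written escaped here for portability
AFL_SUB_OFF = u" \u2193"  # down arrow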
a9aad224d6875e774cd0f7db7e30965ea6a3fbdc
|
Fix simple typo: taget -> target (#591)
|
algorithms/search/jump_search.py
|
algorithms/search/jump_search.py
|
import math
def jump_search(arr,target):
"""Jump Search
Worst-case Complexity: O(√n) (root(n))
All items in list must be sorted like binary search
Find block that contains target value and search it linearly in that block
It returns a first target value in array
reference: https://en.wikipedia.org/wiki/Jump_search
"""
n = len(arr)
block_size = int(math.sqrt(n))
block_prev = 0
block= block_size
# return -1 means that array doesn't contain taget value
# find block that contains target value
if arr[n - 1] < target:
return -1
while block <= n and arr[block - 1] < target:
block_prev = block
block += block_size
# find target value in block
while arr[block_prev] < target :
block_prev += 1
if block_prev == min(block, n) :
return -1
# if there is target value in array, return it
if arr[block_prev] == target :
return block_prev
else :
return -1
|
Python
| 0.999977
|
@@ -510,16 +510,17 @@
ntain ta
+r
get valu
|
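Beyond the one-character typo fix, the docstring above is a complete description of the algorithm: hop forward in blocks of size sqrt(n) until the block's last element reaches the target, then scan that block linearly, returning the index of the first match or -1. A quick usage check against the implementation above:

arr = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]  # input must be sorted
assert jump_search(arr, 13) == 6   # found: index of the first match
assert jump_search(arr, 4) == -1   # absent value inside the range
assert jump_search(arr, 20) == -1  # larger than the last element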
3f2d7df4082b39c6f1fce04ff015b06ab30ad9a1
|
Use distutils.version instead of pkg_resources
|
tools/sosreport/tower.py
|
tools/sosreport/tower.py
|
# Copyright (c) 2014 Ansible, Inc.
# All Rights Reserved.
import sos
from pkg_resources import parse_version
if parse_version(sos.__version__) >= parse_version('3.0'):
from sos.plugins import Plugin, RedHatPlugin, UbuntuPlugin
class tower(Plugin, RedHatPlugin, UbuntuPlugin):
'''Collect Ansible Tower related information'''
plugin_name = "tower"
def setup(self):
commands = [
"ansible --version", # ansible core version
"awx-manage --version", # tower version
"supervisorctl status", # tower process status
"tree -d /var/lib/awx", # show me the dirs
"ls -ll /var/lib/awx", # check permissions
"ls -ll /etc/awx"
]
dirs = [
"/etc/awx/",
"/var/log/supervisor/",
"/var/log/syslog",
"/var/log/udev",
"/var/log/kern*",
"/var/log/dist-upgrade",
"/var/log/installer",
"/var/log/unattended-upgrades",
"/var/log/apport.log"
]
for path in dirs:
self.add_copy_spec(path)
for command in commands:
self.collect_ext_output(command)
else:
import sos.plugintools
class tower(sos.plugintools.PluginBase):
'''Collect Ansible Tower related information'''
def setup(self):
commands = [
"ansible --version", # ansible core version
"awx-manage --version", # tower version
"supervisorctl status", # tower process status
"tree -d /var/lib/awx", # show me the dirs
"ls -ll /var/lib/awx", # check permissions
"ls -ll /etc/awx"
]
dirs = [
"/etc/awx/",
"/var/log/supervisor/",
"/var/log/syslog",
"/var/log/udev",
"/var/log/kern*",
"/var/log/dist-upgrade",
"/var/log/installer",
"/var/log/unattended-upgrades",
"/var/log/apport.log"
]
for path in dirs:
self.addCopySpec(path)
for command in commands:
self.collectExtOutput(command)
|
Python
| 0.000001
|
@@ -72,29 +72,33 @@
rom
-pkg_resources
+distutils.version
import
pars
@@ -93,23 +93,22 @@
import
-parse_v
+LooseV
ersion%0A%0A
@@ -110,23 +110,22 @@
ion%0A%0Aif
-parse_v
+LooseV
ersion(s
@@ -147,15 +147,14 @@
%3E=
-parse_v
+LooseV
ersi
|
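The substitution above trades pkg_resources.parse_version for distutils.version.LooseVersion, dropping the setuptools runtime dependency in a plugin that only needs a coarse ">= 3.0" check. A quick sanity check of the replacement (worth noting that distutils is deprecated as of Python 3.10, so new code would reach for packaging.version instead):

from distutils.version import LooseVersion

assert LooseVersion('3.1') >= LooseVersion('3.0')
assert LooseVersion('2.9.7') < LooseVersion('3.0')
# Comparison is segment-by-segment, not lexicographic:
assert LooseVersion('3.10') > LooseVersion('3.9')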
9c062d779fe7cb3fc439ff03f25ade58f6045ee5
|
fix PythonActivity path
|
androidhelpers.py
|
androidhelpers.py
|
from jnius import PythonJavaClass, java_method, autoclass
# SERVICE = Autoclass('org.renpy.PythonService').mService
SERVICE = autoclass('org.renpy.PythonActivity').mActivity
Intent = autoclass('android.content.Intent')
BluetoothManager = SERVICE.getSystemService(SERVICE.BLUETOOTH_SERVICE)
ADAPTER = BluetoothManager.getAdapter()
REQUEST_ENABLE_BT = 0x100
def activity_result(request_code, result_code, data):
print("get result: %s, %s, %s" % (
request_code == REQUEST_ENABLE_BT, result_code, data))
def start_scanning(callback):
if not ADAPTER.isEnabled():
SERVICE.startActivityForResult(
Intent(ADAPTER.ACTION_REQUEST_ENABLE), REQUEST_ENABLE_BT)
SERVICE.bind(on_activity_result=activity_result)
else:
ADAPTER.startLeScan(callback)
def stop_scanning(callback):
ADAPTER.stopLeScan(callback)
class AndroidScanner(PythonJavaClass):
__javainterfaces__ = ['android.bluetooth.BluetoothAdapter$LeScanCallback']
@java_method('Landroid.bluetooth.BluetoothDevice,I,[B')
def onLeScan(device, irssi, scan_record):
print device.getName()
print irssi
print scan_record
|
Python
| 0.00012
|
@@ -137,24 +137,32 @@
('org.renpy.
+android.
PythonActivi
|
f240c72e88e70f771305f44a14907929c6ddacb2
|
Delete obsolete _robust_bool and get_parent_attr (#265)
|
aospy/utils/io.py
|
aospy/utils/io.py
|
"""Utility functions for data input and output."""
import logging
import subprocess
import numpy as np
def _robust_bool(obj):
try:
return bool(obj)
except ValueError:
return obj.any()
def get_parent_attr(obj, attr, strict=False):
"""Search recursively through an object and its parent for an attribute.
Check if the object has the given attribute and it is non-empty. If not,
check each parent object for the attribute and use the first one found.
"""
attr_val = getattr(obj, attr, False)
if _robust_bool(attr_val):
return attr_val
else:
for parent in ('parent', 'run', 'model', 'proj'):
parent_obj = getattr(obj, parent, False)
if parent_obj:
return get_parent_attr(parent_obj, attr, strict=strict)
if strict:
raise AttributeError('Attribute %s not found in parent of %s'
% (attr, obj))
else:
return None
def data_in_label(intvl_in, dtype_in_time, dtype_in_vert=False):
"""Create string label specifying the input data of a calculation."""
intvl_lbl = intvl_in
time_lbl = dtype_in_time
lbl = '_'.join(['from', intvl_lbl, time_lbl]).replace('__', '_')
vert_lbl = dtype_in_vert if dtype_in_vert else False
if vert_lbl:
lbl = '_'.join([lbl, vert_lbl]).replace('__', '_')
return lbl
def data_out_label(time_intvl, dtype_time, dtype_vert=False):
intvl_lbl = time_label(time_intvl, return_val=False)
time_lbl = dtype_time
lbl = '.'.join([intvl_lbl, time_lbl]).replace('..', '.')
vert_lbl = dtype_vert if dtype_vert else False
if vert_lbl:
lbl = '.'.join([lbl, vert_lbl]).replace('..', '.')
return lbl
def yr_label(yr_range):
"""Create label of start and end years for aospy data I/O."""
assert yr_range is not None, "yr_range is None"
if yr_range[0] == yr_range[1]:
return '{:04d}'.format(yr_range[0])
else:
return '{:04d}-{:04d}'.format(*yr_range)
def time_label(intvl, return_val=True):
"""Create time interval label for aospy data I/O."""
# Monthly labels are 2 digit integers: '01' for jan, '02' for feb, etc.
if type(intvl) in [list, tuple, np.ndarray] and len(intvl) == 1:
label = '{:02}'.format(intvl[0])
value = np.array(intvl)
elif type(intvl) == int and intvl in range(1, 13):
label = '{:02}'.format(intvl)
value = np.array([intvl])
# Seasonal and annual time labels are short strings.
else:
labels = {'jfm': (1, 2, 3),
'fma': (2, 3, 4),
'mam': (3, 4, 5),
'amj': (4, 5, 6),
'mjj': (5, 6, 7),
'jja': (6, 7, 8),
'jas': (7, 8, 9),
'aso': (8, 9, 10),
'son': (9, 10, 11),
'ond': (10, 11, 12),
'ndj': (11, 12, 1),
'djf': (1, 2, 12),
'jjas': (6, 7, 8, 9),
'djfm': (12, 1, 2, 3),
'ann': range(1, 13)}
for lbl, vals in labels.items():
if intvl == lbl or set(intvl) == set(vals):
label = lbl
value = np.array(vals)
break
if return_val:
return label, value
else:
return label
def data_name_gfdl(name, domain, data_type, intvl_type, data_yr,
intvl, data_in_start_yr, data_in_dur):
"""Determine the filename of GFDL model data output."""
# Determine starting year of netCDF file to be accessed.
extra_yrs = (data_yr - data_in_start_yr) % data_in_dur
data_in_yr = data_yr - extra_yrs
# Determine file name. Two cases: time series (ts) or time-averaged (av).
if data_type in ('ts', 'inst'):
if intvl_type == 'annual':
if data_in_dur == 1:
filename = '.'.join([domain, '{:04d}'.format(data_in_yr),
name, 'nc'])
else:
filename = '.'.join([domain, '{:04d}-{:04d}'.format(
data_in_yr, data_in_yr + data_in_dur - 1
), name, 'nc'])
elif intvl_type == 'monthly':
filename = (domain + '.{:04d}'.format(data_in_yr) + '01-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'12.' + name + '.nc')
elif intvl_type == 'daily':
filename = (domain + '.{:04d}'.format(data_in_yr) + '0101-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'1231.' + name + '.nc')
elif 'hr' in intvl_type:
filename = '.'.join(
[domain, '{:04d}010100-{:04d}123123'.format(
data_in_yr, data_in_yr + data_in_dur - 1), name, 'nc']
)
elif data_type == 'av':
if intvl_type in ['annual', 'ann']:
label = 'ann'
elif intvl_type in ['seasonal', 'seas']:
label = intvl.upper()
elif intvl_type in ['monthly', 'mon']:
label, val = time_label(intvl)
if data_in_dur == 1:
filename = (domain + '.{:04d}'.format(data_in_yr) +
'.' + label + '.nc')
else:
filename = (domain + '.{:04d}'.format(data_in_yr) + '-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'.' + label + '.nc')
elif data_type == 'av_ts':
filename = (domain + '.{:04d}'.format(data_in_yr) + '-' +
'{:04d}'.format(int(data_in_yr+data_in_dur-1)) +
'.01-12.nc')
return filename
def dmget(files_list):
"""Call GFDL command 'dmget' to access archived files."""
if isinstance(files_list, str):
files_list = [files_list]
archive_files = []
for f in files_list:
if f.startswith('/archive'):
archive_files.append(f)
try:
subprocess.call(['dmget'] + archive_files)
except OSError:
logging.debug('dmget command not found in this machine')
|
Python
| 0
|
@@ -103,900 +103,8 @@
p%0A%0A%0A
-def _robust_bool(obj):%0A try:%0A return bool(obj)%0A except ValueError:%0A return obj.any()%0A%0A%0Adef get_parent_attr(obj, attr, strict=False):%0A %22%22%22Search recursively through an object and its parent for an attribute.%0A%0A Check if the object has the given attribute and it is non-empty. If not,%0A check each parent object for the attribute and use the first one found.%0A %22%22%22%0A attr_val = getattr(obj, attr, False)%0A if _robust_bool(attr_val):%0A return attr_val%0A%0A else:%0A for parent in ('parent', 'run', 'model', 'proj'):%0A parent_obj = getattr(obj, parent, False)%0A if parent_obj:%0A return get_parent_attr(parent_obj, attr, strict=strict)%0A%0A if strict:%0A raise AttributeError('Attribute %25s not found in parent of %25s'%0A %25 (attr, obj))%0A else:%0A return None%0A%0A%0A
def
|
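With the recursive parent-attribute lookup deleted, the module shrinks to pure I/O naming helpers. For orientation, the two label builders that remain behave like this (values traced directly from the code above, assuming the module is imported):

assert yr_label((1979, 1979)) == '1979'
assert yr_label((1979, 2012)) == '1979-2012'

# Single months give zero-padded labels plus the month array;
# named seasons resolve through the labels dict.
label, value = time_label(1)
assert label == '01' and list(value) == [1]
assert time_label('djf', return_val=False) == 'djf'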
9ff6f01f7319270f66e2cc32aa5201ad53228e8f
|
Add __init__ and __call_internal__ for isotropicHernquistdf
|
galpy/df/isotropicHernquistdf.py
|
galpy/df/isotropicHernquistdf.py
|
# Class that implements isotropic spherical Hernquist DF
# computed using the Eddington formula
from .sphericaldf import sphericaldf
from .Eddingtondf import Eddingtondf
class isotropicHernquistdf(Eddingtondf):
"""Class that implements isotropic spherical Hernquist DF computed using the Eddington formula"""
def __init__(self,ro=None,vo=None):
# Initialize using sphericaldf rather than Eddingtondf, because
# Eddingtondf will have code specific to computing the Eddington
# integral, which is not necessary for Hernquist
sphericaldf.__init__(self,ro=ro,vo=vo)
def fE(self,E):
# Stub for computing f(E)
return None
|
Python
| 0.000043
|
@@ -329,16 +329,25 @@
__(self,
+pot=None,
ro=None,
@@ -592,16 +592,24 @@
__(self,
+pot=pot,
ro=ro,vo
@@ -626,74 +626,1307 @@
def
-fE(self,E):%0A # Stub for computing f(E)%0A return None%0A
+__call_internal__(self,*args):%0A %22%22%22%0A NAME:%0A%0A __call_internal__%0A%0A PURPOSE%0A%0A Calculate the distribution function for an isotropic Hernquist%0A%0A INPUT:%0A%0A E - The energy%0A%0A OUTPUT:%0A%0A fH - The distribution function%0A%0A HISTORY:%0A%0A 2020-07 - Written%0A%0A %22%22%22%0A E = args%5B0%5D%0A if _APY_LOADED and isinstance(E,units.quantity.Quantity):%0A E.to(units.km**2/units.s**2).value/vo**2.%0A # Scale energies%0A phi0 = evaluatePotentials(self._pot,0,0)%0A Erel = -E%0A Etilde = Erel/phi0%0A # Handle potential E out of bounds%0A Etilde_out = numpy.where(Etilde%3C0%7CEtilde%3E1)%5B0%5D%0A if len(Etilde_out)%3E0:%0A # Set to dummy and 0 later, prevents functions throwing errors?%0A Etilde%5BEtilde_out%5D=0.5%0A _GMa = phi0*self._pot.a**2.%0A fH = numpy.power((2**0.5)*((2*numpy.pi)**3) *((_GMa)**1.5),-1)%5C%0A *(numpy.sqrt(Etilde)/numpy.power(1-Etilde,2))%5C%0A *((1-2*Etilde)*(8*numpy.power(Etilde,2)-8*Etilde-3)%5C%0A +((3*numpy.arcsin(numpy.sqrt(Etilde)))%5C%0A /numpy.sqrt(Etilde*(1-Etilde))))%0A # Fix out of bounds values%0A if len(Etilde_out) %3E 0:%0A fH%5BEtilde_out%5D = 0%0A return fH%0A
%0A
|
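The hunk above implements the classic isotropic Hernquist distribution function (Hernquist 1990). With \tilde{E} = E_{\mathrm{rel}}/\phi_0 as the code defines it, the expression being evaluated is

f(E) = \frac{1}{\sqrt{2}\,(2\pi)^{3}\,(G M a)^{3/2}}
       \frac{\sqrt{\tilde{E}}}{(1-\tilde{E})^{2}}
       \left[ (1-2\tilde{E})\,(8\tilde{E}^{2}-8\tilde{E}-3)
       + \frac{3\arcsin\sqrt{\tilde{E}}}{\sqrt{\tilde{E}\,(1-\tilde{E})}} \right]

One caution for readers copying the snippet: the out-of-bounds guard numpy.where(Etilde<0|Etilde>1) needs parentheses, as in (Etilde<0)|(Etilde>1), because bitwise | binds tighter than the comparisons, so the unparenthesized form does not express the intended range check.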
c40a2516951d735084eccd184e7ae44ba36510e3
|
Modify default source value
|
tests/mq/test_esworker_sns_sqs.py
|
tests/mq/test_esworker_sns_sqs.py
|
#!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), "../../mq"))
from mq.esworker_sns_sqs import taskConsumer
sys.path.append(os.path.join(os.path.dirname(__file__), "../../lib"))
from utilities.dot_dict import DotDict
from query_models import SearchQuery, ExistsMatch
sys.path.append(os.path.join(os.path.dirname(__file__), "../"))
from unit_test_suite import UnitTestSuite
class TestEsworkerSNSSQS(UnitTestSuite):
def setup(self):
super(TestEsworkerSNSSQS, self).setup()
mq_conn = 'abc'
task_queue = 'example-logs-mozdef'
es_connection = self.es_client
options = DotDict(
{
"esbulksize": 0,
"mozdefhostname": "unittest.hostname",
"taskexchange": task_queue,
'plugincheckfrequency': 120,
}
)
self.consumer = taskConsumer(mq_conn, task_queue, es_connection, options)
def search_and_verify_event(self, expected_event):
self.flush('events')
search_query = SearchQuery(minutes=5)
search_query.add_must(ExistsMatch('tags'))
results = search_query.execute(self.es_client)
assert len(results['hits']) == 1
saved_event = results['hits'][0]['_source']
self.verify_event(saved_event, expected_event)
def test_event1(self):
event = {
"Type": "Notification",
"MessageId": "abcdefg",
"TopicArn": "arn:aws:sns:us-west-2:123456789:example-logs-mozdef",
"Subject": "Fluentd-Notification",
"Message": "{\"time\":\"2017-05-25 07:14:15 +0000\",\"timestamp\":\"2017-05-25T07:14:15+00:00\",\"hostname\":\"abcdefghostname\",\"pname\":\"dhclient\",\"processid\":\"[123]\",\"type\":\"syslog\",\"logger\":\"systemslogs\",\"payload\":\"DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)\"}",
"Timestamp": "2017-05-25T07:14:16.103Z",
"SignatureVersion": "1",
"Signature": "examplesignatureabcd",
"SigningCertURL": "https://sns.us-west-2.amazonaws.com/SimpleNotificationService-12345.pem",
"UnsubscribeURL": "https://sns.us-west-2.amazonaws.com/?Action=Unsubscribe&SubscriptionArn=arn:aws:sns:us-west-2:123456789:example-logs-mozdef:adsf0laser013"
}
self.consumer.on_message(event)
expected_event = {
u'category': u'syslog',
u'details': {u'logger': u'systemslogs'},
u'hostname': u'abcdefghostname',
u'mozdefhostname': u'unittest.hostname',
u'processid': u'123',
u'processname': u'dhclient',
u'receivedtimestamp': u'2017-05-26T17:47:17.813876+00:00',
u'severity': u'INFO',
u'source': u'None',
u'summary': u'DHCPREQUEST of 1.2.3.4 on eth0 to 5.6.7.8 port 67 (xid=0x123456)',
u'tags': [u'example-logs-mozdef'],
u'timestamp': u'2017-05-25T07:14:15+00:00',
u'utctimestamp': u'2017-05-25T07:14:15+00:00'
}
self.search_and_verify_event(expected_event)
|
Python
| 0.000001
|
@@ -3090,12 +3090,15 @@
: u'
-None
+UNKNOWN
',%0A
|
07f39fbd3e068a01105f0c3a13523d9fbd78cc29
|
add tests for pathway results
|
tests/test_pathway_predictions.py
|
tests/test_pathway_predictions.py
|
# Copyright 2015 Novo Nordisk Foundation Center for Biosustainability, DTU.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, print_function
import os
import unittest
from cameo import load_model
from cameo.strain_design.pathway_prediction import PathwayPredictor
TESTDIR = os.path.dirname(__file__)
TESTMODEL = load_model(os.path.join(TESTDIR, 'data/EcoliCore.xml'))
UNIVERSALMODEL = load_model(os.path.join(TESTDIR, 'data/iJO1366.xml'))
TRAVIS = os.getenv('TRAVIS', False)
PATHWAYPREDICTOR = PathwayPredictor(TESTMODEL, universal_model=UNIVERSALMODEL)
class TestPathwayPredictor(unittest.TestCase):
def setUp(self):
self.pathway_predictor = PATHWAYPREDICTOR
def test_setting_incorrect_universal_model_raises(self):
with self.assertRaisesRegexp(ValueError, 'Provided universal_model.*'):
PathwayPredictor(TESTMODEL, universal_model='Mickey_Mouse')
# def test_predict_native_compound_returns_shorter_alternatives(self):
# result = self.pathway_predictor.run(product='Phosphoenolpyruvate', max_predictions=1)
# self.assertTrue(len(result.pathways) == 1)
# self.assertTrue(len(result.pathways[0].pathway) == 3)
# self.assertTrue(len(result.pathways[0].adapters) == 0)
def test_predict_non_native_compound(self):
result = self.pathway_predictor.run(product='L-Serine', max_predictions=1)
self.assertTrue(len(result.pathways) == 1)
self.assertTrue(len(result.pathways[0].reactions) == 3)
self.assertTrue(len(result.pathways[0].adapters) == 0)
|
Python
| 0
|
@@ -743,64 +743,289 @@
meo.
-strain_design.pathway_prediction import PathwayPredictor
+core.pathway import Pathway%0Afrom cameo.flux_analysis.analysis import PhenotypicPhasePlaneResult%0Afrom cameo.strain_design.pathway_prediction import PathwayPredictor%0Afrom cameo.strain_design.pathway_prediction.pathway_predictor import PathwayResult%0Afrom cameo.util import TimeMachine
%0A%0ATE
@@ -2130,24 +2130,66 @@
dictions=1)%0A
+ self.assertTrue(len(result) == 1)%0A
self
@@ -2334,28 +2334,1563 @@
pathways%5B0%5D.adapters) == 0)%0A
+%0A%0Aclass PathwayPredictionsTestCase(unittest.TestCase):%0A def setUp(self):%0A self.result = PATHWAYPREDICTOR.run(product='L-Serine', max_predictions=1)%0A%0A def test_pathway(self):%0A model = TESTMODEL.copy()%0A pathway = self.result%5B0%5D%0A biomass = 'Biomass_Ecoli_core_N_lp_w_fsh_GAM_rp__Nmet2'%0A self.assertIsInstance(pathway, PathwayResult)%0A self.assertIsInstance(pathway, Pathway)%0A self.assertIsInstance(pathway.production_envelope(model, objective=biomass), PhenotypicPhasePlaneResult)%0A self.assertTrue(pathway.needs_optimization(model, objective=biomass))%0A%0A def test_plug_model_without_time_machine(self):%0A model = TESTMODEL.copy()%0A self.result%5B0%5D.plug_model(model)%0A for reaction in self.result%5B0%5D.reactions:%0A self.assertIn(reaction, model.reactions)%0A%0A for reaction in self.result%5B0%5D.exchanges:%0A self.assertIn(reaction, model.reactions)%0A%0A for reaction in self.result%5B0%5D.adapters:%0A self.assertIn(reaction, model.reactions)%0A%0A def test_plug_model_with_time_machine(self):%0A model = TESTMODEL.copy()%0A with TimeMachine() as tm:%0A self.result%5B0%5D.plug_model(model, tm=tm)%0A%0A for reaction in self.result%5B0%5D.reactions:%0A self.assertNotIn(reaction, model.reactions)%0A%0A for reaction in self.result%5B0%5D.exchanges:%0A self.assertNotIn(reaction, model.reactions)%0A%0A for reaction in self.result%5B0%5D.adapters:%0A self.assertNotIn(reaction, model.reactions)
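The new test_plug_model_with_time_machine relies on cameo's TimeMachine context manager, which records every change made inside the with block and undoes it on exit. A self-contained sketch of that undo-on-exit pattern (a stand-in, not cameo's implementation):

class UndoStack:
    """Record an undo callback per change; replay them in reverse on exit."""
    def __init__(self):
        self._undo = []

    def do(self, action, undo):
        action()
        self._undo.append(undo)

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        while self._undo:
            self._undo.pop()()  # LIFO: most recent change is undone first

reactions = []
with UndoStack() as tm:
    tm.do(lambda: reactions.append('EX_ser'), lambda: reactions.remove('EX_ser'))
    assert 'EX_ser' in reactions
assert 'EX_ser' not in reactions  # rolled back when the block exited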
|
035f77f9f5db6088a9331e0d9beb0c393982fbe4
|
rename function plus refactor
|
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
|
web/impact/impact/v1/views/mentor_program_office_hour_list_view.py
|
# MIT License
# Copyright (c) 2019 MassChallenge, Inc.
from django.db.models import Value as V
from django.db.models.functions import Concat
from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
MentorProgramOfficeHourHelper,
)
ID_FIELDS = ['mentor_id', 'finalist_id']
NAME_FIELDS = ['mentor_name', 'finalist_name']
class MentorProgramOfficeHourListView(BaseListView):
view_name = "office_hour"
helper_class = MentorProgramOfficeHourHelper
def filter(self, qs):
qs = super().filter(qs)
if not self.request.query_params.keys():
return qs
if self._has_participant_filter(NAME_FIELDS):
return self._filter_by_participant_names(qs)
if self._has_participant_filter(ID_FIELDS):
return self._filter_by_ids(qs)
user_name = self.request.query_params.get('user_name', None)
if user_name:
return self._filter_by_user_name(user_name, qs)
def _filter_by_id(self, id_field, qs):
value = self.request.query_params.get(id_field, None)
if value and value.isdigit():
return self.filter_by_field(id_field, qs)
return qs
def _filter_by_ids(self, qs):
qs = self._filter_by_id('mentor_id', qs)
qs = self._filter_by_id('finalist_id', qs)
return qs
def _filter_by_participant_names(self, qs):
qs = self._filter_by_participant_name('mentor_name', qs)
qs = self._filter_by_participant_name('finalist_name', qs)
return qs
def _filter_by_participant_name(self, name_field, qs):
value = self.request.query_params.get(name_field, None)
user_type = name_field.replace('_name', '')
if value:
return self._filter_by_full_name(qs, user_type, value)
return qs
def _filter_by_user_name(self, user_name, qs):
mentor_qs = self._filter_by_full_name(qs, 'mentor', user_name)
finalist_qs = self._filter_by_full_name(qs, 'finalist', user_name)
return mentor_qs | finalist_qs
def _filter_by_full_name(self, qs, user, name_value):
first_name_field = '{}__first_name'.format(user)
last_name_field = '{}__last_name'.format(user)
result = qs.annotate(
full_name=Concat(
first_name_field, V(' '), last_name_field)).filter(
full_name__icontains=name_value)
return result
def _has_participant_filter(self, fields):
return any(
field in self.request.query_params.keys() for field in fields)
|
Python
| 0.000001
|
@@ -631,34 +631,41 @@
f self._has_
-participan
+mentor_or_finalis
t_filter(NAM
@@ -706,34 +706,41 @@
._filter_by_
-participan
+mentor_or_finalis
t_names(qs)%0A
@@ -757,34 +757,41 @@
f self._has_
-participan
+mentor_or_finalis
t_filter(ID_
@@ -847,103 +847,8 @@
s)%0A%0A
- user_name = self.request.query_params.get('user_name', None)%0A if user_name:%0A
@@ -884,27 +884,16 @@
er_name(
-user_name,
qs)%0A%0A
@@ -1273,34 +1273,41 @@
_filter_by_
-participan
+mentor_or_finalis
t_names(self
@@ -1338,34 +1338,41 @@
._filter_by_
-participan
+mentor_or_finalis
t_name('ment
@@ -1410,34 +1410,41 @@
._filter_by_
-participan
+mentor_or_finalis
t_name('fina
@@ -1497,26 +1497,33 @@
lter_by_
-participan
+mentor_or_finalis
t_name(s
@@ -1798,16 +1798,228 @@
me(self,
+ qs):%0A user_name = self.request.query_params.get('user_name', None)%0A if user_name:%0A return self._filter_by_user_names(user_name, qs)%0A return qs%0A%0A def _filter_by_user_names(self,
user_na
@@ -2604,18 +2604,25 @@
has_
-participan
+mentor_or_finalis
t_fi
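Apart from the participant-to-mentor_or_finalist renames, the filtering hinges on _filter_by_full_name: annotate the queryset with a concatenated "First Last" string, then run a single icontains lookup against it. A minimal sketch of that ORM pattern (the prefix-based lookups are illustrative):

from django.db.models import Value as V
from django.db.models.functions import Concat

def filter_by_full_name(qs, prefix, needle):
    # e.g. prefix='mentor' builds mentor__first_name / mentor__last_name
    # lookups and matches needle anywhere in the combined full name.
    return qs.annotate(
        full_name=Concat('%s__first_name' % prefix, V(' '),
                         '%s__last_name' % prefix)
    ).filter(full_name__icontains=needle)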
|
c5c509ff9e2c4599fcf51044abc9e7cbe4a152e1
|
remove redundant method [skip ci]
|
custom/enikshay/management/commands/base_model_reconciliation.py
|
custom/enikshay/management/commands/base_model_reconciliation.py
|
import csv
from datetime import datetime
from django.core.management.base import BaseCommand, CommandError
from django.core.mail import EmailMessage
from django.conf import settings
from custom.enikshay.const import ENROLLED_IN_PRIVATE
class BaseModelReconciliationCommand(BaseCommand):
email_subject = None
result_file_name_prefix = None
result_file_headers = None
def __init__(self, *args, **kwargs):
super(BaseModelReconciliationCommand, self).__init__(*args, **kwargs)
self.commit = False
self.recipient = None
self.result_file_name = None
def add_arguments(self, parser):
parser.add_argument('--commit', action='store_true')
parser.add_argument('--recipient', type=str)
def handle(self, *args, **options):
raise CommandError(
"This is the base reconciliation class and should not be run. "
"One of it's inherited commands should be run.")
def public_app_case(self, person_case):
if person_case.get_case_property(ENROLLED_IN_PRIVATE) == 'true':
return False
return True
@staticmethod
def get_result_file_headers():
raise NotImplementedError
def email_report(self):
csv_file = open(self.result_file_name)
email = EmailMessage(
subject=self.email_subject,
body="Please find attached report for a %s run finished at %s." %
('real' if self.commit else 'mock', datetime.now()),
to=self.recipient,
from_email=settings.DEFAULT_FROM_EMAIL
)
email.attach(filename=self.result_file_name, content=csv_file.read())
csv_file.close()
email.send()
def setup_result_file(self):
file_name = "{file_name_prefix}_{timestamp}.csv".format(
file_name_prefix=self.result_file_name_prefix,
timestamp=datetime.now().strftime("%Y-%m-%d-%H-%M-%S"),
)
with open(file_name, 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.get_result_file_headers)
writer.writeheader()
return file_name
def writerow(self, row):
with open(self.result_file_name, 'a') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=self.get_result_file_headers)
writer.writerow(row)
|
Python
| 0.000003
|
@@ -1117,96 +1117,8 @@
ue%0A%0A
- @staticmethod%0A def get_result_file_headers():%0A raise NotImplementedError%0A%0A
@@ -1957,36 +1957,32 @@
fieldnames=self.
-get_
result_file_head
@@ -2193,20 +2193,16 @@
es=self.
-get_
result_f
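Note what the hunks leave behind: both writers now pass the plain result_file_headers class attribute as fieldnames (the old code handed csv.DictWriter the unbound get_result_file_headers method object, since the call parentheses were missing, and that method only raised NotImplementedError). A small sketch of the surviving pattern:

import csv

class ReportWriter(object):
    # Headers live on the class; subclasses simply override this list.
    result_file_headers = ['case_id', 'status']

    def __init__(self, file_name):
        self.result_file_name = file_name
        with open(self.result_file_name, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=self.result_file_headers)
            writer.writeheader()

    def writerow(self, row):
        with open(self.result_file_name, 'a') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=self.result_file_headers)
            writer.writerow(row)

ReportWriter('report.csv').writerow({'case_id': '123', 'status': 'reconciled'})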
|
550e37d703fcc4200dfaf03e483b009d88b02276
|
bump version number
|
scrubadub/__init__.py
|
scrubadub/__init__.py
|
from typing import Union, List, Dict, Sequence, Optional
# convenient imports
from .scrubbers import Scrubber
from . import filth
from . import detectors
from . import post_processors
from .filth import Filth
__version__ = VERSION = "2.0.0.rc0"
__all__ = [
'Scrubber', 'filth', 'detectors', 'post_processors', 'clean', 'clean_documents', 'list_filth',
'list_filth_documents',
]
def clean(text: str, locale: Optional[str] = None, **kwargs) -> str:
"""Seaches for ``Filth`` in `text` in a string and replaces it with placeholders.
.. code:: pycon
>>> import scrubadub
>>> scrubadub.clean(u"contact me at joe@example.com")
'contact me at {{EMAIL}}'
:param text: The text containing possible PII that needs to be redacted
:type text: `str`
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
:type locale: str
:return: Text with all :class:``Filth`` replaced.
:rtype: `str`
"""
scrubber = Scrubber(locale=locale)
return scrubber.clean(text, **kwargs)
def clean_documents(documents: Union[Sequence[str], Dict[Optional[str], str]], locale: Optional[str] = None, **kwargs
) -> Union[Sequence[str], Dict[Optional[str], str]]:
"""Seaches for ``Filth`` in `documents` and replaces it with placeholders.
`documents` can be in a dict, in the format of ``{'document_name': 'document'}``, or as a list of strings
    (each a separate document).
This can be useful when processing many documents.
.. code:: pycon
>>> import scrubadub
>>> scrubadub.clean_documents({'contact.txt': "contact me at joe@example.com",
... 'hello.txt': 'hello world!'})
{'contact.txt': 'contact me at {{EMAIL}}', 'hello.txt': 'hello world!'}
>>> scrubadub.clean_documents(["contact me at joe@example.com", 'hello world!'])
['contact me at {{EMAIL}}', 'hello world!']
:param documents: Documents containing possible PII that needs to be redacted in the form of a list of documents
        or a dictionary with the key as the document name and the value as the document text
:type documents: `list` of `str` objects, `dict` of `str` objects
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
:type locale: str
:return: Documents in the same format as input, but with `Filth` redacted
:rtype: `list` of `str` objects, `dict` of `str` objects; same as input
"""
scrubber = Scrubber(locale=locale)
return scrubber.clean_documents(documents, **kwargs)
def list_filth(text: str, locale: Optional[str] = None, **kwargs) -> List[Filth]:
"""Return a list of ``Filth`` that was detected in the string `text`.
.. code:: pycon
>>> import scrubadub
>>> scrubadub.list_filth(u"contact me at joe@example.com")
[<EmailFilth text='joe@example.com' beg=14 end=29 detector_name='email' locale='en_US'>]
:param text: The text containing possible PII that needs to be found
:type text: `str`
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
:type locale: str
:return: A list of all the :class:``Filth`` objects that were found
:rtype: `list` of :class:``Filth`` objects
"""
scrubber = Scrubber(locale=locale)
return list(scrubber.iter_filth(text, **kwargs))
def list_filth_documents(documents: Union[List[str], Dict[Optional[str], str]], locale: Optional[str] = None,
**kwargs) -> List[Filth]:
"""Return a list of ``Filth`` that was detected in the string `text`.
`documents` can be in a dict, in the format of ``{'document_name': 'document'}``, or as a list of strings
    (each a separate document).
This can be useful when processing many documents.
.. code:: pycon
>>> import scrubadub
>>> scrubadub.list_filth_documents(
... {'contact.txt': "contact me at joe@example.com", 'hello.txt': 'hello world!'}
... )
[<EmailFilth text='joe@example.com' document_name='contact.txt' beg=14 end=29 detector_name='email' \
locale='en_US'>]
>>> scrubadub.list_filth_documents(["contact me at joe@example.com", 'hello world!'])
[<EmailFilth text='joe@example.com' document_name='0' beg=14 end=29 detector_name='email' locale='en_US'>]
:param documents: Documents containing possible PII that needs to be found in the form of a list of documents
        or a dictionary with the key as the document name and the value as the document text
:type documents: `list` of `str` objects, `dict` of `str` objects
:param locale: The locale of the documents in the format: 2 letter lower-case language code followed by an
underscore and the two letter upper-case country code, eg "en_GB" or "de_CH"
:type locale: str
:return: A list of all the :class:``Filth`` objects that were found
:rtype: `list` of :class:``Filth`` objects
"""
scrubber = Scrubber(locale=locale)
return list(scrubber.iter_filth_documents(documents, **kwargs))
|
Python
| 0.000004
|
@@ -238,17 +238,17 @@
2.0.0.rc
-0
+1
%22%0A__all_
|
3074fed515d69af505e41e365738f22764f00695
|
Configure main chatroom to be open all day
|
ditto/scripts/setup_test_data.py
|
ditto/scripts/setup_test_data.py
|
"""Script to set up test data for a Kvoti instance.
As before we tried to do this with migrations but ran into problems
early on with custom permissions not being created.
In any case, it's probably easier/better to have a single bootstrap
script instead of a bunch of data migrations.
"""
import os
from django.conf import settings
from django.contrib.auth.models import Group, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
import casenotes.models
import chat.models
import configuration.models
import core
import dittoforms.models
import multitenancy.models
import multitenancy.tenant
from users.models import User
INTERACTIONS = ["Messaging"]
REG_FORM_SPEC = '[{"name":"Name","on":true,"fields":[{"name":"First name"},{"name":"Last name"}]},{"name":"Gender","on":true,"options":["Male","Female","Other"]},{"name":"Ethnicity","on":true,"options":["White British","Other"]},{"name":"How did you hear about us?","on":true,"multiple":true,"options":["Internet search","Magazine","Other"]}]'
def run():
setup_guest_passwords()
setup_site()
setup_features()
setup_default_roles()
setup_permissions()
setup_interactions()
setup_admin_users()
setup_members()
setup_tenants()
setup_reg_form()
setup_chat_conf()
setup_case_notes()
def setup_guest_passwords():
global GUEST_PASSWORDS
if 'GUEST_PASSWORDS' in os.environ:
GUEST_PASSWORDS = os.environ['GUEST_PASSWORDS'].split()
else:
GUEST_PASSWORDS = None
def setup_site(name='KVOTI.TECHNOLOGY'):
site = Site.objects.get_current()
site.name = name
domain = 'localhost:8000' if settings.DEBUG else site.name.lower()
site.domain = domain
site.save()
def setup_features():
for slug, name, perms in (
('chatroom', 'Chatroom', [
('can_chat', 'Can chat'),
('create_chatroom', 'Can create chatroom')
]),
):
feature, _ = configuration.models.Feature.objects.get_or_create(
slug=slug, name=name)
content_type = ContentType.objects.get_for_model(configuration.models.Feature)
for codename, name in perms:
perm, _ = Permission.objects.get_or_create(
codename=codename,
content_type=content_type)
perm.name = name
perm.save()
feature.permissions.add(perm)
def setup_default_roles():
for group in core.DEFAULT_ROLES:
group, _ = Group.objects.get_or_create(name=group)
def setup_permissions():
content_type = ContentType.objects.get_for_model(User)
perm, _ = Permission.objects.get_or_create(
codename='can_admin',
content_type=content_type)
perm.name = 'Can administer'
perm.save()
Group.objects.get(name=core.ADMIN_ROLE).permissions.add(perm)
perm = Permission.objects.get(codename='invite_user')
Group.objects.get(name=core.ADMIN_ROLE).permissions.add(perm)
perm = Permission.objects.get(codename='create_chatroom')
Group.objects.get(name=core.ADMIN_ROLE).permissions.add(perm)
perm = Permission.objects.get(codename='guest')
Group.objects.get(name=core.GUEST_ROLE).permissions.add(perm)
perm = Permission.objects.get(codename='can_chat')
for group in Group.objects.all():
group.permissions.add(perm)
def setup_interactions():
for interaction in INTERACTIONS:
configuration.models.Interaction.objects.get_or_create(name=interaction)
def setup_admin_users():
_create_user('admin', core.ADMIN_ROLE)
_create_user('visitor', core.ADMIN_ROLE)
def setup_members():
for name in ['mark', 'sarah', 'ross', 'emma']:
_create_user(name, core.MEMBER_ROLE)
def _create_user(username, group_name):
user, created = User.objects.get_or_create(username=username)
user.emailaddress_set.get_or_create(
verified=1,
defaults={'email': '%s@example.com' % username})
if created:
if 'GUEST_PASSWORDS' in os.environ:
password = GUEST_PASSWORDS.pop()
else:
password = 'x'
user.set_password(password)
user.save()
user.groups.add(Group.objects.get(name=group_name))
def setup_tenants():
user = User.objects.get(username='mark')
multitenancy.models.Tenant.objects.create(
user=user,
network_name='Kvoti',
slug='di',
is_configured=True,
)
if not multitenancy.tenant.is_main():
setup_site(name='Kvoti')
def setup_reg_form():
for role in Group.objects.all():
form = dittoforms.models.FormSpec.objects.create(
slug='reg',
spec=REG_FORM_SPEC
)
configuration.models.RegForm.objects.create(
role=role,
form=form
)
def setup_chat_conf():
room = chat.models.Room.objects.create(
slug='main',
name='Main chatroom',
is_regular=True
)
chat.models.Slot.objects.create(
room=room,
day=chat.models.Slot.Monday,
start=8,
end=22,
)
def setup_case_notes():
client = User.objects.get(username="mark")
author = User.objects.get(username="sarah")
for i in range(1, 5):
casenotes.models.CaseNote.objects.create(
author=author,
client=client,
text="Case note %s" % i
)
|
Python
| 0
|
@@ -5004,24 +5004,53 @@
=True%0A )%0A
+ for day in range(7):%0A
chat.mod
@@ -5078,24 +5078,28 @@
te(%0A
+
room=room,%0A
@@ -5097,16 +5097,20 @@
m=room,%0A
+
@@ -5117,33 +5117,17 @@
day=
-chat.models.Slot.Mon
day,%0A
+
@@ -5151,16 +5151,24 @@
-end=22,%0A
+ end=18,%0A
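Applying these hunks, setup_chat_conf goes from a single Monday 8-22 slot to one 8-18 slot per weekday index; reconstructed from the diff:

def setup_chat_conf():
    room = chat.models.Room.objects.create(
        slug='main',
        name='Main chatroom',
        is_regular=True
    )
    for day in range(7):
        chat.models.Slot.objects.create(
            room=room,
            day=day,
            start=8,
            end=18,
        )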
|
8f90b8cd67b6bca0c8c2123c229b18bd0ee078d8
|
Implement FlatfileCommentProvider._load_one.
|
firmant/plugins/datasource/flatfile/comments.py
|
firmant/plugins/datasource/flatfile/comments.py
|
import datetime
import pytz
import os
import re
from firmant.utils import not_implemented
comment_re = r'(?P<year>\d{4}),(?P<month>\d{2}),(?P<day>\d{2}),(?P<slug>.+)' +\
r',(?P<created>[1-9][0-9]*),(?P<id>[0-9a-f]{40})'
comment_re = re.compile(comment_re)
class FlatfileCommentProvider(object):
def __init__(self, rc, settings):
self.settings = settings
def for_entry(self, status, slug, year, month, day):
not_implemented()
|
Python
| 0
|
@@ -84,16 +84,64 @@
emented%0A
+from firmant.datasource.comments import Comment%0A
%0A%0Acommen
@@ -501,8 +501,1486 @@
ented()%0A
+%0A def _load_one(self, status, entry_pkey, created, id):%0A comment_dt = entry_pkey%5B0%5D.strftime('%25Y,%25m,%25d')%0A comment_filename = '%25s,%25s,%25i,%25s' %25 %5C%0A (comment_dt, entry_pkey%5B1%5D, created, id)%0A comment_path = os.path.join(self.settings%5B'FLATFILE_BASE'%5D,%0A 'comments',%0A status,%0A comment_filename)%0A%0A if not os.access(comment_path, os.R_OK):%0A return None%0A%0A f = open(comment_path)%0A d = f.read()%0A f.close()%0A%0A headers, content = d.split('%5Cn%5Cn', 1)%0A%0A comment = Comment()%0A for header in headers.split('%5Cn'):%0A if header.startswith('Name:%5Ct'):%0A comment.name = header%5B6:%5D%0A elif header.startswith('Email:%5Ct'):%0A comment.email = header%5B7:%5D%0A elif header.startswith('URL:%5Ct'):%0A comment.url = header%5B5:%5D%0A elif header.startswith('Host:%5Ct'):%0A comment.ip = header%5B6:%5D%0A elif header.startswith('Agent:%5Ct'):%0A comment.useragent = header%5B7:%5D%0A%0A if content.endswith('%5Cn'):%0A content = content%5B:-1%5D%0A comment.content = content%0A%0A comment.status = status%0A comment.entry_pkey = entry_pkey%0A%0A created_dt = datetime.datetime.fromtimestamp(created)%0A comment.created = pytz.utc.localize(created_dt)%0A%0A return comment%0A
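_load_one treats each flat file as an RFC-822-style record: a header block and the body separated by the first blank line, with tab-delimited 'Name:' style header lines. A self-contained sketch of that parse, returning a plain dict instead of firmant's Comment:

def parse_comment(raw):
    headers, content = raw.split('\n\n', 1)  # first blank line splits header/body
    fields = {'Name': 'name', 'Email': 'email', 'URL': 'url',
              'Host': 'ip', 'Agent': 'useragent'}
    comment = {}
    for line in headers.split('\n'):
        key, sep, value = line.partition(':\t')
        if sep and key in fields:
            comment[fields[key]] = value
    if content.endswith('\n'):
        content = content[:-1]  # strip the single trailing newline, as above
    comment['content'] = content
    return comment

print(parse_comment('Name:\tAlice\nEmail:\talice@example.com\n\nFirst post!\n'))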
|
68ecbb59c856a20f8f00cae47f1075086da982c7
|
Add bitmask imports to nddata.__init__.py
|
astropy/nddata/__init__.py
|
astropy/nddata/__init__.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
The `astropy.nddata` subpackage provides the `~astropy.nddata.NDData`
class and related tools to manage n-dimensional array-based data (e.g.
CCD images, IFU Data, grid-based simulation data, ...). This is more than
just `numpy.ndarray` objects, because it provides metadata that cannot
be easily provided by a single array.
"""
from .nddata import *
from .nddata_base import *
from .nddata_withmixins import *
from .nduncertainty import *
from .flag_collection import *
from .decorators import *
from .mixins.ndarithmetic import *
from .mixins.ndslicing import *
from .mixins.ndio import *
from .compat import *
from .utils import *
from .ccddata import *
from .. import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `astropy.nddata`.
"""
warn_unsupported_correlated = _config.ConfigItem(
True,
'Whether to issue a warning if `~astropy.nddata.NDData` arithmetic '
'is performed with uncertainties and the uncertainties do not '
'support the propagation of correlated uncertainties.'
)
warn_setting_unit_directly = _config.ConfigItem(
True,
'Whether to issue a warning when the `~astropy.nddata.NDData` unit '
'attribute is changed from a non-``None`` value to another value '
'that data values/uncertainties are not scaled with the unit change.'
)
conf = Conf()
|
Python
| 0.000015
|
@@ -713,32 +713,55 @@
ccddata import *
+%0Afrom .bitmask import *
%0A%0Afrom .. import
|
f2a9529165d1ab97f9d6f5efef6f67811462dafd
|
fix pylint issues
|
sdcm/utils/latency.py
|
sdcm/utils/latency.py
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
from sdcm.db_stats import PrometheusDBStats
def avg(values):
return sum(values)/len(values)
def collect_latency(monitor_node, start, end, load_type, cluster, nodes_list):
res = dict()
prometheus = PrometheusDBStats(host=monitor_node.external_address)
duration = int(end - start)
cassandra_stress_precision = ['99', '95'] # in the future should include also 'max'
scylla_precision = ['99'] # in the future should include also '95', '5'
for precision in cassandra_stress_precision:
metric = f'c-s {precision}' if precision == 'max' else f'c-s P{precision}'
if not precision == 'max':
precision = f'perc_{precision}'
query = f'collectd_cassandra_stress_{load_type}_gauge{{type="lat_{precision}"}}'
query_res = prometheus.query(query, start, end)
latency_values_lst = list()
max_latency_values_lst = list()
for entry in query_res:
if not entry['values']:
continue
sequence = [float(val[-1]) for val in entry['values'] if not val[-1].lower() == 'nan']
if not sequence or all([val == sequence[0] for val in sequence]):
continue
latency_values_lst.extend(sequence)
max_latency_values_lst.extend(sequence)
if latency_values_lst:
res[metric] = format(avg(latency_values_lst), '.2f')
if max_latency_values_lst:
res[f'{metric} max'] = format(max(max_latency_values_lst), '.2f')
if load_type == 'mixed':
load_type = ['read', 'write']
else:
load_type = [load_type]
for load in load_type:
for precision in scylla_precision:
query = f'histogram_quantile(0.{precision},sum(rate(scylla_storage_proxy_coordinator_{load}_' \
f'latency_bucket{{}}[{duration}s])) by (instance, le))'
query_res = prometheus.query(query, start, end)
for entry in query_res:
node_ip = entry['metric']['instance'].replace('[', '').replace(']', '')
node = cluster.get_node_by_ip(node_ip)
if not node:
for db_node in nodes_list:
if db_node.ip_address == node_ip:
node = db_node
if node:
node_idx = node.name.split('-')[-1]
else:
continue
node_name = f'node-{node_idx}'
metric = f"Scylla P{precision}_{load} - {node_name}"
if not entry['values']:
continue
sequence = [float(val[-1]) for val in entry['values'] if not val[-1].lower() == 'nan']
if sequence:
res[metric] = format(avg(sequence) / 1000, '.2f')
return res
def calculate_latency(latency_results):
result_dict = dict()
all_keys = list(latency_results.keys())
steady_key = ''
if all_keys:
steady_key = [key for key in all_keys if 'steady' in key.lower()]
if not steady_key or not all_keys:
return latency_results
else:
steady_key = all_keys.pop(all_keys.index(steady_key[0]))
result_dict[steady_key] = latency_results[steady_key].copy()
for key in all_keys:
result_dict[key] = latency_results[key].copy()
temp_dict = dict()
for cycle in latency_results[key]['cycles']:
for metric, value in cycle.items():
if metric not in temp_dict:
temp_dict[metric] = list()
temp_dict[metric].append(value)
for temp_key, temp_val in temp_dict.items():
if 'Cycles Average' not in result_dict[key]:
result_dict[key]['Cycles Average'] = dict()
average = format(avg([float(val) for val in temp_val]), '.2f')
result_dict[key]['Cycles Average'][temp_key] = float(f'{average}')
if 'Relative to Steady' not in result_dict[key]:
result_dict[key]['Relative to Steady'] = dict()
if temp_key in latency_results[steady_key]:
steady_val = float(latency_results[steady_key][temp_key])
if steady_val != 0:
result_dict[key]['Relative to Steady'][temp_key] = \
format((float(average) - steady_val), '.2f')
if 'color' not in result_dict[key]:
result_dict[key]['color'] = {}
if float(average) >= 3 * steady_val: # right now it is only a 10% difference, to test if it works
result_dict[key]['color'][temp_key] = 'red'
else:
result_dict[key]['color'][temp_key] = 'blue'
return result_dict
|
Python
| 0.000002
|
@@ -603,16 +603,110 @@
lues)%0A%0A%0A
+# pylint: disable=too-many-arguments,too-many-locals,too-many-nested-blocks,too-many-branches%0A
def coll
@@ -1722,17 +1722,16 @@
or all(
-%5B
val == s
@@ -1760,17 +1760,16 @@
sequence
-%5D
):%0A
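Besides the module-level pylint pragma, the second and third hunks drop the square brackets inside all(...): a generator expression lets all() short-circuit on the first mismatch instead of materialising the whole list first. For example:

values = [1.0, 2.0, 1.0, 1.0]
all([v == values[0] for v in values])  # builds the full list, then checks
all(v == values[0] for v in values)    # stops at the first False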
|
a6883813694b0c41a271b51af4ecca74351690f2
|
Fix smoothstreamer component test
|
flumotion/test/test_component_smoothstreamer.py
|
flumotion/test/test_component_smoothstreamer.py
|
# -*- Mode: Python; test-case-name: flumotion.test.test_resource -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2009,2010 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# flumotion-fragmented-streaming - Flumotion Advanced fragmented streaming
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import setup
setup.setup()
from twisted.trial import unittest
try:
from twisted.web import client
except ImportError:
from twisted.protocols import client
from flumotion.common import log, testsuite, netutils
from flumotion.common.planet import moods
from flumotion.test import comptest
from flumotion.component.consumers.smoothstreamer.smoothstreamer \
import SmoothHTTPLiveStreamer
attr = testsuite.attr
CONFIG = {
'feed': [],
'name': 'smooth-streamer',
'parent': 'default',
'eater': {'default': [('muxer-video:default', 'default')]},
'source': ['muxer-video:default'],
'avatarId': '/default/smooth-streamer',
'clock-master': None,
'plugs': {
'flumotion.component.plugs.streamdata.StreamDataProviderPlug': [],
'flumotion.component.plugs.request.RequestLoggerPlug': [],
},
'type': 'http-smoothstreamer',
}
class SmoothStreamerTestCase(comptest.CompTestTestCase, log.Loggable):
properties = {}
config = CONFIG
def setUp(self):
config = self.getConfig()
config['properties'] = self.properties.copy()
self.component = SmoothHTTPLiveStreamer(config)
def tearDown(self):
return self.component.stop()
def getConfig(self):
# test classes can override this to change/extend config
return self.config.copy()
# test based on test_component_httpstreamer.py
# (FIXME: write base test class for all http-streamer)
class TestSmoothStreamerNoPlug(SmoothStreamerTestCase):
def testGetUrlIsManifest(self):
self.failUnless(self.component.getUrl().endswith("Manifest"))
class TestSmoothStreamerDataPlug(SmoothStreamerTestCase):
def getConfig(self):
config = CONFIG.copy()
sType = 'flumotion.component.plugs.streamdata.StreamDataProviderPlug'
pType = 'streamdataprovider-example'
config['plugs'] = {sType: [
{
'entries': {
'default': {
'module-name': 'flumotion.component.plugs.streamdata',
'function-name': 'StreamDataProviderExamplePlug',
}
}
},
]}
return config
def testGetStreamData(self):
streamData = self.component.getStreamData()
self.assertEquals(streamData['protocol'], 'HTTP')
self.assertEquals(streamData['description'], 'Flumotion Stream')
self.failUnless(streamData['url'].startswith('http://'))
# plug is started before component can do getUrl
testGetStreamData.skip = 'See #1137'
class TestSmoothStreamer(comptest.CompTestTestCase, log.Loggable):
slow = True # and ugly...
def setUp(self):
self.tp = comptest.ComponentTestHelper()
prod = ('videotestsrc is-live=1 ! ' \
'video/x-raw-yuv,width=(int)320,height=(int)240, '\
'framerate=(fraction)30/1 ! ' \
'flumch264enc max-keyframe-distance=15 ' \
'min-keyframe-distance=15 bitrate=400000 ! '\
'ismlmux fragment-duration=500 ' \
'trak-timescale=10000000 movie-timescale=10000000')
self.s = \
'flumotion.component.consumers.smoothstreamer.'\
'SmoothHTTPLiveStreamer'
self.prod = comptest.pipeline_src(prod)
def tearDown(self):
d = comptest.delayed_d(1, None)
d.addCallback(comptest.cleanup_reactor)
return d
def _getFreePort(self):
while True:
port = netutils.tryPort()
if port is not None:
break
return port
def _initComp(self):
self.compWrapper =\
comptest.ComponentWrapper('http-smoothstreamer',
SmoothHTTPLiveStreamer,
name='smooth-streamer',
props={'mount-point': 'mytest',
'port': self._getFreePort()})
self.tp.set_flow([self.prod, self.compWrapper])
d = self.tp.start_flow()
d.addCallback(lambda _:
self.__setattr__('comp', self.compWrapper.comp))
# wait for the converter to go happy
d.addCallback(lambda _: self.compWrapper.wait_for_mood(moods.happy))
return d
def getURL(self, path):
# path should start with /
return 'http://localhost:%d%s' % (self.compWrapper.comp.port, path)
def testManifestAndFragment(self):
d = self._initComp()
d.addCallback(lambda _: comptest.delayed_d(2.0, _))
def check_manifest(r):
from xml.dom.minidom import parseString
media = parseString(r)
c = media.getElementsByTagName("SmoothStreamingMedia")[0]\
.getElementsByTagName("StreamIndex")[0]\
.getElementsByTagName("c")
# in 2 seconds after being happy we should really have at
# least 2 fragments (1 second of encoded data more)
self.failIf(len(c) < 2)
# return last known timestamp
return int(c[-1].getAttribute("t"))
d.addCallback(lambda _: \
client.getPage(self.getURL('/mytest/Manifest')))
d.addCallback(check_manifest)
def check_fragment(f):
# make sure we get at least 1k of encoded video fragment..
self.failIf(len(f) < 1000)
# and that we got a moof
self.assertEqual(f[4:8], "moof")
url = '/mytest/QualityLevels(400000)/Fragments(video=%d)'
d.addCallback(lambda t: client.getPage(
self.getURL(url % t)))
d.addCallback(check_fragment)
# and finally stop the flow
# d.addCallback(lambda _: self.tp.stop_flow())
return d
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000001
|
@@ -831,16 +831,27 @@
netutils
+, gstreamer
%0Afrom fl
@@ -3322,32 +3322,234 @@
ef setUp(self):%0A
+ if not gstreamer.element_factory_exists('keyunitscheduler'):%0A from flumotion.component.effects.kuscheduler %5C%0A import kuscheduler%0A kuscheduler.register()%0A
self.tp
@@ -3768,110 +3768,51 @@
'
-flumch264enc max-keyframe-distance=15 ' %5C%0A 'min-keyframe-distance=15 bitrate=4
+keyunitsscheduler interval = 10000
00000 !
-
'
+
%5C%0A
@@ -3830,37 +3830,30 @@
'
-ismlmux fragment-duration=500
+flumch264enc ! ismlmux
' %5C
|
f7e279777995a716dfdee01cc37aed5e94eafa09
|
ADD multilabel support
|
ParamSklearn/components/classification/liblinear_svc.py
|
ParamSklearn/components/classification/liblinear_svc.py
|
import sklearn.svm
from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \
CategoricalHyperparameter, Constant
from HPOlibConfigSpace.forbidden import ForbiddenEqualsClause, \
ForbiddenAndConjunction
from ParamSklearn.components.base import ParamSklearnClassificationAlgorithm
from ParamSklearn.implementations.util import softmax
from ParamSklearn.constants import *
class LibLinear_SVC(ParamSklearnClassificationAlgorithm):
# Liblinear is not deterministic as it uses a RNG inside
def __init__(self, penalty, loss, dual, tol, C, multi_class,
fit_intercept, intercept_scaling, class_weight=None,
random_state=None):
self.penalty = penalty
self.loss = loss
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.estimator = None
def fit(self, X, Y):
self.C = float(self.C)
self.tol = float(self.tol)
self.dual = self.dual == 'True'
self.fit_intercept = self.fit_intercept == 'True'
self.intercept_scaling = float(self.intercept_scaling)
if self.class_weight == "None":
self.class_weight = None
self.estimator = sklearn.svm.LinearSVC(penalty=self.penalty,
loss=self.loss,
dual=self.dual,
tol=self.tol,
C=self.C,
class_weight=self.class_weight,
fit_intercept=self.fit_intercept,
intercept_scaling=self.intercept_scaling,
multi_class=self.multi_class,
random_state=self.random_state)
self.estimator.fit(X, Y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
df = self.estimator.decision_function(X)
return softmax(df)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Liblinear-SVC',
'name': 'Liblinear Support Vector Classification',
'handles_missing_values': False,
'handles_nominal_values': False,
'handles_numerical_features': True,
'prefers_data_scaled': True,
# Find out if this is good because of sparsity
'prefers_data_normalized': False,
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': False,
'is_deterministic': False,
'handles_sparse': True,
'input': (SPARSE, DENSE, UNSIGNED_DATA),
'output': (PREDICTIONS,),
'preferred_dtype': None}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None):
cs = ConfigurationSpace()
penalty = cs.add_hyperparameter(CategoricalHyperparameter(
"penalty", ["l1", "l2"], default="l2"))
loss = cs.add_hyperparameter(CategoricalHyperparameter(
"loss", ["hinge", "squared_hinge"], default="squared_hinge"))
dual = cs.add_hyperparameter(Constant("dual", "False"))
# This is set ad-hoc
tol = cs.add_hyperparameter(UniformFloatHyperparameter(
"tol", 1e-5, 1e-1, default=1e-4, log=True))
C = cs.add_hyperparameter(UniformFloatHyperparameter(
"C", 0.03125, 32768, log=True, default=1.0))
multi_class = cs.add_hyperparameter(Constant("multi_class", "ovr"))
# These are set ad-hoc
fit_intercept = cs.add_hyperparameter(Constant("fit_intercept", "True"))
intercept_scaling = cs.add_hyperparameter(Constant(
"intercept_scaling", 1))
penalty_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(penalty, "l1"),
ForbiddenEqualsClause(loss, "hinge")
)
constant_penalty_and_loss = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "False"),
ForbiddenEqualsClause(penalty, "l2"),
ForbiddenEqualsClause(loss, "hinge")
)
penalty_and_dual = ForbiddenAndConjunction(
ForbiddenEqualsClause(dual, "False"),
ForbiddenEqualsClause(penalty, "l1")
)
cs.add_forbidden_clause(penalty_and_loss)
cs.add_forbidden_clause(constant_penalty_and_loss)
cs.add_forbidden_clause(penalty_and_dual)
return cs
|
Python
| 0
|
@@ -11,16 +11,42 @@
earn.svm
+%0Aimport sklearn.multiclass
%0A%0Afrom H
@@ -1480,37 +1480,32 @@
= None%0A%0A
-self.
estimator = skle
@@ -1544,21 +1544,16 @@
enalty,%0A
-
@@ -1644,21 +1644,16 @@
-
dual=sel
@@ -1702,21 +1702,16 @@
-
-
tol=self
@@ -1716,21 +1716,16 @@
lf.tol,%0A
-
@@ -1810,21 +1810,16 @@
-
class_we
@@ -1876,37 +1876,32 @@
-
-
fit_intercept=se
@@ -1918,21 +1918,16 @@
ercept,%0A
-
@@ -2044,21 +2044,16 @@
-
multi_cl
@@ -2108,37 +2108,32 @@
-
-
random_state=sel
@@ -2148,16 +2148,209 @@
_state)%0A
+%0A if len(Y.shape) == 2 and Y.shape%5B1%5D %3E 1:%0A self.estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)%0A else:%0A self.estimator = estimator%0A%0A
@@ -3386,20 +3386,19 @@
label':
-Fals
+Tru
e,%0A
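The key hunk is the second-to-last one: the configured LinearSVC is kept as-is, but when the target matrix has more than one column it is wrapped in sklearn.multiclass.OneVsRestClassifier, which fits one binary copy per label column (which is also why handles_multilabel flips to True). A hedged standalone sketch of that dispatch, outside ParamSklearn:

import numpy as np
import sklearn.multiclass
import sklearn.svm

def fit_maybe_multilabel(X, Y):
    estimator = sklearn.svm.LinearSVC()
    if len(Y.shape) == 2 and Y.shape[1] > 1:
        # Multilabel indicator matrix: one binary LinearSVC per column.
        estimator = sklearn.multiclass.OneVsRestClassifier(estimator, n_jobs=1)
    estimator.fit(X, Y)
    return estimator

X = np.random.rand(20, 3)
Y = np.tile([[1, 0], [0, 1]], (10, 1))  # two binary label columns
clf = fit_maybe_multilabel(X, Y)
print(clf.predict(X[:2]))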
|
1156be60da01ee34230dcf5e9e993e72fbe7b635
|
make linter happy
|
test_project/test_project/urls.py
|
test_project/test_project/urls.py
|
"""test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include
from django.contrib import admin
try:
from django.urls import re_path # Django >= 4.0
except ImportError:
try:
from django.conf.urls import re_path # Django < 4.0
except ImportError: # Django < 2.0
from django.conf.urls import url as re_path
from core.views import completion_demo
urlpatterns = [
re_path(r'^admin/', admin.site.urls),
re_path(r'^$', completion_demo),
]
if settings.DEBUG and settings.DJDT:
import debug_toolbar
urlpatterns = [
re_path(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
Python
| 0.000001
|
@@ -984,17 +984,16 @@
e_path%0A%0A
-%0A
from cor
|
e716a71bad4e02410e2a0908d630abbee1d4c691
|
Revert the removal of an unused import (in [14175]) that was referenced in documentation. Thanks for noticing, clong.
|
django/contrib/admin/__init__.py
|
django/contrib/admin/__init__.py
|
from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
from django.contrib.admin.options import StackedInline, TabularInline
from django.contrib.admin.sites import AdminSite, site
def autodiscover():
"""
Auto-discover INSTALLED_APPS admin.py modules and fail silently when
not present. This forces an import on them to register any admin bits they
may want.
"""
import copy
from django.conf import settings
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
for app in settings.INSTALLED_APPS:
mod = import_module(app)
# Attempt to import the app's admin module.
try:
before_import_registry = copy.copy(site._registry)
import_module('%s.admin' % app)
except:
# Reset the model registry to the state before the last import as
# this import will have to reoccur on the next request and this
# could raise NotRegistered and AlreadyRegistered exceptions
# (see #8245).
site._registry = before_import_registry
# Decide whether to bubble up this error. If the app just
# doesn't have an admin module, we can ignore the error
# attempting to import it, otherwise we want it to bubble up.
if module_has_submodule(mod, 'admin'):
raise
|
Python
| 0.000002
|
@@ -1,28 +1,207 @@
+# ACTION_CHECKBOX_NAME is unused, but should stay since its import from here%0A# has been referenced in documentation.%0Afrom django.contrib.admin.helpers import ACTION_CHECKBOX_NAME%0A
from django.contrib.admin.op
|
e244cbd6052ebae8bf966bbcd60b54375d16c5df
|
Fix flake8 #2
|
mongodb_consistent_backup/Common/Config.py
|
mongodb_consistent_backup/Common/Config.py
|
import json
import mongodb_consistent_backup
import sys
from datetime import datetime
from argparse import Action
from pkgutil import walk_packages
from yconf import BaseConfiguration
from yconf.util import NestedDict
def parse_config_bool(item):
try:
if isinstance(item, bool):
return item
elif isinstance(item, str):
if item.rstrip().lower() is "true":
return True
return False
except Exception:
return False
class PrintVersions(Action):
def __init__(self, option_strings, dest, nargs=0, **kwargs):
super(PrintVersions, self).__init__(option_strings=option_strings, dest=dest, nargs=nargs, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
print("%s version: %s, git commit hash: %s" % (
mongodb_consistent_backup.prog_name,
mongodb_consistent_backup.__version__,
mongodb_consistent_backup.git_commit
))
import platform
print("Python version: %s" % platform.python_version())
print("Python modules:")
import fabric.version
print("\t%s: %s" % ('Fabric', fabric.version.get_version()))
modules = ['pymongo', 'multiprocessing', 'yaml', 'boto', 'filechunkio']
for module_name in modules:
module = __import__(module_name)
if hasattr(module, '__version__'):
print("\t%s: %s" % (module_name, module.__version__))
sys.exit(0)
class ConfigParser(BaseConfiguration):
def makeParserLoadSubmodules(self, parser):
for _, modname, ispkg in walk_packages(path=mongodb_consistent_backup.__path__, prefix=mongodb_consistent_backup.__name__ + '.'):
if not ispkg:
continue
try:
components = modname.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
parser = mod.config(parser)
except AttributeError:
continue
return parser
def makeParser(self):
parser = super(ConfigParser, self).makeParser()
parser.add_argument("-V", "--version", dest="version", help="Print mongodb_consistent_backup version info and exit", action=PrintVersions)
parser.add_argument("-v", "--verbose", dest="verbose", help="Verbose output", default=False, action="store_true")
parser.add_argument("-H", "--host", dest="host", default="localhost", type=str,
help="MongoDB Hostname, IP address or '<replset>/<host:port>,<host:port>,..' URI (default: localhost)")
parser.add_argument("-P", "--port", dest="port", help="MongoDB Port (default: 27017)", default=27017, type=int)
parser.add_argument("-u", "--user", "--username", dest="username", help="MongoDB Authentication Username (for optional auth)", type=str)
parser.add_argument("-p", "--password", dest="password", help="MongoDB Authentication Password (for optional auth)", type=str)
parser.add_argument("-a", "--authdb", dest="authdb", help="MongoDB Auth Database (for optional auth - default: admin)", default='admin', type=str)
parser.add_argument("--ssl.enabled", dest="ssl.enabled", default=False, action="store_true",
help="Use SSL secured database connections to MongoDB hosts (default: false)")
parser.add_argument("--ssl.insecure", dest="ssl.insecure", default=False, action="store_true",
help="Do not validate the SSL certificate and hostname of the server (default: false)")
parser.add_argument("--ssl.ca_file", dest="ssl.ca_file", default=None, type=str,
help="Path to SSL Certificate Authority file in PEM format (default: use OS default CA)")
parser.add_argument("--ssl.crl_file", dest="ssl.crl_file", default=None, type=str,
help="Path to SSL Certificate Revocation List file in PEM or DER format (for optional cert revocation)")
parser.add_argument("--ssl.client_cert_file", dest="ssl.client_cert_file", default=None, type=str,
help="Path to Client SSL Certificate file in PEM format (for optional client ssl auth)")
parser.add_argument("-L", "--log-dir", dest="log_dir", help="Path to write log files to (default: disabled)", default='', type=str)
parser.add_argument("-T", "--backup-time", dest="backup_time",
default=datetime.now().strftime("%Y%m%d_%H%M"), type=str,
help="Backup timestamp as yyyymmdd_HHMM. (default: current time)")
parser.add_argument("--lock-file", dest="lock_file", default='/tmp/mongodb-consistent-backup.lock', type=str,
help="Location of lock file (default: /tmp/mongodb-consistent-backup.lock)")
parser.add_argument("--rotate.max_backups", dest="rotate.max_backups", default=0, type=int,
help="Maximum number of backups to keep in backup directory (default: unlimited)")
parser.add_argument("--rotate.max_days", dest="rotate.max_days", default=0, type=float,
help="Maximum age in days for backups in backup directory (default: unlimited)")
parser.add_argument("--sharding.balancer.wait_secs", dest="sharding.balancer.wait_secs", default=300, type=int,
help="Maximum time to wait for balancer to stop, in seconds (default: 300)")
parser.add_argument("--sharding.balancer.ping_secs", dest="sharding.balancer.ping_secs", default=3, type=int,
help="Interval to check balancer state, in seconds (default: 3)")
return self.makeParserLoadSubmodules(parser)
class Config(object):
# noinspection PyUnusedLocal
def __init__(self):
self._config = ConfigParser()
self.parse()
self.version = mongodb_consistent_backup.__version__
self.git_commit = mongodb_consistent_backup.git_commit
def _get(self, keys, data=None):
if not data:
data = self._config
if "." in keys:
key, rest = keys.split(".", 1)
return self._get(rest, data[key])
else:
return data[keys]
def check_required(self):
required = [
'backup.name',
'backup.location'
]
for key in required:
try:
self._get(key)
except Exception:
raise mongodb_consistent_backup.Errors.OperationError(
'Field "%s" (config file field: "%s.%s") must be set via command-line or config file!' % (
key,
self._config.environment,
key
)
)
def parse(self):
self._config.parse(self.cmdline)
self.check_required()
def to_dict(self, data):
if isinstance(data, dict) or isinstance(data, NestedDict):
ret = {}
for key in data:
value = self.to_dict(data[key])
if value and key != ('merge'):
if key == "password" or key == "secret_key":
value = "******"
ret[key] = value
return ret
elif isinstance(data, (str, int, bool)):
return data
def dump(self):
return self.to_dict(self._config)
def to_json(self):
return json.dumps(self.dump(), sort_keys=True)
def __repr__(self):
return self.to_json()
def __getattr__(self, key):
try:
return self._config.get(key)
# TODO-timv What can we do to make this better?
except Exception:
return None
|
Python
| 0.000002
|
@@ -384,18 +384,18 @@
lower()
-is
+==
%22true%22:
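The flake8 fix replaces `is "true"` with `== "true"`: `is` compares object identity, which only happens to hold for some interned string literals in CPython, so value comparisons on strings must use `==`. A two-line demonstration:

a = "true"
b = "".join(["tr", "ue"])  # equal value, but a distinct object
print(a == b)  # True  - compares values
print(a is b)  # False - compares identity, so the original test was unreliable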
|
74b3b60cfe6f12f119ac91f04177abc4c7427e5c
|
bump version
|
sensorbee/_version.py
|
sensorbee/_version.py
|
# -*- coding: utf-8 -*-
__version__ = '0.1.1'
|
Python
| 0
|
@@ -37,11 +37,11 @@
= '0.1.
-1
+2
'%0A
|
ec76e31320d41fe4bd8df062f1432ebe476bb798
|
Update IrrigatorsModel.py
|
backend/IrrigatorsModel.py
|
backend/IrrigatorsModel.py
|
import datetime, re;
from sqlalchemy.orm import validates;
from server import DB, FlaskServer;
from components.validation import validate_word;
class Owner_irrigators(DB.Model):
id = DB.Column(DB.Integer, primary_key=True, autoincrement=True);
name = DB.Column(DB.Integer(20));
notation = DB.Column(DB.Text);
created_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'));
created_at = DB.Column(DB.DateTime);
updated_by = DB.Column(DB.Integer, DB.ForeignKey('users.id'), nullable=True);
updated_at = DB.Column(DB.DateTime, nullable=True);
""" @validates('subject')
def validate_subject(self, key, subject):
return validate_word(subject, 2, 320, valid=re.compile('[a-zA-Z\s]+'))
"""
def __init__(self, name, notation, created_at, updated_at):
self.name = name;
self.notation = notation;
self.created_at = datetime.datetime.now();
self.updated_at = self.created_at;
""" def serialize(self, session):
if session.clearance:
return {"id": self.id, "subject": self.subject, "deleted_by": self.deleted_by, "created_by": self.created_by, "created_at": self.created_at.strftime("%B %d, %Y %H:%M:%S"), "updated_at": self.updated_at.strftime("%B %d, %Y %H:%M:%S"), "updated_by": self.updated_by};
else: #public or protected
return {"delete": self.id} if self.deleted_by else {"id": self.id, "subject": self.subject};
@staticmethod
def create(args, secure):
auth = DB.Models["sessions"].authenticate(secure, paranoid=True, role="admin");
if auth.success == False:
return auth.serialize();
else:
security_question = SecurityQuestions(args["subject"], auth.session.user);
DB.session.add(security_question);
DB.session.commit();
return {"success": True};
@staticmethod
def exists(prop, value):
if prop == "subject":
return True if SecurityQuestions.query.filter_by(subject=value).count() else False;
else:
return False;#invalid property
@staticmethod
def request(mode, secure):
auth = DB.Models["sessions"].authenticate(secure, bypass_activation=True);
if auth.success == False:
return auth.serialize();
cache = DB.Models["caches"].cache("security_questions", mode, auth.session, False);
if mode == "config":
data = [i.serialize(auth.session) for i in SecurityQuestions.query.filter(SecurityQuestions.updated_at > cache).all()] if cache else [i.serialize(auth.session) for i in SecurityQuestions.query.all()];
elif mode == "index": #public
data = [i.serialize(auth.session) for i in SecurityQuestions.query.filter(SecurityQuestions.updated_at > cache, SecurityQuestions.deleted_by == None).all()] if cache else [i.serialize(auth.session) for i in SecurityQuestions.query.all()];
else: #login
data = [i.serialize(auth.session) for i in SecurityQuestions.query.filter(SecurityQuestions.updated_at > cache, SecurityQuestions.deleted_by == None, SecurityQuestions.id == auth.session.user.security_question.id).all()] if cache else [i.serialize(auth.session) for i in SecurityQuestions.query.filter_by(id = auth.session.user.security_question.id).all()];
return {"success": True, "items": len(data), "data": data};
@staticmethod
def update(args, secure):
auth = DB.Models["sessions"].authenticate(secure, paranoid=True, role="admin");
if auth.success == False:
return auth.serialize();
else:
security_question = SecurityQuestions.query.get(args["id"]);
for param, value in args.iteritems():
if param == "subject":
security_question.subject = value;
elif param == "deleted_by":
security_question.deleted_by = auth.session.user if value == True else None;
security_question.updater = auth.session.user;
security_question.updated_at = datetime.datetime.now();
DB.session.commit();
return {"success": True};
"""
|
Python
| 0
|
@@ -153,15 +153,9 @@
ass
-Owner_i
+I
rrig
|
5ed3834cbefcf399dbdd0e7f26a033aba65e7a54
|
Make detect_soft_applied exit correctly on non-create migrations
|
django/db/migrations/executor.py
|
django/db/migrations/executor.py
|
from __future__ import unicode_literals
from django.db import migrations
from django.apps.registry import apps as global_apps
from .loader import MigrationLoader
from .recorder import MigrationRecorder
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
applied = set(self.loader.applied_migrations)
for target in targets:
# If the target is (app_label, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
backwards_plan = self.loader.graph.backwards_plan(target)[:-1]
# We only do this if the migration is not the most recent one
# in its app - that is, another migration with the same app
# label is in the backwards plan
if any(node[0] == target[0] for node in backwards_plan):
for migration in backwards_plan:
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False):
"""
Migrates the database up to the given targets.
"""
if plan is None:
plan = self.migration_plan(targets)
for migration, backwards in plan:
if not backwards:
self.apply_migration(migration, fake=fake)
else:
self.unapply_migration(migration, fake=fake)
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
"""
statements = []
for migration, backwards in plan:
with self.connection.schema_editor(collect_sql=True) as schema_editor:
project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
if not backwards:
migration.apply(project_state, schema_editor, collect_sql=True)
else:
migration.unapply(project_state, schema_editor, collect_sql=True)
statements.extend(schema_editor.collected_sql)
return statements
def apply_migration(self, migration, fake=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
# Test to see if this is an already-applied initial migration
if self.detect_soft_applied(migration):
fake = True
else:
# Alright, do it normally
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
migration.apply(project_state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_applied(app_label, name)
else:
self.recorder.record_applied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("apply_success", migration, fake)
def unapply_migration(self, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.project_state((migration.app_label, migration.name), at_end=False)
migration.unapply(project_state, schema_editor)
# For replacement migrations, record individual statuses
if migration.replaces:
for app_label, name in migration.replaces:
self.recorder.record_unapplied(app_label, name)
else:
self.recorder.record_unapplied(migration.app_label, migration.name)
# Report progress
if self.progress_callback:
self.progress_callback("unapply_success", migration, fake)
def detect_soft_applied(self, migration):
"""
Tests whether a migration has been implicitly applied - that the
tables it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel).
"""
project_state = self.loader.project_state((migration.app_label, migration.name), at_end=True)
apps = project_state.render()
found_create_migration = False
for operation in migration.operations:
if isinstance(operation, migrations.CreateModel):
model = apps.get_model(migration.app_label, operation.name)
if model._meta.swapped:
# We have to fetch the model to test with from the
# main app cache, as it's not a direct dependency.
model = global_apps.get_model(model._meta.swapped)
if model._meta.db_table not in self.connection.introspection.get_table_list(self.connection.cursor()):
return False
found_create_migration = True
# If we get this far and we found at least one CreateModel migration,
# the migration is considered implicitly applied.
return found_create_migration
|
Python
| 0.000002
|
@@ -6744,32 +6744,79 @@
igration = True%0A
+ else:%0A return False%0A
# If we
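Decoded for readability, the compact diff above adds an else branch so that any operation other than CreateModel makes detect_soft_applied return False immediately; a sketch of the patched loop, reconstructed from the diff and not verified against the Django tree:

    for operation in migration.operations:
        if isinstance(operation, migrations.CreateModel):
            ...
            found_create_migration = True
        else:
            # Any non-create operation means the migration cannot be
            # treated as implicitly applied.
            return False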
|
ff41218c0a63959a34969eefafd9951c48ef667f
|
convert `test/test_sparql/test_sparql_parser.py` to pytest (#2063)
|
test/test_sparql/test_sparql_parser.py
|
test/test_sparql/test_sparql_parser.py
|
import math
import sys
import unittest
from typing import Set, Tuple
from rdflib import Graph, Literal
from rdflib.namespace import Namespace
from rdflib.plugins.sparql.processor import processUpdate
from rdflib.term import Node
def triple_set(graph: Graph) -> Set[Tuple[Node, Node, Node]]:
return set(graph.triples((None, None, None)))
class SPARQLParserTests(unittest.TestCase):
def test_insert_recursionlimit(self) -> None:
# These values are experimentally determined
# to cause the RecursionError reported in
# https://github.com/RDFLib/rdflib/issues/1336
resource_count = math.ceil(sys.getrecursionlimit() / (33 - 3))
self.do_insert(resource_count)
def test_insert_large(self) -> None:
self.do_insert(200)
def do_insert(self, resource_count: int) -> None:
EGV = Namespace("http://example.org/vocab#")
EGI = Namespace("http://example.org/instance#")
prop0, prop1, prop2 = EGV["prop0"], EGV["prop1"], EGV["prop2"]
g0 = Graph()
for index in range(resource_count):
resource = EGI[f"resource{index}"]
g0.add((resource, prop0, Literal(index)))
g0.add((resource, prop1, Literal("example resource")))
g0.add((resource, prop2, Literal(f"resource #{index}")))
g0ntriples = g0.serialize(format="ntriples")
g1 = Graph()
self.assertNotEqual(triple_set(g0), triple_set(g1))
processUpdate(g1, f"INSERT DATA {{ {g0ntriples!s} }}")
self.assertEqual(triple_set(g0), triple_set(g1))
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -20,24 +20,8 @@
sys%0A
-import unittest%0A
from
@@ -329,16 +329,20 @@
%0A%0Aclass
+Test
SPARQLPa
@@ -349,32 +349,8 @@
rser
-Tests(unittest.TestCase)
:%0A
@@ -1360,28 +1360,15 @@
-self.
assert
-NotEqual(
+
trip
@@ -1369,33 +1369,35 @@
t triple_set(g0)
-,
+ !=
triple_set(g1))
@@ -1395,17 +1395,16 @@
_set(g1)
-)
%0A%0A
@@ -1473,25 +1473,15 @@
-self.
assert
-Equal(
+
trip
@@ -1490,17 +1490,19 @@
_set(g0)
-,
+ ==
triple_
@@ -1512,56 +1512,5 @@
(g1)
-)%0A%0A%0Aif __name__ == %22__main__%22:%0A%0A unittest.main()
%0A
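Decoded, the diff drops the unittest import and the __main__ block, renames the class to TestSPARQLParser with no TestCase base, and replaces the assertEqual/assertNotEqual calls with bare asserts; a sketch of the converted pieces, reconstructed from the diff:

class TestSPARQLParser:
    def do_insert(self, resource_count: int) -> None:
        # ... graph construction unchanged ...
        assert triple_set(g0) != triple_set(g1)
        processUpdate(g1, f"INSERT DATA {{ {g0ntriples!s} }}")
        assert triple_set(g0) == triple_set(g1)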
|
413dfb6e782e16b6509fdb34b1ece89af9d2237f
|
Simplify number_of_nonisomorphic_trees.
|
networkx/generators/nonisomorphic_trees.py
|
networkx/generators/nonisomorphic_trees.py
|
"""
Implementation of the Wright, Richmond, Odlyzko and McKay (WROM)
algorithm for the enumeration of all non-isomorphic free trees of a
given order. Rooted trees are represented by level sequences, i.e.,
lists in which the i-th element specifies the distance of vertex i to
the root.
"""
# Copyright (C) 2013 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
__author__ = "\n".join(["Aric Hagberg (hagberg@lanl.gov)",
"Mridul Seth (seth.mridul@gmail.com)"])
__all__ = ['nonisomorphic_trees',
'number_of_nonisomorphic_trees']
import networkx as nx
def nonisomorphic_trees(order, create="graph"):
"""Returns a list of nonisomporphic trees
Parameters
----------
order : int
order of the desired tree(s)
    create : graph or matrix (default="graph")
       If graph is selected a list of trees will be returned,
       if matrix is selected a list of adjacency matrices will
       be returned
Returns
-------
G : List of NetworkX Graphs
M : List of Adjacency matrices
Reference
---------
"""
if order < 2:
raise ValueError
# start at the path graph rooted at its center
layout = list(range(order // 2 + 1)) + list(range(1, (order + 1) // 2))
while layout is not None:
layout = _next_tree(layout)
if layout is not None:
if create == "graph":
yield _layout_to_graph(layout)
elif create == "matrix":
yield _layout_to_matrix(layout)
layout = _next_rooted_tree(layout)
def number_of_nonisomorphic_trees(order):
"""Returns the number of nonisomorphic trees
Parameters
----------
order : int
order of the desired tree(s)
Returns
-------
length : Number of nonisomorphic graphs for the given order
Reference
---------
"""
f = lambda x: list(nonisomorphic_trees(x))
return len(f(order))
def _next_rooted_tree(predecessor, p=None):
"""One iteration of the Beyer-Hedetniemi algorithm."""
if p is None:
p = len(predecessor) - 1
while predecessor[p] == 1:
p -= 1
if p == 0:
return None
q = p - 1
while predecessor[q] != predecessor[p] - 1:
q -= 1
result = list(predecessor)
for i in range(p, len(result)):
result[i] = result[i - p + q]
return result
def _next_tree(candidate):
"""One iteration of the Wright, Richmond, Odlyzko and McKay
algorithm."""
# valid representation of a free tree if:
# there are at least two vertices at layer 1
# (this is always the case because we start at the path graph)
left, rest = _split_tree(candidate)
# and the left subtree of the root
# is less high than the tree with the left subtree removed
left_height = max(left)
rest_height = max(rest)
valid = rest_height >= left_height
if valid and rest_height == left_height:
# and, if left and rest are of the same height,
# if left does not encompass more vertices
if len(left) > len(rest):
valid = False
        # and, if they have the same number of vertices,
# if left does not come after rest lexicographically
elif len(left) == len(rest) and left > rest:
valid = False
if valid:
return candidate
else:
# jump to the next valid free tree
p = len(left)
new_candidate = _next_rooted_tree(candidate, p)
if candidate[p] > 2:
new_left, new_rest = _split_tree(new_candidate)
new_left_height = max(new_left)
suffix = range(1, new_left_height + 2)
new_candidate[-len(suffix):] = suffix
return new_candidate
def _split_tree(layout):
"""Return a tuple of two layouts, one containing the left
subtree of the root vertex, and one containing the original tree
with the left subtree removed."""
one_found = False
m = None
for i in range(len(layout)):
if layout[i] == 1:
if one_found:
m = i
break
else:
one_found = True
if m is None:
m = len(layout)
left = [layout[i] - 1 for i in range(1, m)]
rest = [0] + [layout[i] for i in range(m, len(layout))]
return (left, rest)
def _layout_to_matrix(layout):
"""Create the adjacency matrix for the tree specified by the
given layout (level sequence)."""
result = [[0] * len(layout) for i in range(len(layout))]
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
result[i][j] = result[j][i] = 1
stack.append(i)
return result
def _layout_to_graph(layout):
"""Create a NetworkX Graph for the tree specified by the
    given layout (level sequence)"""
result = [[0] * len(layout) for i in range(len(layout))]
G = nx.Graph()
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
G.add_edge(i, j)
stack.append(i)
return G
|
Python
| 0.000082
|
@@ -1978,27 +1978,32 @@
-f = lambda x: list(
+length = sum(1 for _ in
noni
@@ -2018,17 +2018,21 @@
c_trees(
-x
+order
))%0A r
@@ -2044,18 +2044,11 @@
len
-(f(order))
+gth
%0A%0A%0Ad
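Decoded, the diff swaps the list-building lambda for a generator expression, counting the trees without materializing them; the simplified function body, reconstructed from the diff:

    # Count lazily instead of building the whole list in memory.
    length = sum(1 for _ in nonisomorphic_trees(order))
    return length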
|
6668b6daca1707be4bacfcf26f03af94b6c82551
|
raise useful error if dir missing #1540
|
bcbio/upload/filesystem.py
|
bcbio/upload/filesystem.py
|
"""Extract files from processing run into output directory, organized by sample.
"""
import os
import shutil
from bcbio import utils
from bcbio.log import logger
from bcbio.upload import shared
def copy_finfo(finfo, storage_dir, pass_uptodate=False):
"""Copy a file into the output storage directory.
"""
if "sample" in finfo and "ext" in finfo and "type" in finfo:
out_file = os.path.join(storage_dir, "%s-%s%s%s" % (finfo["sample"], finfo["ext"],
"-" if (".txt" in finfo["type"]) else ".",
finfo["type"]))
else:
out_file = os.path.join(storage_dir, os.path.basename(finfo["path"]))
out_file = os.path.abspath(out_file)
if not shared.up_to_date(out_file, finfo):
logger.info("Storing in local filesystem: %s" % out_file)
shutil.copy(finfo["path"], out_file)
return out_file
if pass_uptodate:
return out_file
def copy_finfo_directory(finfo, storage_dir):
"""Copy a directory into the final output directory.
"""
out_dir = os.path.abspath(os.path.join(storage_dir, finfo["ext"]))
if not shared.up_to_date(out_dir, finfo):
logger.info("Storing directory in local filesystem: %s" % out_dir)
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
shutil.copytree(finfo["path"], out_dir)
for tmpdir in ["tx", "tmp"]:
if os.path.exists(os.path.join(out_dir, tmpdir)):
shutil.rmtree(os.path.join(out_dir, tmpdir))
os.utime(out_dir, None)
return out_dir
def update_file(finfo, sample_info, config, pass_uptodate=False):
"""Update the file in local filesystem storage.
"""
# skip if we have no directory to upload to
if "dir" not in config:
return
if "sample" in finfo:
storage_dir = utils.safe_makedir(os.path.join(config["dir"], finfo["sample"]))
elif "run" in finfo:
storage_dir = utils.safe_makedir(os.path.join(config["dir"], finfo["run"]))
else:
raise ValueError("Unexpected input file information: %s" % finfo)
if "dir" in finfo:
storage_dir = utils.safe_makedir(os.path.join(storage_dir, finfo["dir"]))
if finfo.get("type") == "directory":
return copy_finfo_directory(finfo, storage_dir)
else:
return copy_finfo(finfo, storage_dir, pass_uptodate=pass_uptodate)
|
Python
| 0
|
@@ -1846,21 +1846,181 @@
r
-eturn
+aise ValueError(%22Expect %60dir%60 in upload specification: %22%0A %22http://bcbio-nextgen.readthedocs.io/en/latest/contents/configuration.html#upload%22)
%0A if
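Decoded, the diff replaces the silent return with an error that points at the upload configuration docs; the patched guard, reconstructed from the diff:

    # A missing `dir` now fails loudly instead of skipping the upload.
    if "dir" not in config:
        raise ValueError("Expect `dir` in upload specification: "
                         "http://bcbio-nextgen.readthedocs.io/en/latest/contents/configuration.html#upload")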
|
450f89b6fd2bb44c2e89336594f5037ed409f60d
|
Return attached command output.
|
dockermap/map/runner/attached.py
|
dockermap/map/runner/attached.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from docker.utils import create_host_config
from ...functional import resolve_value
from ..action import UTIL_ACTION_PREPARE_CONTAINER
from ..policy.utils import update_kwargs, get_instance_volumes
from .utils import get_preparation_cmd
class AttachedConfigMixin(object):
def get_attached_preparation_create_kwargs(self, config, volume_container, kwargs=None):
"""
Generates keyword arguments for the Docker client to prepare an attached container (i.e. adjust user and
permissions).
:param config: Configuration.
:type config: dockermap.map.runner.ActionConfig
:param volume_container: Name of the container that shares the volume.
:type volume_container: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
path = resolve_value(config.container_map.volumes[config.instance_name])
cmd = get_preparation_cmd(config.container_config, path)
if not cmd:
return None
c_kwargs = dict(
image=self._policy.core_image,
command=' && '.join(cmd),
user='root',
network_disabled=True,
)
hc_extra_kwargs = kwargs.pop('host_config', None) if kwargs else None
if config.client_config.get('use_host_config'):
hc_kwargs = self.get_attached_preparation_host_config_kwargs(config, None, volume_container,
kwargs=hc_extra_kwargs)
if hc_kwargs:
c_kwargs['host_config'] = create_host_config(**hc_kwargs)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_attached_preparation_host_config_kwargs(self, config, container_name, volume_container, kwargs=None):
"""
Generates keyword arguments for the Docker client to set up the HostConfig for preparing an attached container
(i.e. adjust user and permissions) or start the preparation.
:param config: Configuration.
:type config: dockermap.map.runner.ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param volume_container: Name of the container that shares the volume.
:type volume_container: unicode | str
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
c_kwargs = dict(volumes_from=[volume_container], version=config.client_config.version)
if container_name:
c_kwargs['container'] = container_name
update_kwargs(c_kwargs, kwargs)
return c_kwargs
def get_attached_preparation_wait_kwargs(self, config, container_name, kwargs=None):
"""
Generates keyword arguments for waiting for a container when preparing a volume. The container name may be
the container being prepared, or the id of the container calling preparation commands.
:param config: Configuration.
:type config: dockermap.map.runner.ActionConfig
:param container_name: Container name or id. Set ``None`` when included in kwargs for ``create_container``.
:type container_name: unicode | str | NoneType
:param kwargs: Additional keyword arguments to complement or override the configuration-based values.
:type kwargs: dict | NoneType
:return: Resulting keyword arguments.
:rtype: dict
"""
client_config = config.client_config
wait_timeout = client_config.get('wait_timeout')
if wait_timeout is not None:
c_kwargs = dict(timeout=wait_timeout)
update_kwargs(c_kwargs, kwargs)
return c_kwargs
return kwargs
class AttachedPreparationMixin(AttachedConfigMixin):
"""
Utility mixin for preparing attached containers with file system owners and permissions.
"""
attached_action_method_names = [
(UTIL_ACTION_PREPARE_CONTAINER, 'prepare_attached'),
]
prepare_local = True
policy_options = ['prepare_local']
def _prepare_container(self, client, config, volume_container):
"""
Runs a temporary container for preparing an attached volume for a container configuration.
:param client: Docker client.
:type client: docker.Client
:param config: Configuration.
:type config: dockermap.map.runner.ActionConfig
:param volume_container: Name of the container that shares the volume.
:type volume_container: unicode | str
"""
apc_kwargs = self.get_attached_preparation_create_kwargs(config, volume_container)
if not apc_kwargs:
return
images = self._policy.images[config.client_name]
images.ensure_image(apc_kwargs['image'])
a_wait_kwargs = self.get_attached_preparation_wait_kwargs(config, volume_container)
client.wait(volume_container, **a_wait_kwargs)
temp_container = client.create_container(**apc_kwargs)
temp_id = temp_container['Id']
try:
if config.client_config.get('use_host_config'):
client.start(temp_id)
else:
aps_kwargs = self.get_attached_preparation_host_config_kwargs(config, temp_id, volume_container)
client.start(**aps_kwargs)
temp_wait_kwargs = self.get_attached_preparation_wait_kwargs(config, temp_id)
client.wait(temp_id, **temp_wait_kwargs)
finally:
client.remove_container(temp_id)
def prepare_attached(self, config, a_name, **kwargs):
"""
Prepares an attached volume for a container configuration.
:param config: Configuration.
:type config: dockermap.map.runner.ActionConfig
:param a_name: The full name or id of the container sharing the volume.
:type a_name: unicode | str
"""
client = config.client
if not (self.prepare_local and hasattr(client, 'run_cmd')):
return self._prepare_container(client, config, a_name)
instance_detail = client.inspect_container(a_name)
volumes = get_instance_volumes(instance_detail)
path = resolve_value(config.container_map.volumes[config.instance_name])
local_path = volumes.get(path)
if not local_path:
raise ValueError("Could not locate local path of volume alias '{0}' / "
"path '{1}' in container {2}.".format(config.instance_name, path, a_name))
for cmd in get_preparation_cmd(config.container_config, local_path):
client.run_cmd(cmd)
|
Python
| 0.000001
|
@@ -6921,16 +6921,69 @@
_name))%0A
+ return %5B%0A client.run_cmd(cmd)%0A
@@ -7049,17 +7049,16 @@
al_path)
-:
%0A
@@ -7058,32 +7058,10 @@
- client.run_cmd(cmd)
+%5D
%0A
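Decoded, the diff turns the trailing for-loop into a returned list comprehension so prepare_attached hands back the output of each command; the patched tail, reconstructed from the diff:

        # Collect and return the output of every preparation command.
        return [
            client.run_cmd(cmd)
            for cmd in get_preparation_cmd(config.container_config, local_path)
        ]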
|
2495d9ddaeb9f57a1a46129eb330e0c2f8636890
|
Check for non-interactive shell before prompting when deleting
|
bin/disco_elasticsearch.py
|
bin/disco_elasticsearch.py
|
#!/usr/bin/env python
"""
Manages ElasticSearch
"""
from __future__ import print_function
import argparse
import sys
from disco_aws_automation import DiscoElasticsearch
from disco_aws_automation.disco_aws_util import run_gracefully
from disco_aws_automation.disco_logging import configure_logging
# R0912 Allow more than 12 branches so we can parse a lot of commands..
# pylint: disable=R0912
def get_parser():
'''Returns command line parser'''
parser = argparse.ArgumentParser(description='Asiaq ElasticSearch Creation and Management')
parser.add_argument('--debug', dest='debug', action='store_const', const=True, default=False,
help='Log in debug level')
parser.add_argument("--env", dest="env", help="Environment name", type=str)
subparsers = parser.add_subparsers(help='Sub-command help')
parser_list = subparsers.add_parser("list", help="List all ElasticSearch domains")
parser_list.set_defaults(mode="list")
parser_list.add_argument("--endpoint", dest="endpoint", action='store_const', default=False, const=True,
help="Display AWS-provided endpoint")
parser_create = subparsers.add_parser("create",
help="Create an ElasticSearch domain. If no options are provided, "
"default behavior is to create all ElasticSearch domains found in "
"the config.")
parser_create.set_defaults(mode="create")
parser_create.add_argument("--name", dest="name", type=str, action="append",
help="Name of the ElasticSearch domain")
parser_update = subparsers.add_parser("update",
help="Update an ElasticSearch domain. If no options are provided, "
"default behavior is to update all ElasticSearch domains found in "
"the config.")
parser_update.set_defaults(mode="update")
parser_update.add_argument("--name", dest="name", type=str, action="append",
help="Name of the ElasticSearch domain")
parser_delete = subparsers.add_parser("delete",
help="Delete an ElasticSearch domain. If no options are provided, "
"default behavior is to delete all ElasticSearch domains found in "
"the config.")
parser_delete.set_defaults(mode="delete")
parser_delete.add_argument("--name", dest="name", type=str, action="append",
help="Name of the ElasticSearch domain")
parser_delete.add_argument("--all", dest="delete_all", action='store_const', default=False, const=True,
help="Delete *all* ElasticSearch domains")
return parser
def run():
"""Parses command line and dispatches the commands"""
parser = get_parser()
args = parser.parse_args()
configure_logging(args.debug)
env = args.env
disco_es = DiscoElasticsearch(env)
if args.mode == "list":
entries = disco_es.list(include_endpoint=args.endpoint)
headers = ["Elastic Search Domain Name", "Internal Name", "Route 53 Endpoint"]
format_line = u"{0:<28} {1:<15} {2:<35}"
if args.endpoint:
format_line += u" {3:<80}"
headers.append("Elastic Search Endpoint")
print(format_line.format(*headers), file=sys.stderr)
for entry in entries:
values = [entry["elasticsearch_domain_name"], entry["internal_name"], entry["route_53_endpoint"]]
if args.endpoint:
values.append(entry["elasticsearch_endpoint"] or u"-")
print(format_line.format(*values))
elif args.mode == "create":
if args.name:
for name in args.name:
disco_es.create(name)
else:
disco_es.create()
elif args.mode == "update":
if args.name:
for name in args.name:
disco_es.update(name)
else:
disco_es.update()
elif args.mode == "delete":
print("Deleting an ElasticSearch domain destroys all automated snapshots of its data. Be careful!")
if args.name:
prompt = "Are you sure you want to delete ElasticSearch domains {}? (y/N)".format(args.name)
response = raw_input(prompt)
if response.lower().startswith("y"):
for name in args.name:
disco_es.delete(name)
else:
scope = "all configured" if not args.delete_all else "*all*"
prompt = "Are you sure you want to delete {} ElasticSearch domains? (y/N)".format(scope)
response = raw_input(prompt)
if response.lower().startswith("y"):
disco_es.delete(delete_all=args.delete_all)
if __name__ == "__main__":
run_gracefully(run)
|
Python
| 0
|
@@ -224,16 +224,27 @@
acefully
+, is_truthy
%0Afrom di
@@ -3158,16 +3158,63 @@
rch(env)
+%0A interactive_shell = sys.__stdin__.isatty()
%0A%0A if
@@ -4290,20 +4290,23 @@
pr
-int(
+ompt =
%22Deletin
@@ -4344,16 +4344,23 @@
oys all
+of its
automate
@@ -4374,20 +4374,8 @@
hots
- of its data
. Be
@@ -4383,18 +4383,19 @@
careful!
+%5Cn
%22
-)
%0A
@@ -4420,32 +4420,33 @@
prompt
++
= %22Are you sure
@@ -4531,83 +4531,63 @@
-response = raw_input(prompt)%0A if response.lower().startswith(%22y%22
+if not interactive_shell or is_truthy(raw_input(prompt)
):%0A
@@ -4772,16 +4772,17 @@
prompt
++
= %22Are y
@@ -4871,83 +4871,63 @@
-response = raw_input(prompt)%0A if response.lower().startswith(%22y%22
+if not interactive_shell or is_truthy(raw_input(prompt)
):%0A
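Decoded, the diff imports is_truthy, checks once whether stdin is a TTY, and only honours the prompt in an interactive shell; a sketch of the patched delete branch, reconstructed from the diff:

from disco_aws_automation.disco_aws_util import run_gracefully, is_truthy
...
    disco_es = DiscoElasticsearch(env)
    interactive_shell = sys.__stdin__.isatty()
...
        prompt = "Deleting an ElasticSearch domain destroys all of its automated snapshots. Be careful!\n"
        prompt += "Are you sure you want to delete ElasticSearch domains {}? (y/N)".format(args.name)
        if not interactive_shell or is_truthy(raw_input(prompt)):
            for name in args.name:
                disco_es.delete(name)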
|
7e24165c828a64389d593c63299df4ff22dcb881
|
disable swagger docs for tests
|
rest_auth/urls.py
|
rest_auth/urls.py
|
from django.conf import settings
from django.conf.urls import patterns, url, include
from rest_auth.views import Login, Logout, Register, UserDetails, \
PasswordChange, PasswordReset, VerifyEmail, PasswordResetConfirm
urlpatterns = patterns('rest_auth.views',
# URLs that do not require a session or valid token
url(r'^register/$', Register.as_view(),
name='rest_register'),
url(r'^password/reset/$', PasswordReset.as_view(),
name='rest_password_reset'),
url(r'^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
PasswordResetConfirm.as_view(
), name='rest_password_reset_confirm'),
url(r'^login/$', Login.as_view(), name='rest_login'),
url(r'^verify-email/(?P<activation_key>\w+)/$',
VerifyEmail.as_view(), name='verify_email'),
# URLs that require a user to be logged in with a valid
# session / token.
url(r'^logout/$', Logout.as_view(), name='rest_logout'),
url(r'^user/$', UserDetails.as_view(),
name='rest_user_details'),
url(r'^password/change/$', PasswordChange.as_view(),
name='rest_password_change'),
)
if settings.DEBUG:
urlpatterns += patterns('',
# Swagger Docs
url(r'^docs/',
include('rest_framework_swagger.urls')),
)
|
Python
| 0
|
@@ -1548,16 +1548,41 @@
gs.DEBUG
+ and not settings.IS_TEST
:%0A ur
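Decoded, the diff adds a second condition to the guard so the Swagger URLs are not mounted during tests (IS_TEST is a project-specific settings flag):

if settings.DEBUG and not settings.IS_TEST:
    urlpatterns += patterns('',
                            # Swagger Docs
                            url(r'^docs/',
                                include('rest_framework_swagger.urls')),
                            )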
|
5679450ef45bc5a32c9395df9ba0fde50707ba8c
|
Use smaller points for the ORIGEN analysis
|
transmutagen/analysis.py
|
transmutagen/analysis.py
|
from collections import defaultdict
import os
import argparse
import tables
import numpy as np
import matplotlib.pyplot as plt
from .tests.test_transmute import run_transmute_test
from .origen_all import TIME_STEPS
from .util import plt_show_in_terminal
from .cram import get_CRAM_from_cache, CRAM_coeffs
def analyze_origen(file):
plt.clf()
fig, ax = plt.subplots()
times = {'ORIGEN': defaultdict(list), 'CRAM lambdify': defaultdict(list),
'CRAM py_solve': defaultdict(list)}
with tables.open_file(file, mode='r') as h5file:
for run in 'ORIGEN', 'CRAM lambdify', 'CRAM py_solve':
for lib in h5file.root:
table = h5file.get_node(lib, run.lower().replace(' ', '-'))
for row in table:
times[run][row['time']].append(row['execution time ' + run])
xvals = sorted(TIME_STEPS)
x = []
y = []
for i, t in enumerate(xvals):
itimes = times[run][sorted(times[run])[i]]
x += [t]*len(itimes)
y += itimes
print("Longest", run, "runtime", max(y), "seconds")
print("Shortest", run, "runtime", min(y), "seconds")
ax.plot(x, y, 'o', label=run)
# Tweak spacing to prevent clipping of tick-labels
plt.subplots_adjust(bottom=0.15)
plt.title("""\
Runtimes for ORIGEN, CRAM lambdify, and CRAM py_solve computing transmutation
over several starting libraries, nuclides, and timesteps.""")
ax.set_xscale('log')
ax.set_xticks(sorted(TIME_STEPS))
ax.xaxis.set_ticklabels([TIME_STEPS[i].replace(' ', '\n') for i in
sorted(TIME_STEPS)], size='small')
ax.set_yscale('log')
ax.legend()
plt.ylabel('Runtime (seconds)')
plt.xlabel('Time step t')
plt_show_in_terminal()
def analyze_nofission():
plt.clf()
for time, time_name in sorted(TIME_STEPS.items()):
nofission_transmutes = {}
for f in os.listdir('data'):
if f.endswith('_nofission.npz'):
lib = f.split('_', 1)[0]
data = os.path.join('data', f)
print("analyzing", data, 'on', time_name)
nofission_transmutes[lib] = run_transmute_test(data, 14, 200,
time, run_all=False, _print=True)
for lib in nofission_transmutes:
for r in nofission_transmutes[lib]:
m = nofission_transmutes[lib][r]
if not isinstance(m, np.ndarray):
m = m.toarray()
if m is None or np.isnan(m).any() or np.isinf(m).any():
print("Could not compute", r, "for", lib)
continue
title = lib + ' ' + r + ' ' + time_name
plot_matrix_sum_histogram(m, title)
def plot_matrix_sum_histogram(m, title='', axis=0):
plt.clf()
plt.hist(np.asarray(np.sum(m, axis=axis)).flatten())
plt.yscale('log', nonposy='clip')
plt.title(title)
plt_show_in_terminal()
plt.close()
def analyze_cram_digits():
print("Computing coefficients (or getting from cache)")
exprs = defaultdict(dict)
# {degree: {prec: {'p': [coeffs], 'q': [coeffs]}}}
correct_digits = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
for degree in range(1, 21):
print("Degree", degree)
for prec in range(100, 1100, 100):
print("Precision", prec)
exprs[degree][prec] = CRAM_coeffs(get_CRAM_from_cache(degree,
prec), prec)
# Assume that 1000 has the most correct digits
coeffs1000 = exprs[degree][1000]
for prec in range(100, 1000, 100):
coeffs = exprs[degree][prec]
for l in 'pq':
for coeff, coeff1000 in zip(coeffs[l], coeffs1000[l]):
correct_digits[degree][prec][l].append(len(os.path.commonprefix([coeff,
coeff1000])) - 1)
# Plot minimum number of correct digits as a function of precision
plt.clf()
fig, ax = plt.subplots()
minvals = defaultdict(list)
for degree in range(1, 21):
print("Degree", degree)
for prec in range(100, 1000, 100):
print(" Precision", prec)
for l in 'pq':
print(' ', end='')
print(l, end=' ')
for i in correct_digits[degree][prec][l]:
print(i, end=' ')
print()
minvals[degree].append(min(correct_digits[degree][prec]['p'] + correct_digits[degree][prec]['q']))
ax.plot(range(100, 1000, 100), minvals[degree], label=degree)
# TODO: Make window wider so the legend isn't chopped off
ax.legend(title="Degree", loc="upper left", bbox_to_anchor=(1,1))
plt.ylabel('Number of correct digits')
plt.xlabel('Precision')
plt_show_in_terminal()
# Plot minimum number of correct digits as a function of degree
plt.clf()
fig, ax = plt.subplots()
minvals = defaultdict(list)
for prec in range(100, 1000, 100):
for degree in range(1, 21):
minvals[prec].append(min(correct_digits[degree][prec]['p'] + correct_digits[degree][prec]['q']))
ax.plot(range(1, 21), minvals[prec], label=prec)
# TODO: Make window wider so the legend isn't chopped off
ax.legend(title="Precision", loc="upper left", bbox_to_anchor=(1,1))
plt.ylabel('Number of correct digits')
plt.xlabel('Degree')
ax.set_xticks(range(1, 21))
plt_show_in_terminal()
def analyze():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--origen-results', default='data/results.hdf5',
help="""HDF5 file for the results of the ORIGEN runs.""")
parser.add_argument('--no-origen', action='store_false', dest='origen',
help="""Don't run the origen analysis.""")
parser.add_argument('--no-nofission', action='store_false',
dest='nofission', help="""Don't run the nofission analysis.""")
parser.add_argument('--cram-digits', action='store_true', help="""Analyze
accuracy of CRAM digits. WARNING: If cache values have not been
precomputed, this will take a long time (> 1 day) to compute.""")
try:
import argcomplete
argcomplete.autocomplete(parser)
except ImportError:
pass
args = parser.parse_args()
if args.origen:
analyze_origen(args.origen_results)
if args.nofission:
analyze_nofission()
if args.cram_digits:
analyze_cram_digits()
if __name__ == '__main__':
analyze()
|
Python
| 0.000002
|
@@ -1239,17 +1239,17 @@
(x, y, '
-o
+.
', label
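Decoded, the diff swaps matplotlib's circle marker for the point marker in the runtime scatter:

            ax.plot(x, y, '.', label=run)  # '.' draws smaller points than 'o'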
|
fbabdb62ae4ca02bca4318d085bf0ec9cc7ab3ef
|
Add color always to easily get the messages (#334)
|
travis/test_precommit.py
|
travis/test_precommit.py
|
#!/usr/bin/env python
import logging
import os
import re
import subprocess
import sys
logging.basicConfig(level=logging.DEBUG)
_logger = logging.getLogger(__name__)
overwrite = os.environ.get("PRECOMMIT_OVERWRITE_CONFIG_FILES", "") == "1"
exclude_lint = os.environ.get("EXCLUDE_LINT", "")
root_dir = os.path.dirname(os.path.abspath(__file__))
precommit_config_dir = os.path.join(root_dir, "cfg", "precommit")
def get_repo(current_dir=None):
if current_dir is None:
current_dir = os.getcwd()
# TODO: Look for .git dir in parent dirs
if not os.path.isdir(os.path.join(current_dir, ".git")):
raise UserWarning("There are not .git repository")
return current_dir
def copy_cfg_files(cfg_dirname, repo_dirname):
exclude_regex = ""
if exclude_lint:
exclude_regex = '(%s)|' % '|'.join([
re.escape(exclude_path.strip()) for exclude_path in exclude_lint.split(',')
if exclude_path and exclude_path.strip()])
for fname in os.listdir(precommit_config_dir):
if not fname.startswith("."):
# all configuration files are hidden
continue
src = os.path.join(precommit_config_dir, fname)
if not os.path.isfile(src):
# if it is not a file skip
continue
dst = os.path.join(repo_dirname, fname)
if not overwrite and os.path.isfile(dst):
# Use the custom files defined in the repo
_logger.info("Use custom file %s", dst)
continue
_logger.info("Copying %s to %s", src, dst)
with open(src, "r") as fsrc, open(dst, "w") as fdst:
for line in fsrc:
if exclude_lint and fname.startswith(".pre-commit-config") and '# EXCLUDE_LINT' in line:
_logger.info("Apply EXCLUDE_LINT=%s to %s", exclude_lint, dst)
line = ' %s\n' % exclude_regex
fdst.write(line)
def main(argv=None, exit=True):
repo_dirname = get_repo()
copy_cfg_files(precommit_config_dir, repo_dirname)
status = 0
_logger.info("Running mandatory checks (affect status build)")
cmd = ["pre-commit", "run", "--all"]
status += subprocess.call(cmd)
_logger.info("Running optional checks (does not affect status build)")
subprocess.call(cmd + ["-c", ".pre-commit-config-optional.yaml"])
if exit:
sys.exit(0 if status == 0 else 1)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2165,16 +2165,34 @@
%22--all%22
+, %22--color=always%22
%5D%0A st
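Decoded, the diff appends --color=always to the shared command list, so both the mandatory and the optional pre-commit runs emit colored output:

    cmd = ["pre-commit", "run", "--all", "--color=always"]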
|
33ef365bffb3aefa053409a72d44b069bdae8c77
|
Make django middleware not crash if user isn't set.
|
blueox/contrib/django/middleware.py
|
blueox/contrib/django/middleware.py
|
import sys
import traceback
import logging
import blueox
from django.conf import settings
class Middleware:
def __init__(self):
host = getattr(settings, 'BLUEOX_HOST', '127.0.0.1')
port = getattr(settings, 'BLUEOX_PORT', 3514)
blueox.configure(host, port)
def process_request(self, request):
request.blueox = blueox.Context(".".join((getattr(settings, 'BLUEOX_NAME', ''), 'request')))
request.blueox.start()
blueox.set('method', request.method)
blueox.set('path', request.path)
headers = {}
for k,v in request.META.iteritems():
if k.startswith('HTTP_') or k in ('CONTENT_LENGTH', 'CONTENT_TYPE'):
headers[k] = v
blueox.set('headers', headers)
blueox.set('uri', request.build_absolute_uri())
blueox.set('client_ip', request.META['REMOTE_ADDR'])
for key in ('version', 'revision'):
if hasattr(request, key):
blueox.set(key, getattr(request, key))
if request.user:
blueox.set('user', request.user.id)
return None
def process_response(self, request, response):
# process_request() is not guaranteed to be called
if not hasattr(request, 'blueox'):
return response
# Other middleware may have blocked our response.
if response is not None:
blueox.set('response_status_code', response.status_code)
if not response.streaming:
blueox.set('response_size', len(response.content))
headers = {}
for k, v in response.items():
headers[k] = v
blueox.set('response_headers', headers)
request.blueox.done()
return response
def process_exception(self, request, exception):
blueox.set('exception', ''.join(traceback.format_exception(*sys.exc_info())))
return None
|
Python
| 0.000001
|
@@ -102,16 +102,24 @@
ddleware
+(object)
:%0A de
@@ -897,58 +897,145 @@
-for key in ('version'
+return None%0A%0A def process_response(self
,
-'
re
-vision'):%0A if
+quest, response):%0A # process_request() is not guaranteed to be called%0A if not
has
@@ -1048,19 +1048,24 @@
equest,
-key
+'blueox'
):%0A
@@ -1075,51 +1075,144 @@
- blueox.set(key, getattr(request, key))%0A
+return response%0A%0A # We collect some additional data in the response, just to ensure%0A # middleware ordering doesn't matter.
%0A
@@ -1219,28 +1219,40 @@
if
+hasattr(
request
-.
+, '
user
+')
:%0A
@@ -1306,131 +1306,48 @@
-return None%0A%0A def process_response(self, request
+for key in ('version'
,
+'
re
-sponse):%0A # process_request() is not guaranteed to be called%0A
+vision'):%0A
@@ -1345,36 +1345,32 @@
%0A if
-not
hasattr(request,
@@ -1366,32 +1366,27 @@
tr(request,
-'blueox'
+key
):%0A
@@ -1384,39 +1384,66 @@
-return response
+ blueox.set(key, getattr(request, key))
%0A%0A #
@@ -1523,16 +1523,17 @@
t None:%0A
+%0A
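Decoded, the diff makes Middleware a new-style class and moves the user and version/revision collection into process_response behind hasattr guards, so a request without a user attribute no longer crashes; a sketch of the patched shape, reconstructed from the diff:

class Middleware(object):
    def process_response(self, request, response):
        # process_request() is not guaranteed to be called
        if not hasattr(request, 'blueox'):
            return response

        # We collect some additional data in the response, just to ensure
        # middleware ordering doesn't matter.
        if hasattr(request, 'user'):
            blueox.set('user', request.user.id)

        for key in ('version', 'revision'):
            if hasattr(request, key):
                blueox.set(key, getattr(request, key))
        ...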
|
869b9c744c589385194c93dccf9b6b14b26b1ec9
|
Read files from local_directory instead of current dir
|
diff.py
|
diff.py
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2015 Ranger Harke
See LICENSE file for details
"""
import sys, argparse, io, os, tarfile, base
def parse_args(args=None):
if args is None:
args = sys.argv
parser = argparse.ArgumentParser(description='Diff a local tree against a remote one and generate a patch file')
parser.add_argument('local_db_file', metavar='LOCAL_DATABASE_FILE', type=str,
help='file from which to read the local checksum database')
parser.add_argument('remote_db_file', metavar='REMOTE_DATABASE_FILE', type=str,
help='file from which to read the remote checksum database')
parser.add_argument('local_directory', metavar='LOCAL_DIRECTORY', type=str,
help='directory corresponding to the local database file')
parser.add_argument('patch_file', metavar='PATCH_FILE', type=str,
help='file in which to store the resulting patch')
parser.add_argument('--ignorelist-file', metavar='IGNORELIST_FILE', type=str, dest='ignorelist_file',
help='file containing a list of shell-style patterns to ignore')
parser.add_argument('--no-diff-new', dest='diff_new', action='store_false',
help='do not generate diffs to add new files to the remote')
parser.add_argument('--diff-changed', dest='diff_changed', action='store_true',
help='generate diffs to update changed files on the remote')
parser.add_argument('--diff-deleted', dest='diff_deleted', action='store_true',
help='generate diffs to remove deleted files from the remote')
parser.add_argument('--verbose', '-v', dest='verbose', action='store_true',
help='display status messages for all operations instead of just exceptional conditions')
parser.set_defaults(ignorelist_file=None, diff_new=True, diff_changed=False,
diff_deleted=False, clean_ignored=False, verbose=False)
return parser.parse_args()
def main():
PatchGenerator(parse_args()).run()
class PatchGenerator(base.VerifierBase):
def __init__(self, args):
super(PatchGenerator, self).__init__(args)
self.local_database = {}
self.remote_database = {}
self.new = 0
self.changed = 0
self.deleted = 0
self.read_database(self.args.local_db_file, self.local_database)
self.read_database(self.args.remote_db_file, self.remote_database)
self.read_ignorelist()
def check_local_file(self, filepath):
local_entry = self.local_database[filepath]
remote_entry = self.remote_database.get(filepath)
if not remote_entry is None:
self.vlog('Existing file %s... ' % (filepath,))
if self.args.diff_changed:
if local_entry[0] != remote_entry[0]:
meta = ('replace\n' + filepath).encode('utf-8')
with io.BytesIO(meta) as metafile:
tarinfo = tarfile.TarInfo(str(self.taridx) + 'meta')
tarinfo.size = len(metafile.getvalue())
self.tarfile.addfile(tarinfo, metafile)
self.tarfile.add(filepath, arcname=str(self.taridx) + 'data')
self.taridx += 1
self.vlog('modified\n')
self.nvlog('Existing file %s changed\n' % (filepath,))
self.changed += 1
else:
self.vlog('unchanged\n')
else:
self.vlog('skipped\n')
remote_entry[1] = True
else:
self.vlog('New file %s... ' % (filepath,))
if self.args.diff_new:
meta = ('add\n' + filepath).encode('utf-8')
with io.BytesIO(meta) as metafile:
tarinfo = tarfile.TarInfo(str(self.taridx) + 'meta')
tarinfo.size = len(metafile.getvalue())
self.tarfile.addfile(tarinfo, metafile)
self.tarfile.add(filepath, arcname=str(self.taridx) + 'data')
self.taridx += 1
self.vlog('added\n')
self.nvlog('New file %s added\n' % (filepath,))
self.new += 1
else:
self.vlog('skipped\n')
def check_remote_file(self, filepath):
remote_entry = self.remote_database[filepath]
if not remote_entry[1]:
self.vlog('Deleted file %s... ' % (filepath,))
if self.args.diff_deleted:
meta = ('delete\n' + filepath).encode('utf-8')
with io.BytesIO(meta) as metafile:
tarinfo = tarfile.TarInfo(str(self.taridx) + 'meta')
tarinfo.size = len(metafile.getvalue())
self.tarfile.addfile(tarinfo, metafile)
self.taridx += 1
self.vlog('removed\n')
self.nvlog('Deleted file %s removed\n' % (filepath,))
self.deleted += 1
else:
self.vlog('skipped\n')
def run(self):
self.tarfile = tarfile.open(self.args.patch_file, 'w|gz')
self.taridx = 0
try:
for filepath in self.local_database:
if not self.match_ignorelist(filepath):
self.check_local_file(filepath)
for filepath in self.remote_database:
if not self.match_ignorelist(filepath):
self.check_remote_file(filepath)
finally:
self.tarfile.close()
self.log('\nSummary:\n')
if self.args.diff_new:
self.log(' %d new files will be added\n' % (self.new,))
if self.args.diff_changed:
self.log(' %d existing files will be updated\n' % (self.changed,))
if self.args.diff_deleted:
self.log(' %d deleted files will be removed\n' % (self.deleted,))
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -3256,32 +3256,72 @@
tarfile.add(
+os.path.join(self.args.local_directory,
filepath
, arcname=st
@@ -3300,32 +3300,33 @@
ectory, filepath
+)
, arcname=str(se
@@ -4140,24 +4140,64 @@
ile.add(
+os.path.join(self.args.local_directory,
filepath
, arcnam
@@ -4188,16 +4188,17 @@
filepath
+)
, arcnam
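Decoded, both tarfile.add calls now resolve the file against the configured local_directory instead of the process's current directory, e.g.:

                    self.tarfile.add(os.path.join(self.args.local_directory, filepath),
                                     arcname=str(self.taridx) + 'data')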
|
42f1d60093a5a816b24e52f088f2d4c2bc6ecb95
|
Fix logic of modulepath handler when modulepath is not defined
|
jupyterlmod/handler.py
|
jupyterlmod/handler.py
|
import json
import os
import lmod
from functools import partial, wraps
from glob import glob
from tornado import web
from jupyter_core.paths import jupyter_path
from notebook.base.handlers import IPythonHandler
def jupyter_path_decorator(func):
@wraps(func)
async def wrapper(self, *args, **kwargs):
jpath_old = os.environ.get("JUPYTER_PATH")
await func(self, *args, **kwargs)
if jpath_old != os.environ.get("JUPYTER_PATH"):
self.kernel_spec_manager.kernel_dirs = jupyter_path("kernels")
return wrapper
class Lmod(IPythonHandler):
@web.authenticated
async def get(self):
lang = self.get_query_argument(name="lang", default=None)
if lang is None:
result = await lmod.list(include_hidden=False)
elif lang == "python":
result = await lmod.freeze()
else:
raise web.HTTPError(400, u'Unknown value for lang argument')
self.finish(json.dumps(result))
@web.authenticated
@jupyter_path_decorator
async def post(self):
modules = self.get_json_body().get('modules')
if not modules:
raise web.HTTPError(400, u'modules missing from body')
elif not isinstance(modules, list):
raise web.HTTPError(400, u'modules argument needs to be a list')
await lmod.load(*modules)
self.finish(json.dumps("SUCCESS"))
@web.authenticated
@jupyter_path_decorator
async def delete(self):
modules = self.get_json_body().get('modules')
if not modules:
raise web.HTTPError(400, u'modules missing from body')
elif not isinstance(modules, list):
raise web.HTTPError(400, u'modules argument needs to be a list')
await lmod.unload(*modules)
self.finish(json.dumps("SUCCESS"))
class LmodModules(IPythonHandler):
@web.authenticated
async def get(self):
result = await lmod.avail()
self.finish(json.dumps(result))
class LmodModule(IPythonHandler):
@web.authenticated
async def get(self, module=None):
result = await lmod.show(module)
self.finish(json.dumps(result))
class LmodCollections(IPythonHandler):
@web.authenticated
async def get(self):
result = await lmod.savelist()
self.finish(json.dumps(result))
@web.authenticated
async def post(self):
name = self.get_json_body().get('name')
if not name:
raise web.HTTPError(400, u'name argument missing')
await lmod.save(name)
self.finish(json.dumps("SUCCESS"))
@web.authenticated
@jupyter_path_decorator
async def patch(self):
name = self.get_json_body().get('name')
if not name:
raise web.HTTPError(400, u'name argument missing')
await lmod.restore(name)
self.finish(json.dumps("SUCCESS"))
class LmodPaths(IPythonHandler):
@web.authenticated
async def get(self):
result = os.environ.get("MODULEPATH", "").split(':')
self.finish(json.dumps(result))
@web.authenticated
@jupyter_path_decorator
async def post(self):
paths = self.get_json_body().get('paths')
append = self.get_json_body().get('append', False)
if not paths:
raise web.HTTPError(400, u'paths argument missing')
await lmod.use(*paths, append=append)
self.finish(json.dumps("SUCCESS"))
@web.authenticated
@jupyter_path_decorator
async def delete(self):
paths = self.get_json_body().get('paths')
if not paths:
raise web.HTTPError(400, u'paths argument missing')
await lmod.unuse(*paths)
self.finish(json.dumps("SUCCESS"))
class FoldersHandler(IPythonHandler):
@web.authenticated
async def get(self, path):
result = glob(path + "*/")
result = [path[:-1] for path in result]
self.finish(json.dumps(result))
default_handlers = [
(r"/lmod", Lmod),
(r"/lmod/modules", LmodModules),
(r"/lmod/modules/(.*)", LmodModule),
(r"/lmod/collections", LmodCollections),
(r"/lmod/paths", LmodPaths),
(r"/lmod/folders/(.*)", FoldersHandler)
]
|
Python
| 0.000002
|
@@ -2996,24 +2996,117 @@
ATH%22
-, %22%22).split(':')
+)%0A if result is not None:%0A result = result.split(%22:%22)%0A else:%0A result = %5B%5D
%0A
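Decoded, the diff avoids splitting an empty default ("".split(":") yields [""] rather than an empty list) by branching on whether MODULEPATH is set at all; the patched handler body, reconstructed from the diff:

        result = os.environ.get("MODULEPATH")
        if result is not None:
            result = result.split(":")
        else:
            result = []
        self.finish(json.dumps(result))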
|
a53d54d9a446147cc115b3329722e1e51e9aff3e
|
remove locks for pulling difficulty to stop causing API crashes sooner
|
diff.py
|
diff.py
|
#License#
#bitHopper by Colin Rice is licensed under a Creative Commons
# Attribution-NonCommercial-ShareAlike 3.0 Unported License.
#Based on a work at github.com.
import re
import eventlet
from eventlet.green import threading, socket, urllib2
# Global timeout for sockets in case something leaks
socket.setdefaulttimeout(900)
class Difficulty():
"Stores difficulties and automaticlaly updates them"
def __init__(self, bitHopper):
self.bitHopper = bitHopper
self.difficulty = 1777774.4820015
self.nmc_difficulty = 94037.96
self.ixc_difficulty = 16384
self.i0c_difficulty = 1372
self.scc_difficulty = 5354
self.lock = threading.RLock()
eventlet.spawn_n(self.update_difficulty)
def get_difficulty(self):
with self.lock:
return self.difficulty
def get_nmc_difficulty(self):
with self.lock:
return self.nmc_difficulty
def get_ixc_difficulty(self):
with self.lock:
return self.ixc_difficulty
def get_i0c_difficulty(self):
with self.lock:
return self.i0c_difficulty
def get_scc_difficulty(self):
with self.lock:
return self.scc_difficulty
def updater(self, coin, url_diff, diff_attr, reg_exp = None):
# Generic method to update the difficulty of a given currency
self.bitHopper.log_msg('Updating Difficulty of ' + coin)
try:
useragent = {'User-Agent': self.bitHopper.config.get('main', 'work_user_agent')}
req = urllib2.Request(url_diff, headers = useragent)
response = urllib2.urlopen(req)
if reg_exp == None:
output = response.read()
else:
diff_str = response.read()
output = re.search(reg_exp, diff_str)
output = output.group(1)
self.__dict__[diff_attr] = float(output)
self.bitHopper.log_dbg('Retrieved Difficulty: ' + str(self.__dict__[diff_attr]))
except Exception, e:
self.bitHopper.log_dbg('Unable to update difficulty for ' + coin + ': ' + str(e))
pass
def update_difficulty(self):
while True:
"Tries to update difficulty from the internet"
with self.lock:
self.updater("Bitcoin", 'http://blockexplorer.com/q/getdifficulty', 'difficulty')
self.updater("Namecoin", 'http://namebit.org/', 'nmc_difficulty', '<td id="difficulty">([.0-9]+)</td>')
self.updater("SolidCoin", 'http://solidcoin.whmcr.co.uk/chain/SolidCoin?count=1', 'scc_difficulty', '<td>\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}</td><td>\d{1,}</td><td>[.0-9]+</td><td>([.0-9]+)</td>')
self.updater("IXcoin", 'http://allchains.info', 'ixc_difficulty', "ixc </td><td align='right'> ([0-9]+) </td><td align='right'> [.0-9]+ </td>")
self.updater("I0coin", 'http://allchains.info', 'i0c_difficulty', "i0c </td><td align='right'> ([0-9]+) </td><td align='right'> [.0-9]+ </td>")
eventlet.sleep(60*60*6)
|
Python
| 0
|
@@ -780,36 +780,8 @@
f):%0A
- with self.lock:%0A
@@ -850,36 +850,8 @@
f):%0A
- with self.lock:%0A
@@ -920,36 +920,8 @@
f):%0A
- with self.lock:%0A
@@ -994,36 +994,8 @@
f):%0A
- with self.lock:%0A
@@ -1068,36 +1068,8 @@
f):%0A
- with self.lock:%0A
@@ -2018,25 +2018,8 @@
(e))
-%0A pass
%0A%0A
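Decoded, the diff strips the `with self.lock:` wrapper from every getter (plus a stray pass), leaving plain attribute reads, e.g.:

    def get_difficulty(self):
        return self.difficulty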
|
a3ff6773651eafdf2c51165cac4c6a43f2aa677b
|
simplify the channel handling in containers
|
bioconda_utils/pkg_test.py
|
bioconda_utils/pkg_test.py
|
import subprocess as sp
import tempfile
import tarfile
import os
import shlex
from shutil import which
import logging
from . import utils
from conda_build.metadata import MetaData
logger = logging.getLogger(__name__)
# Use Miniconda 4.3.27 for now to avoid running into an issue in
# 'conda >4.4.7,<4.4.11': https://github.com/conda/conda/issues/6811
# TODO: Make this configurable in bioconda_utils.build and bioconda_utils.cli.
MULLED_CONDA_IMAGE = "continuumio/miniconda3:4.3.27"
def get_tests(path):
"Extract tests from a built package"
tmp = tempfile.mkdtemp()
t = tarfile.open(path)
t.extractall(tmp)
input_dir = os.path.join(tmp, 'info', 'recipe')
tests = []
recipe_meta = MetaData(input_dir)
tests_commands = recipe_meta.get_value('test/commands')
tests_imports = recipe_meta.get_value('test/imports')
requirements = recipe_meta.get_value('requirements/run')
if tests_imports or tests_commands:
if tests_commands:
tests.append(' && '.join(tests_commands))
if tests_imports and 'python' in requirements:
tests.append(
' && '.join('python -c "import %s"' % imp
for imp in tests_imports)
)
elif tests_imports and (
'perl' in requirements or 'perl-threaded' in requirements
):
tests.append(
' && '.join('''perl -e "use %s;"''' % imp
for imp in tests_imports)
)
tests = ' && '.join(tests)
tests = tests.replace('$R ', 'Rscript ')
    # this is specific to involucro, the way we build our containers
tests = tests.replace('$PREFIX', '/usr/local')
tests = tests.replace('${PREFIX}', '/usr/local')
return tests
def get_image_name(path):
"""
Returns name of generated docker image.
Parameters
----------
path : str
        Path to .tar.bz2 package built by conda-build
"""
assert path.endswith('.tar.bz2')
pkg = os.path.basename(path).replace('.tar.bz2', '')
toks = pkg.split('-')
build_string = toks[-1]
version = toks[-2]
name = '-'.join(toks[:-2])
spec = '%s=%s--%s' % (name, version, build_string)
return spec
def test_package(
path,
name_override=None,
channels=("conda-forge", "local", "bioconda", "defaults"),
mulled_args="",
base_image=None,
conda_image=MULLED_CONDA_IMAGE,
):
"""
Tests a built package in a minimal docker container.
Parameters
----------
path : str
Path to a .tar.bz2 package built by conda-build
name_override : str
Passed as the --name-override argument to mulled-build
channels : list
List of Conda channels to use. Must include an entry "local" for the
local build channel.
mulled_args : str
Mechanism for passing arguments to the mulled-build command. They will
be split with shlex.split and passed to the mulled-build command. E.g.,
mulled_args="--dry-run --involucro-path /opt/involucro"
base_image : None | str
Specify custom base image. Busybox is used in the default case.
conda_image : None | str
Conda Docker image to install the package with during the mulled based
tests.
"""
assert path.endswith('.tar.bz2'), "Unrecognized path {0}".format(path)
# assert os.path.exists(path), '{0} does not exist'.format(path)
conda_bld_dir = os.path.abspath(os.path.dirname(os.path.dirname(path)))
sp.check_call([utils.bin_for('conda'), 'index', os.path.dirname(path)])
# always build noarch index to make conda happy
sp.check_call([utils.bin_for('conda'), 'index', os.path.join(conda_bld_dir, "noarch")])
spec = get_image_name(path)
if "local" not in channels:
raise ValueError('"local" must be in channel list')
channel, *extra_channels = [
'file://{0}'.format(conda_bld_dir) if channel == 'local' else 'channel'
for channel in channels
]
# '--channel' currently defaults to 'bioconda' in mulled-build and is
# prepended to all remaining channels. To prevent this, i.e., to give
# 'conda-forge' a higher priority, we use '--channel' explicitly here.
channel_args = ['--channel', channel, '--extra-channels', ','.join(extra_channels)]
tests = get_tests(path)
logger.debug('Tests to run: %s', tests)
cmd = [
'mulled-build',
'build-and-test',
spec,
'-n', 'biocontainers',
'--test', tests
]
if name_override:
cmd += ['--name-override', name_override]
cmd += channel_args
cmd += shlex.split(mulled_args)
# galaxy-lib always downloads involucro, unless it's in cwd or its path is explicitly given.
# TODO: This should go into galaxy-lib. Once it is fixed upstream, remove this here.
involucro_path = which('involucro')
if involucro_path:
cmd += ['--involucro-path', involucro_path]
logger.debug('mulled-build command: %s' % cmd)
env = os.environ.copy()
if base_image is not None:
env["DEST_BASE_IMAGE"] = base_image
env["CONDA_IMAGE"] = conda_image
with tempfile.TemporaryDirectory() as d:
with utils.Progress():
p = utils.run(cmd, env=env, cwd=d, mask=False)
return p
|
Python
| 0.000012
|
@@ -3876,24 +3876,8 @@
-channel, *extra_
chan
@@ -3881,24 +3881,24 @@
hannels = %5B%0A
+
'fil
@@ -4007,231 +4007,8 @@
%5D%0A
- # '--channel' currently defaults to 'bioconda' in mulled-build and is%0A # prepended to all remaining channels. To prevent this, i.e., to give%0A # 'conda-forge' a higher priority, we use '--channel' explicitly here.%0A
@@ -4030,36 +4030,8 @@
%5B'--
-channel', channel, '--extra-
chan
@@ -4050,14 +4050,8 @@
oin(
-extra_
chan
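Decoded, the diff drops the channel/extra-channels split and the comment justifying it, passing everything through a single --channels flag; a sketch of the simplified handling, reconstructed from the diff (the non-local branch is assumed to yield the channel name itself):

    channels = [
        'file://{0}'.format(conda_bld_dir) if channel == 'local' else channel
        for channel in channels
    ]
    channel_args = ['--channels', ','.join(channels)]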
|
d21843d91132aa29ae56e4f52b77dd51180bfe41
|
Fix getFutureLastStep usage
|
tests/auto/tsp_stub_and_stdout_test.py
|
tests/auto/tsp_stub_and_stdout_test.py
|
#!/usr/bin/env python
import logging
import os
import time
import threading
import getopt, sys
import dtest
def usage():
print "Usage:\n %s --provider=<user>@[<host>]:<stub_path> --consumer=<user>@[<host>]:<stdout_path>" % sys.argv[0]
def getUserHostPath(argument):
if argument.find("@") != -1:
(user,argument) = argument.split("@",1)
else:
user = os.environ["USER"]
if argument.find(":") != -1:
(host,path) = argument.split(":",1)
else:
host = "localhost"
path = argument
retval = dict()
retval['user'] = user
retval['host'] = host
retval['path'] = path
return retval
try:
opts, args = getopt.getopt(sys.argv[1:], "p:c:", ["provider=", "consumer="])
except getopt.GetoptError, err:
    print >> sys.stderr, "opt = %s, msg = %s" % (err.opt, err.msg)
usage()
sys.exit(2)
if len(opts) < 2:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-p", "--provider"):
stub_param = getUserHostPath(a)
if o in ("-c", "--consumer"):
stdout_param = getUserHostPath(a)
stdout = dtest.DTester("tsp_stdout",
session=dtest.SSHSessionHandler(stdout_param['user'],host=stdout_param['host']))
stub = dtest.DTester("tsp_stubbed_server",
session=dtest.SSHSessionHandler(stdout_param['user'],host=stdout_param['host']))
# you may change the default time out value
stub.timeout = 8
# you add want to save the output of your dtester to a file.
stub.stdout = file(stub.name + ".out",'w+')
stub.stdin = file(stub.name + ".in",'w+')
stdout.stdout = file(stdout.name + ".out",'w+')
#stdout.stdin = file(stdout.name + ".in",'w+')
dtest.DTester.logger.setLevel(level=logging.WARNING)
stub.addRunStep("ok",True,"TSP Stub and Stdout Starts")
stub.addRunStep("runCommand",command=stub_param['path'])
stub.addRunStep("expectFromCommand",pattern="TSP Provider on PID")
stub.addRunStep("barrier","provider started")
stdout.addRunStep("barrier","provider started")
stdout.addRunStep("runCommand",command=stdout_param['path']+" -u rpc://"+stub.session.host)
stdout.addRunStep("expectFromCommand",pattern="tsp_stdout_client: Using provider URL")
stdout.addRunStep("expectFromCommand",pattern="End of Test OK")
stdout.addRunStep("ok",stdout.getFutureLastStepStatus(),"First stdout run (no args)")
stdout.addRunStep("terminateCommand")
stdout.addRunStep("waitCommandTermination")
stdout.addRunStep("runCommand",command=stdout_param['path']+" -n 150 -u rpc://"+stub.session.host)
stdout.addRunStep("expectFromCommand",pattern="tsp_stdout_client: Using provider URL")
stdout.addRunStep("expectFromCommand",pattern="End of Test OK")
stdout.addRunStep("ok",stdout.getFutureLastStepStatus(),"Second stdout run (-n 150) ")
stdout.addRunStep("terminateCommand")
stdout.addRunStep("waitCommandTermination")
stdout.addRunStep("barrier","consumer ended",timeout=1)
stub.addRunStep("barrier","consumer ended")
stub.addRunStep("terminateCommand")
stub.addRunStep("waitCommandTermination")
stub.addRunStep("ok",True,"TSP Stub and Stdout Ends")
# Here begins the test
dtest.DTestMaster.logger.setLevel(level=logging.WARNING)
dtest.DTester.logger.setLevel(level=logging.WARNING)
dtest.SSHSessionHandler.logger.setLevel(level=logging.WARNING)
def goTest():
myDTestMaster = dtest.DTestMaster("TSP Stub Server/Stdout Test",description="This TSP test sequence launch a stubbed server and an stdout client")
myDTestMaster.timeout = 40
myDTestMaster.register(stdout)
myDTestMaster.register(stub)
myDTestMaster.startTestSequence()
myDTestMaster.waitTestSequenceEnd()
goTest()
|
Python
| 0
|
@@ -1706,30 +1706,28 @@
vel=logging.
-WARNIN
+DEBU
G)%0A%0Astub.add
@@ -2288,18 +2288,16 @@
epStatus
-()
,%22First
@@ -2704,18 +2704,16 @@
epStatus
-()
,%22Second
|
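Decoded, the diff above makes two changes: the DTester log level is raised from WARNING to DEBUG, and the parentheses after getFutureLastStepStatus are dropped so the method itself, rather than its call result, is handed to the "ok" step. A reconstructed sketch of the affected lines (assuming the dtest "ok" step accepts a callable and evaluates it when the step runs):

dtest.DTester.logger.setLevel(level=logging.DEBUG)
stdout.addRunStep("ok",stdout.getFutureLastStepStatus,"First stdout run (no args)")
stdout.addRunStep("ok",stdout.getFutureLastStepStatus,"Second stdout run (-n 150) ")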
80bdb7748524fb2a01f3f042e11071827302186e
|
Add Controller, Actuator, and ControlledProcess items
|
gaphor/RAAML/stpa/stpatoolbox.py
|
gaphor/RAAML/stpa/stpatoolbox.py
|
"""The definition for the STPA section of the RAAML toolbox."""
from gaphor.diagram.diagramtoolbox import (
ToolDef,
ToolSection,
default_namespace,
namespace_config,
)
from gaphor.diagram.diagramtools import new_item_factory
from gaphor.i18n import gettext
from gaphor.RAAML import diagramitems, raaml
from gaphor.SysML import diagramitems as sysml_items
from gaphor.UML import diagramitems as uml_items
def loss_config(new_item):
default_namespace(new_item)
new_item.subject.name = "Loss"
def hazard_config(new_item):
default_namespace(new_item)
new_item.subject.name = "Hazard"
def abstract_operational_situation_config(new_item):
default_namespace(new_item)
new_item.subject.name = "AbstractOperationalSituation"
stpa = ToolSection(
"STPA",
(
ToolDef(
"toolbox-generalization",
gettext("Generalization"),
"gaphor-generalization-symbolic",
"<Shift>G",
new_item_factory(uml_items.GeneralizationItem),
),
ToolDef(
"loss",
gettext("Loss"),
"gaphor-loss-symbolic",
"<Shift>L",
new_item_factory(
sysml_items.BlockItem, raaml.Loss, config_func=loss_config
),
),
ToolDef(
"hazard",
gettext("Hazard"),
"gaphor-hazard-symbolic",
"<Shift>H",
new_item_factory(
sysml_items.BlockItem, raaml.Hazard, config_func=hazard_config
),
),
ToolDef(
"situation",
gettext("Situation"),
"gaphor-situation-symbolic",
"s",
new_item_factory(
sysml_items.BlockItem,
raaml.Situation,
config_func=namespace_config,
),
),
ToolDef(
"control-structure",
gettext("Control Structure"),
"gaphor-control-structure-symbolic",
"f",
new_item_factory(
sysml_items.BlockItem,
raaml.ControlStructure,
config_func=namespace_config,
),
),
ToolDef(
"controller",
gettext("Controller"),
"gaphor-controller-symbolic",
"w",
new_item_factory(uml_items.ClassItem),
),
ToolDef(
"actuator",
gettext("Actuator"),
"gaphor-actuator-symbolic",
"q",
new_item_factory(uml_items.ClassItem),
),
ToolDef(
"controlled-process",
gettext("Controlled Process"),
"gaphor-controlled-process-symbolic",
"<Shift>P",
new_item_factory(uml_items.ClassItem),
),
ToolDef(
"abstract-operational-situation",
gettext("Abstract Operational Situation"),
"gaphor-abstract-operational-situation-symbolic",
"<Shift>J",
new_item_factory(
diagramitems.OperationalSituationItem,
raaml.AbstractOperationalSituation,
config_func=abstract_operational_situation_config,
),
),
ToolDef(
"operational-situation",
gettext("Operational Situation"),
"gaphor-operational-situation-symbolic",
"<Shift>O",
new_item_factory(
diagramitems.OperationalSituationItem,
raaml.OperationalSituation,
config_func=namespace_config,
),
),
ToolDef(
"unsafe-control-action",
gettext("Unsafe Control Action"),
"gaphor-unsafe-control-action-symbolic",
"u",
new_item_factory(
diagramitems.UnsafeControlActionItem,
raaml.UnsafeControlAction,
config_func=namespace_config,
),
),
ToolDef(
"relevant-to",
gettext("Relevant To"),
"gaphor-relevant-to-symbolic",
"r",
new_item_factory(
diagramitems.RelevantToItem,
raaml.RelevantTo,
),
),
ToolDef(
"control-action",
gettext("Control Action"),
"gaphor-control-action-symbolic",
"<Shift>M",
new_item_factory(
diagramitems.ControlActionItem,
raaml.ControlAction,
config_func=namespace_config,
),
),
),
)
|
Python
| 0
|
@@ -2369,35 +2369,118 @@
factory(
-uml_items.ClassItem
+%0A sysml_items.PropertyItem, raaml.Controller, config_func=namespace_config%0A
),%0A
@@ -2645,35 +2645,116 @@
factory(
-uml_items.ClassItem
+%0A sysml_items.PropertyItem, raaml.Actuator, config_func=namespace_config%0A
),%0A
@@ -2960,27 +2960,150 @@
ory(
-uml_items.ClassItem
+%0A sysml_items.PropertyItem,%0A raaml.ControlledProcess,%0A config_func=namespace_config,%0A
),%0A
|
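Decoded, the diff swaps the plain uml_items.ClassItem factories for sysml_items.PropertyItem factories bound to the new RAAML element types. A reconstructed sketch of the Controller tool after the change; the Actuator and Controlled Process tools follow the same pattern with raaml.Actuator and raaml.ControlledProcess:

ToolDef(
    "controller",
    gettext("Controller"),
    "gaphor-controller-symbolic",
    "w",
    new_item_factory(
        sysml_items.PropertyItem, raaml.Controller, config_func=namespace_config
    ),
),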
d1b1a6d845419b5c1b8bec3d7f3bded83cf6c9a1
|
Fix ObjectNodeItem upperBound visibility
|
gaphor/UML/actions/objectnode.py
|
gaphor/UML/actions/objectnode.py
|
"""Object node item."""
from gaphor import UML
from gaphor.core.modeling.properties import attribute
from gaphor.diagram.presentation import ElementPresentation, Named
from gaphor.diagram.shapes import Box, EditableText, IconBox, Text, draw_border
from gaphor.diagram.support import represents
from gaphor.UML.modelfactory import stereotypes_str
DEFAULT_UPPER_BOUND = "*"
@represents(UML.ObjectNode)
class ObjectNodeItem(ElementPresentation, Named):
"""Representation of object node. Object node is ordered and has upper
bound specification.
Ordering information can be hidden by user.
"""
def __init__(self, diagram, id=None):
super().__init__(
diagram,
id,
shape=IconBox(
Box(
Text(
text=lambda: stereotypes_str(self.subject),
),
EditableText(text=lambda: self.subject.name or ""),
style={
"min-width": 50,
"min-height": 30,
"padding": (5, 10, 5, 10),
},
draw=draw_border,
),
Text(
text=lambda: self.subject.upperBound
not in (None, DEFAULT_UPPER_BOUND)
and f"{{ upperBound = {self.subject.upperBound} }}",
),
Text(
text=lambda: self.show_ordering
and self.subject.ordering
and f"{{ ordering = {self.subject.ordering} }}"
or "",
),
),
)
self.watch("subject[NamedElement].name")
self.watch("subject.appliedStereotype.classifier.name")
self.watch("subject[ObjectNode].upperBound")
self.watch("subject[ObjectNode].ordering")
self.watch("show_ordering")
show_ordering: attribute[bool] = attribute("show_ordering", bool, False)
|
Python
| 0.000003
|
@@ -1301,16 +1301,20 @@
n (None,
+ %22%22,
DEFAULT
@@ -1398,16 +1398,42 @@
und%7D %7D%7D%22
+%0A or %22%22
,%0A
|
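Decoded, the diff adds the empty string to the set of upperBound values that suppress the label and appends an `or ""` fallback, so the lambda always returns a string instead of False when the condition fails. Reconstructed shape of the changed Text block:

Text(
    text=lambda: self.subject.upperBound
    not in (None, "", DEFAULT_UPPER_BOUND)
    and f"{{ upperBound = {self.subject.upperBound} }}"
    or "",
),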
2ad8f039112977c4dfdf4389586df74263d753b6
|
Handle certificate in shaker.py
|
functest/opnfv_tests/openstack/shaker/shaker.py
|
functest/opnfv_tests/openstack/shaker/shaker.py
|
#!/usr/bin/env python
# Copyright (c) 2018 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
"""
Shaker_ wraps around popular system network testing tools like iperf, iperf3
and netperf (with help of flent). Shaker is able to deploy OpenStack instances
and networks in different topologies. Shaker scenario specifies the deployment
and list of tests to execute.
.. _Shaker: http://pyshaker.readthedocs.io/en/latest/
"""
import logging
import os
import scp
from functest.core import singlevm
class Shaker(singlevm.SingleVm2):
"""Run shaker full+perf l2 and l3"""
# pylint: disable=too-many-instance-attributes
__logger = logging.getLogger(__name__)
filename = '/home/opnfv/functest/images/shaker-image.qcow2'
flavor_ram = 512
flavor_vcpus = 1
flavor_disk = 3
username = 'ubuntu'
port = 9000
ssh_connect_loops = 12
def prepare(self):
super(Shaker, self).prepare()
self.cloud.create_security_group_rule(
self.sec.id, port_range_min=self.port, port_range_max=self.port,
protocol='tcp', direction='ingress')
def execute(self):
"""
Returns:
- 0 if success
- 1 on operation error
"""
assert self.ssh
keystone_id = self.orig_cloud.search_services('keystone')[0].id
self.__logger.debug("keystone id: %s", keystone_id)
endpoint = self.orig_cloud.search_endpoints(
filters={'interface': 'public',
'service_id': keystone_id})[0].url
self.__logger.debug("keystone endpoint: %s", endpoint)
self.orig_cloud.grant_role(
"admin", user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
self.orig_cloud.grant_role(
"heat_stack_owner", user=self.project.user.id,
project=self.project.project.id,
domain=self.project.domain.id)
scpc = scp.SCPClient(self.ssh.get_transport())
scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
(_, stdout, stderr) = self.ssh.exec_command(
'source ~/env_file && '
'export OS_INTERFACE=public && '
'export OS_AUTH_URL={} && '
'export OS_USERNAME={} && '
'export OS_PROJECT_NAME={} && '
'export OS_PASSWORD={} && '
'env && '
'shaker --image-name {} --flavor-name {} '
'--server-endpoint {}:9000 --scenario '
'openstack/full_l2,'
'openstack/full_l3_east_west,'
'openstack/full_l3_north_south,'
'openstack/perf_l3_north_south '
'--report report.html --output report.json'.format(
endpoint, self.project.user.name, self.project.project.name,
self.project.password, self.image.name, self.flavor.name,
self.fip.floating_ip_address))
self.__logger.info("output:\n%s", stdout.read())
self.__logger.info("error:\n%s", stderr.read())
if not os.path.exists(self.res_dir):
os.makedirs(self.res_dir)
try:
scpc.get('report.json', self.res_dir)
scpc.get('report.html', self.res_dir)
except scp.SCPException:
self.__logger.exception("cannot get report files")
return 1
return stdout.channel.recv_exit_status()
|
Python
| 0
|
@@ -2287,16 +2287,133 @@
h='~/')%0A
+ if os.environ.get('OS_CACERT'):%0A scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')%0A
@@ -2457,16 +2457,16 @@
ommand(%0A
-
@@ -2694,32 +2694,49 @@
ASSWORD=%7B%7D && '%0A
+ '%7B%7D'%0A
'env
@@ -3151,24 +3151,24 @@
oject.name,%0A
-
@@ -3193,16 +3193,144 @@
assword,
+%0A 'export OS_CACERT=~/os_cacert && ' if os.environ.get(%0A 'OS_CACERT') else '',%0A
self.im
|
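Decoded, the diff copies the CA certificate bundle onto the instance when OS_CACERT is set and exports it inside the remote shell command. A reconstructed sketch of the two affected spots; the new '{}' slot in the command string after OS_PASSWORD is filled with the conditional export expression:

scpc.put('/home/opnfv/functest/conf/env_file', remote_path='~/')
if os.environ.get('OS_CACERT'):
    scpc.put(os.environ.get('OS_CACERT'), remote_path='~/os_cacert')

# ... and in the format() arguments, between self.project.password and
# self.image.name:
'export OS_CACERT=~/os_cacert && ' if os.environ.get(
    'OS_CACERT') else '',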
5a2500f0f74321fffa02ab405f1b440874f2309e
|
Add jaxb directory as license check exception.
|
tools/licensescheck.py
|
tools/licensescheck.py
|
#!/usr/bin/python
import os, sys, re
# Path to eng checkout root directory. To use this as a git pre-commit
# hook, copy this script to .git/hooks/pre-commit, set basepath as the
# absolute path to your volt checkout and set ascommithook to true.
basepath = "../"
ascommithook = False
prunelist = ('hsqldb19b3',
'hsqldb',
'jetty716',
'proj_gen',
'jni_md.h',
'jni.h',
'org_voltdb_jni_ExecutionEngine.h',
'org_voltdb_utils_DBBPool.h',
'org_voltdb_utils_DBBPool_DBBContainer.h',
'xml2',
'simplejson',
'projectfile',
'deploymentfile',
'xml',
'helloworld',
'CSVReader.java')
def verifyLicense(f, content, approvedLicensesJavaC, approvedLicensesPython):
if f.endswith('.py'):
if not content.startswith("#"):
if content.lstrip().startswith("#"):
print "ERROR: \"%s\" contains whitespace before initial comment." % f
return 1
else:
print "ERROR: \"%s\" does not begin with a comment." % f
return 1
# skip hashbang
if content.startswith("#!"):
(ignore, content) = content.split("\n", 1)
content = content.lstrip()
# skip python coding magic
if content.startswith("# -*-"):
(ignore, content) = content.split("\n", 1)
content = content.lstrip()
# verify license
for license in approvedLicensesPython:
if content.startswith(license):
return 0
print "ERROR: \"%s\" does not start with an approved license." % f
else:
if not content.startswith("/*"):
if content.lstrip().startswith("/*"):
print "ERROR: \"%s\" contains whitespace before initial comment." % f
else:
print "ERROR: \"%s\" does not begin with a comment." % f
return 1
for license in approvedLicensesJavaC:
if content.startswith(license):
return 0
print "ERROR: \"%s\" does not start with an approved license." % f
return 1
def verifyTrailingWhitespace(f, content):
if re.search(r'[\t\f\v ]\n', content):
print("ERROR: \"%s\" contains trailing whitespace." % (f))
return 1
return 0
def verifyTabs(f, content):
num = content.count('\t')
if num > 0:
print("ERROR: \"%s\" contains %d tabs." % (f, num))
return 1
return 0
def verifySprintf(f, content):
num = content.count('sprintf')
if num > 0:
print("ERROR: \"%s\" contains %d calls to sprintf(). Use snprintf()." % (f, num))
return 1
return 0
def readFile(filename):
"read a file into a string"
FH=open(filename, 'r')
fileString = FH.read()
FH.close()
return fileString
def processFile(f, approvedLicensesJavaC, approvedLicensesPython):
for suffix in ('.java', '.cpp', '.cc', '.h', '.hpp', '.py'):
if f.endswith(suffix):
break
else:
return 0
content = readFile(f)
retval = verifyLicense(f, content, approvedLicensesJavaC, approvedLicensesPython)
if retval != 0:
return retval
retval = verifyTabs(f, content)
if retval != 0:
return retval
retval = verifyTrailingWhitespace(f, content)
if (retval != 0):
return retval
retval = verifySprintf(f, content)
if (retval != 0):
return retval
return 0
def processAllFiles(d, approvedLicensesJavaC, approvedLicensesPython):
files = os.listdir(d)
errcount = 0
for f in [f for f in files if not f.startswith('.') and f not in prunelist]:
fullpath = os.path.join(d,f)
if os.path.isdir(fullpath):
errcount += processAllFiles(fullpath, approvedLicensesJavaC, approvedLicensesPython)
else:
errcount += processFile(fullpath, approvedLicensesJavaC, approvedLicensesPython)
return errcount
testLicenses = [basepath + 'tools/approved_licenses/mit_x11_hstore_and_voltdb.txt',
basepath + 'tools/approved_licenses/mit_x11_evanjones_and_voltdb.txt',
basepath + 'tools/approved_licenses/mit_x11_michaelmccanna_and_voltdb.txt',
basepath + 'tools/approved_licenses/mit_x11_voltdb.txt']
srcLicenses = [basepath + 'tools/approved_licenses/gpl3_hstore_and_voltdb.txt',
basepath + 'tools/approved_licenses/gpl3_evanjones_and_voltdb.txt',
basepath + 'tools/approved_licenses/gpl3_base64_and_voltdb.txt',
basepath + 'tools/approved_licenses/gpl3_voltdb.txt']
testLicensesPy = [basepath + 'tools/approved_licenses/mit_x11_voltdb_python.txt']
srcLicensesPy = [basepath + 'tools/approved_licenses/gpl3_voltdb_python.txt']
errcount = 0
errcount += processAllFiles(basepath + "src",
tuple([readFile(f) for f in srcLicenses]),
tuple([readFile(f) for f in srcLicensesPy]))
errcount += processAllFiles(basepath + "tests",
tuple([readFile(f) for f in testLicenses]),
tuple([readFile(f) for f in testLicensesPy]))
errcount += processAllFiles(basepath + "examples",
tuple([readFile(f) for f in testLicenses]),
tuple([readFile(f) for f in testLicensesPy]))
if errcount == 0:
print "SUCCESS. Found 0 license text errors, 0 files containing tabs or trailing whitespace."
else:
print "FAILURE. Found %d license text or whitespace errors." % errcount
# run through any other source the caller wants checked
# assumes a single valid license in $repo/tools/approved_licenses/license.txt
# "${voltpro}" is the build.xml property - can be seen as a literal if the
# property is not set.
if not ascommithook:
for arg in sys.argv[1:]:
if arg != "${voltpro}":
print "Checking additional repository: " + arg;
proLicenses = ["../" + arg + '/tools/approved_licenses/license.txt']
proLicensesPy = ["../" + arg + '/tools/approved_licenses/license_python.txt']
errcount = 0
errcount += processAllFiles("../" + arg + "/src/",
tuple([readFile(f) for f in proLicenses]),
tuple([readFile(f) for f in proLicensesPy]))
errcount += processAllFiles("../" + arg + "/tests/",
tuple([readFile(f) for f in proLicenses]),
tuple([readFile(f) for f in proLicensesPy]))
if errcount == 0:
print "SUCCESS. Found 0 license text errors, 0 files containing tabs or trailing whitespace."
else:
print "FAILURE (%s). Found %d license text or whitespace errors." % (arg, errcount)
sys.exit(errcount)
|
Python
| 0
|
@@ -759,16 +759,37 @@
er.java'
+,%0A 'jaxb'
)%0A%0Adef v
|
75e941f8010ebc916fa9b38cb40c49b5dbbdea12
|
fix typo preventing state consistency
|
Hydraulics/attract_manager.py
|
Hydraulics/attract_manager.py
|
import logging
import time
from threading import Thread
import event_manager
import hydraulics_drv
gAttractModeTimeout = 30 # seconds
gAutoAttractModeEnabled = False
gOldPos = [0,0,0]
gMaxDelta = 20 # 0-4095 scale
gAttractModeStartTime = 0
gInAttractMode = False
gAttractMonitorThread = None
gOriginalDriverInput = "controller"
gInterruptable = True
isRunning = True
logger = logging.getLogger("hydraulics")
def init(timeout = 30, autoEnable=False, delta=20):
global gAttractModeStartTime
global gAttractMonitorThread
logger.info("Attract Manager Init")
attractModeTimeout(timeout)
attractModeDelta(delta)
event_manager.addListener(eventHandler, "pos")
gAttractModeStartTime = time.time() + gAttractModeTimeout
gAttractMonitorThread = Thread(target=attractModeMonitor)
gAttractMonitorThread.start()
def shutdown():
global isRunning
global gAttractMonitorThread
logger.info("Attract Manager Shutdown")
isRunning = False
if gAttractMonitorThread != None:
gAttractMonitorThread.join()
gAttractMonitorThread = None
def inAttractMode():
return gInAttractMode
# interruptable is sort of a mode. You've got
# - autoattract
# - one shot attract
# - continuous attract
# - attract on/off (separate switch)
# So maybe I should have a way of setting that?
# Who controls? Us. Event manager can handle the autoattract
# Other issues:
# - Attract mode recovery time. How long does it take to recover from a movement?
# What do we do to get the sculpture into position? Do we wait until it gets close?
# (would have to change the playback manager)
def startAttractMode(interruptable=True):
global gInattractMode
global gAttractModeTimeout
global gOriginalDriverInput
global gInterruptable
gInAttractMode = True
logger.info("Starting attract mode")
gOriginalDriverInput = hydraulics_drv.getInputSource()
hydraulics_drv.setInputSource("recording")
gAttractModeTimeout = 0
gInterruptable = interruptable
def interruptable(tf=None):
global gInterruptable
if tf == None:
return gInterruptable
else:
gInterruptable = tf
def stopAttractMode():
global gInAttractMode
logger.info("leaving attract mode")
hydraulics_drv.setInputSource(gOriginalDriverInput)
gInAttractMode = False
def attractModeTimeout(timeout=None): # time in seconds
global gAttractModeTimeout
global gAttractModeStartTime
if timeout == None:
return gAttractModeTimeout
else:
gAttractModeTimeout = timeout # XXX check numeric
gAttractModeStartTime = time.time() + timeout
def autoAttractModeEnabled(tf=None):
global gAutoAttractModeEnabled
if tf == None:
return gAutoAttractModeEnabled
else:
gAutoAttractModeEnabled = (tf == True)
def attractModeDelta(delta=None):
global gMaxDelta
if delta == None:
return gMaxDelta
else:
gMaxDelta = delta # XXX check numeric
def eventHandler(msg):
global gInAttractMode
global gAttractModeStartTime
if msg["msgType"] == "pos":
x = msg["x"]
y = msg["y"]
z = msg["z"]
# XXX - do I want to add a low-pass filter?
if ((abs(x - gOldPos[0]) > gMaxDelta) or
(abs(y - gOldPos[1]) > gMaxDelta) or
(abs(z - gOldPos[2]) > gMaxDelta)):
logger.debug("Position has changed. Resetting attract mode position and timeout")
gOldPos[0] = x
gOldPos[1] = y
gOldPos[2] = z
gAttractModeStartTime = time.time() + gAttractModeTimeout
if gInAttractMode and gInterruptable:
stopAttractMode()
def attractModeMonitor():
while isRunning:
if (time.time() > gAttractModeStartTime and not gInAttractMode):
startAttractMode()
time.sleep(1)
if __name__ == "__main__":
try:
logging.basicConfig(format='%(asctime)-15s %(levelname)s %(module)s %(lineno)d: %(message)s', level=logging.DEBUG)
event_manager.init()
hydraulics_drv.init()
init(2, True)
event_manager.postEvent({"msgType":"pos", "x":50, "y":50, "z":50})
time.sleep(5)
if gInAttractMode:
logger.debug("SUCCESS! In attract mode!")
else:
logger.debug("FAILURE! Not in attract mode")
event_manager.postEvent({"msgType":"pos", "x":500, "y":50, "z":50})
time.sleep(1)
if not gInAttractMode:
logger.debug("SUCCESS! Not in attract mode!")
else:
logger.debug("FAILURE! In attract mode")
except Exception:
logger.exception("Unexpect failure in test")
hydraulics_drv.shutdown()
event_manager.shutdown()
shutdown()
|
Python
| 0.000624
|
@@ -1709,17 +1709,17 @@
obal gIn
-a
+A
ttractMo
|
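Decoded, the one-character diff fixes the global declaration in startAttractMode: the misspelled gInattractMode meant the subsequent assignment created a function-local name, so the module-level flag never changed and inAttractMode() kept reporting False. After the fix:

def startAttractMode(interruptable=True):
    global gInAttractMode   # was: global gInattractMode
    ...
    gInAttractMode = True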
0d7add686605d9d86e688f9f65f617555282ab60
|
Add debugging CLI hook for email sending
|
opwen_email_server/backend/email_sender.py
|
opwen_email_server/backend/email_sender.py
|
from typing import Tuple
from opwen_email_server import azure_constants as constants
from opwen_email_server import config
from opwen_email_server.services.queue import AzureQueue
from opwen_email_server.services.sendgrid import SendgridEmailSender
QUEUE = AzureQueue(account=config.QUEUES_ACCOUNT, key=config.QUEUES_KEY,
name=constants.QUEUE_EMAIL_SEND)
EMAIL = SendgridEmailSender(key=config.EMAIL_SENDER_KEY)
def send(email: dict) -> Tuple[str, int]:
success = EMAIL.send_email(email)
if not success:
return 'error', 500
return 'sent', 200
|
Python
| 0
|
@@ -582,8 +582,317 @@
t', 200%0A
+%0A%0Aif __name__ == '__main__':%0A from argparse import ArgumentParser%0A from json import loads%0A from uuid import uuid4%0A%0A parser = ArgumentParser()%0A parser.add_argument('email')%0A args = parser.parse_args()%0A%0A email = loads(args.email)%0A email.setdefault('_uid', str(uuid4()))%0A%0A send(email)%0A
|
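Decoded verbatim from the escaped patch, the diff appends a small command-line hook to the module, taking a JSON email on argv and filling in a random _uid before sending:

if __name__ == '__main__':
    from argparse import ArgumentParser
    from json import loads
    from uuid import uuid4

    parser = ArgumentParser()
    parser.add_argument('email')
    args = parser.parse_args()

    email = loads(args.email)
    email.setdefault('_uid', str(uuid4()))

    send(email)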
13a146c5e4a96d2f01aaecb6fd6839a9c290a2b3
|
Fix bug assuming multiple '/' in URL
|
tools/pluginmanager.py
|
tools/pluginmanager.py
|
#----------------------------------------------------------------------
# Copyright (c) 2011-2016 Raytheon BBN Technologies
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and/or hardware specification (the "Work") to
# deal in the Work without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Work, and to permit persons to whom the Work
# is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Work.
#
# THE WORK IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE WORK OR THE USE OR OTHER DEALINGS
# IN THE WORK.
#----------------------------------------------------------------------
ALL_SERVICES = {}
def registerService(name, service):
ALL_SERVICES[name] = service
def getService(name):
return ALL_SERVICES[name]
# Stored in plugin manager as 'xmlrpc' service
class XMLRPCHandler(object):
_entries = []
_entries_by_endpoint = {}
_entries_by_service_name = {}
def registerXMLRPC(self, unique_service_name, instance, endpoint):
entry = XMLRPCEntry(unique_service_name, instance, endpoint)
self._entries.append(entry)
self._entries_by_endpoint[endpoint] = entry
self._entries_by_service_name[unique_service_name] = entry
def lookupByEndpoint(self, endpoint):
return self._entries_by_endpoint[endpoint]
def lookupByServiceName(self, service_name):
return self._entries_by_service_name[service_name]
class XMLRPCEntry(object):
def __init__(self, unique_service_name, instance, endpoint):
self._unique_service_name = unique_service_name
self._instance = instance
self._endpoint = endpoint
# Stored in plugin manager as 'config' service
class ConfigDB(object):
_mapping = {}
def install(self, key, defaultValue, defaultDescription, force=False):
# print "ConfigDB.install %s %s %s %s" % \
# (key, defaultValue, defaultDescription, force)
# ***
pass
def set(self, key, value):
self._mapping[key] = value
def get(self, key):
return self._mapping[key]
def getAll(self):
return self._mapping.keys()
class RESTEntry(object):
def __init__(self, endpoint, rule, handler, defaults, methods):
self._endpoint = endpoint
self._rule = rule
self._handler = handler
self._defaults = defaults
self._methods = methods
class RESTDispatcher(object):
_entries_by_endpoint = {}
def add_url_rule(self, endpoint, rule, handler, defaults, methods):
print "RESTDispatcher called: %s %s %s %s %s" % \
(endpoint, rule, handler, defaults, methods)
entry = RESTEntry(endpoint, rule, handler, defaults, methods)
key = endpoint.split('/')[1]
self._entries_by_endpoint[key] = entry
def lookup_handler(self, endpoint):
key = endpoint.split('/')[1]
if key in self._entries_by_endpoint:
return self._entries_by_endpoint[key]._handler
return None
class RESTServer(object):
app = RESTDispatcher()
def runServer(self):
print "FlaskServer.runServer"
pass
|
Python
| 0.000023
|
@@ -3412,32 +3412,101 @@
elf, endpoint):%0A
+ pieces = endpoint.split('/')%0A if len(pieces) %3E 2:%0A
key = en
@@ -3534,16 +3534,29 @@
+%0A
if key i
@@ -3584,16 +3584,20 @@
dpoint:%0A
+
|
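Decoded, the diff guards lookup_handler against endpoints that do not contain multiple '/' separators, where the old split('/')[1] lookup could raise IndexError or pick the wrong key. Reconstructed method after the change:

def lookup_handler(self, endpoint):
    pieces = endpoint.split('/')
    if len(pieces) > 2:
        key = endpoint.split('/')[1]
        if key in self._entries_by_endpoint:
            return self._entries_by_endpoint[key]._handler
    return None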
5875de6fb894a2903ec1f10c7dbc65c7071c7732
|
Fix NullHandler logger addition
|
rmqid/__init__.py
|
rmqid/__init__.py
|
__version__ = '0.4.0'
from rmqid.connection import Connection
from rmqid.exchange import Exchange
from rmqid.message import Message
from rmqid.queue import Queue
from rmqid.tx import Tx
from rmqid.simple import consumer
from rmqid.simple import get
from rmqid.simple import publish
import logging
try:
from logging import NullHandler
except ImportError:
# Python 2.6 does not have a NullHandler
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger().addHandler(NullHandler())
|
Python
| 0.000003
|
@@ -508,16 +508,23 @@
tLogger(
+'rmqid'
).addHan
|
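Decoded, the fix attaches the NullHandler to the library's own 'rmqid' logger instead of the root logger, the conventional way to silence a library's "no handlers could be found" warning without swallowing the application's root-level logging:

logging.getLogger('rmqid').addHandler(NullHandler())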
d5345e34deaf5510c8b45f7d5fbed49f9cee8e41
|
echo 0
|
echo.py
|
echo.py
|
Python
| 0.999334
|
@@ -0,0 +1,230 @@
+import IPYthon%0Aimport warnings%0A%0Afrom golix import Ghid%0Afrom hypergolix.service import HypergolixLink%0A%0Ahgxlink = HypergolixLink(threaded=True)%0A%0Awith warnings.catch_warnings():%0A warnings.simplefilter('ignore')%0A IPython.embed()
|
|
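Decoded verbatim from the escaped patch, the diff creates echo.py from scratch. Note that as committed the first line imports "IPYthon" while the body calls IPython.embed(), a typo that would make the script fail at import time:

import IPYthon
import warnings

from golix import Ghid
from hypergolix.service import HypergolixLink

hgxlink = HypergolixLink(threaded=True)

with warnings.catch_warnings():
    warnings.simplefilter('ignore')
    IPython.embed()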
c260b974426da547600725b9d5814c0e3d35bb53
|
make more pythonic
|
robogrid/robot.py
|
robogrid/robot.py
|
from .grids import Simple_Grid
class Robot(object):
def __init__(self, name, grid=None):
self.name = name
if grid == None:
grid = Simple_Grid(20)
self.grid = grid
start_pos = self.grid.free_position()
if start_pos == None:
raise ValueError("No space in proposed grid")
self._heading = 0
self._x, self._y = start_pos
def __repr__(self):
summary = {
"name": self.name,
"grid": repr(self.grid)
}
return 'Robot("{name}", {grid})'.format(**summary)
def __str__(self):
arrow = "^>v<"[self.heading]
result = ""
for row_i, row in enumerate(self.grid):
for col_i, cell in enumerate(row):
if (col_i, row_i) == self.pos:
result += arrow
else:
result += self.grid.char(cell)
result += "\n"
return result
def forward(self):
if self.heading == 0:
if self.grid[self.x, self.y-1] == False:
self._y -= 1
elif self.heading == 1:
if self.grid[self.x+1, self.y] == False:
self._x += 1
elif self.heading == 2:
if self.grid[self.x, self.y+1] == False:
self._y += 1
elif self.heading == 3:
if self.grid[self.x-1, self.y] == False:
self._x -= 1
def can_move_forward(self):
if self.heading == 0:
return self.grid[self.x, self.y-1] == False
elif self.heading == 1:
return self.grid[self.x+1, self.y] == False
elif self.heading == 2:
return self.grid[self.x, self.y+1] == False
elif self.heading == 3:
return self.grid[self.x-1, self.y] == False
def backward(self):
self.right()
self.right()
self.forward()
self.right()
self.right()
def right(self):
self.heading += 1
def left(self):
self.heading -= 1
@property
def heading(self):
return self._heading
@heading.setter
def heading(self, val):
self._heading = val % 4
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def pos(self):
return self.x, self.y
def isFinished(self):
if self.x == self.grid.width - 2 and self.y == self.grid.height - 2:
return True
return False
|
Python
| 0.000028
|
@@ -2380,17 +2380,18 @@
def is
-F
+_f
inished(
@@ -2401,26 +2401,30 @@
f):%0A
-if
+return
self.x == s
@@ -2480,51 +2480,5 @@
- 2
-:%0A return True%0A return False
%0A
|
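Decoded, the diff renames isFinished to the PEP 8 style is_finished and collapses the if/return True/return False pattern into a single boolean return:

def is_finished(self):
    return self.x == self.grid.width - 2 and self.y == self.grid.height - 2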
c205b6a8f5d96240aacbe9f99aeecbf3b2d398ed
|
Fix issue #2411. Don't leak temp file handle. (#2417)
|
google/cloud/forseti/common/util/file_loader.py
|
google/cloud/forseti/common/util/file_loader.py
|
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for reading and parsing files in a variety of formats."""
import json
import os
import tempfile
import yaml
from google.cloud.forseti.common.gcp_api import storage
from google.cloud.forseti.common.util import errors as util_errors
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
def read_and_parse_file(file_path):
"""Parse a json or yaml formatted file from a local path or GCS.
Args:
file_path (str): The full path to the file to read and parse.
Returns:
dict: The results of parsing the file.
"""
file_path = file_path.strip()
if file_path.startswith('gs://'):
return _read_file_from_gcs(file_path)
return _read_file_from_local(file_path)
def copy_file_from_gcs(file_path, output_path=None, storage_client=None):
"""Copy file from GCS to local file.
Args:
file_path (str): The full GCS path to the file.
output_path (str): The local file to copy to, if not set creates a
temporary file.
storage_client (storage.StorageClient): The Storage API Client to use
for downloading the file using the API.
Returns:
str: The output_path the file was copied to.
"""
if not storage_client:
storage_client = storage.StorageClient()
if not output_path:
_, output_path = tempfile.mkstemp()
with open(output_path, mode='wb') as f:
storage_client.download(full_bucket_path=file_path, output_file=f)
return output_path
def _get_filetype_parser(file_path, parser_type):
"""Return a parser function for parsing the file.
Args:
file_path (str): The file path.
parser_type (str): The file parser type.
Returns:
function: The parser function.
"""
filetype_handlers = {
'json': {
'string': _parse_json_string,
'file': _parse_json_file
},
'yaml': {
'string': _parse_yaml,
'file': _parse_yaml
}
}
file_ext = file_path.split('.')[-1]
if file_ext not in filetype_handlers:
raise util_errors.InvalidFileExtensionError(
'Unsupported file type: {}'.format(file_ext))
if parser_type not in filetype_handlers[file_ext]:
raise util_errors.InvalidParserTypeError(
'Unsupported parser type: {}'.format(parser_type))
return filetype_handlers[file_ext][parser_type]
def _read_file_from_gcs(file_path, storage_client=None):
"""Load file from GCS.
Args:
file_path (str): The GCS path to the file.
storage_client (storage.StorageClient): The Storage API Client to use
for downloading the file using the API.
Returns:
dict: The parsed dict from the loaded file.
"""
if not storage_client:
storage_client = storage.StorageClient()
file_content = storage_client.get_text_file(full_bucket_path=file_path)
parser = _get_filetype_parser(file_path, 'string')
return parser(file_content)
def _read_file_from_local(file_path):
"""Load rules file from local path.
Args:
file_path (str): The path to the file.
Returns:
dict: The parsed dict from the loaded file.
"""
with open(os.path.abspath(file_path), 'r') as rules_file:
parser = _get_filetype_parser(file_path, 'file')
return parser(rules_file)
def _parse_json_string(data):
"""Parse the data from a string of json.
Args:
data (str): String data to parse into json.
Returns:
dict: The json string successfully parsed into a dict.
Raises:
ValueError: If there was an error parsing the data.
"""
try:
return json.loads(data)
except ValueError as json_error:
raise json_error
def _parse_json_file(data):
"""Parse the data from a json file.
Args:
data (filepointer): File-like object containing a Json document,
to be parsed into json.
Returns:
dict: The file successfully parsed into a dict.
Raises:
ValueError: If there was an error parsing the file.
"""
try:
return json.load(data)
except ValueError as json_error:
raise json_error
def _parse_yaml(data):
"""Parse yaml data.
Args:
data (stream): A yaml data stream to parse.
Returns:
dict: The stream successfully parsed into a dict.
Raises:
YAMLError: If there was an error parsing the stream.
"""
try:
return yaml.safe_load(data)
except yaml.YAMLError as yaml_error:
LOGGER.exception(yaml_error)
raise yaml_error
|
Python
| 0
|
@@ -1976,17 +1976,24 @@
-_
+tmp_file
, output
@@ -2018,16 +2018,106 @@
kstemp()
+%0A # Ensure the handle returned by mkstemp is not leaked.%0A os.close(tmp_file)
%0A%0A wi
|
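Decoded, the fix keeps the file descriptor returned by tempfile.mkstemp() and closes it immediately: mkstemp returns an open OS-level handle, which the old "_, output_path" unpacking silently leaked on every call:

if not output_path:
    tmp_file, output_path = tempfile.mkstemp()
    # Ensure the handle returned by mkstemp is not leaked.
    os.close(tmp_file)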
2ae23b275eb0ac919fe4fd874663915beb3e7bac
|
Add domain tag to celery time_to_run metric
|
corehq/celery_monitoring/signals.py
|
corehq/celery_monitoring/signals.py
|
import datetime
import inspect
from django.core.cache import cache
from celery.signals import before_task_publish, task_postrun, task_prerun
from dimagi.utils.parsing import string_to_utc_datetime
from corehq.util.metrics import push_metrics
from corehq.util.quickcache import quickcache
@before_task_publish.connect
def celery_add_time_sent(headers=None, body=None, **kwargs):
info = headers if 'task' in headers else body
task_id = info['id']
eta = info['eta']
if eta:
eta = TimeToStartTimer.parse_iso8601(eta)
TimeToStartTimer(task_id).start_timing(eta)
@task_prerun.connect
def celery_record_time_to_start(task_id=None, task=None, **kwargs):
from corehq.util.metrics import metrics_counter, metrics_gauge
from corehq.util.metrics.const import MPM_MAX
tags = {
'celery_task_name': task.name,
'celery_queue': task.queue,
}
timer = TimeToStartTimer(task_id)
try:
time_to_start = timer.stop_and_pop_timing()
except TimingNotAvailable:
metrics_counter('commcare.celery.task.time_to_start_unavailable', tags=tags)
else:
metrics_gauge('commcare.celery.task.time_to_start', time_to_start.total_seconds(), tags=tags,
multiprocess_mode=MPM_MAX)
get_task_time_to_start.set_cached_value(task_id).to(time_to_start)
TimeToRunTimer(task_id).start_timing()
@task_postrun.connect
def celery_record_time_to_run(task_id=None, task=None, state=None, **kwargs):
from corehq.util.metrics import (
DAY_SCALE_TIME_BUCKETS,
metrics_counter,
metrics_histogram,
)
get_task_time_to_start.clear(task_id)
tags = {
'celery_task_name': task.name,
'celery_queue': task.queue,
'state': state,
}
timer = TimeToRunTimer(task_id)
try:
time_to_run = timer.stop_and_pop_timing()
except TimingNotAvailable:
metrics_counter('commcare.celery.task.time_to_run_unavailable', tags=tags)
else:
metrics_histogram(
'commcare.celery.task.time_to_run.seconds', time_to_run.total_seconds(),
bucket_tag='duration', buckets=DAY_SCALE_TIME_BUCKETS, bucket_unit='s',
tags=tags
)
@task_postrun.connect
def celery_push_metrics(**kwargs):
push_metrics()
@quickcache(['task_id'])
def get_task_time_to_start(task_id):
pass # Actual values are set by the celery event hooks below
class TimingNotAvailable(Exception):
pass
class CeleryTimer(object):
def __init__(self, task_id, timing_type):
self.task_id = task_id
self.timing_type = timing_type
@property
def _cache_key(self):
return 'task.{}.{}'.format(self.task_id, self.timing_type)
def start_timing(self, eta=None):
cache.set(self._cache_key, eta or datetime.datetime.utcnow(), timeout=3 * 24 * 60 * 60)
def stop_and_pop_timing(self):
"""
Return timedelta since running start_timing
Only the first call to stop_and_pop_timing will return a timedelta;
subsequent calls will return None until the next time start_timing is called.
This helps avoid double-recording timings (for example when a task is retried).
"""
time_sent = cache.get(self._cache_key)
if time_sent is None:
raise TimingNotAvailable()
cache.delete(self._cache_key)
return datetime.datetime.utcnow() - time_sent
@staticmethod
def parse_iso8601(datetime_string):
return string_to_utc_datetime(datetime_string)
class TimeToStartTimer(CeleryTimer):
def __init__(self, task_id):
super(TimeToStartTimer, self).__init__(task_id, timing_type='time_sent')
class TimeToRunTimer(CeleryTimer):
def __init__(self, task_id):
super(TimeToRunTimer, self).__init__(task_id, timing_type='time_started')
def get_domain_from_task(task, args, kwargs):
undecorated_task_function = inspect.unwrap(task)
call_args = inspect.getcallargs(undecorated_task_function, *args, **kwargs)
if 'domain' in call_args:
return call_args['domain']
elif 'domain_name' in call_args:
return call_args['domain_name']
else:
return None
|
Python
| 0.000001
|
@@ -1460,32 +1460,52 @@
state=None,
-**kw
args
+=None, kwargs=None, **kw
):%0A from
@@ -1624,16 +1624,71 @@
%0A )%0A%0A
+ domain = get_domain_from_task(task, args, kwargs)%0A%0A
get_
@@ -1806,24 +1806,24 @@
task.queue,%0A
-
'sta
@@ -1834,16 +1834,51 @@
state,%0A
+ 'domain': domain or 'N/A',%0A
%7D%0A
|
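Decoded, the diff threads the task's positional and keyword arguments into the postrun hook (renaming the catch-all to **kw, since kwargs is now a named parameter) and derives a domain tag from them. Reconstructed shape of the changed handler:

@task_postrun.connect
def celery_record_time_to_run(task_id=None, task=None, state=None,
                              args=None, kwargs=None, **kw):
    ...
    domain = get_domain_from_task(task, args, kwargs)

    tags = {
        'celery_task_name': task.name,
        'celery_queue': task.queue,
        'state': state,
        'domain': domain or 'N/A',
    }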
157f5151b4935c1f7f963addb06ea33ea12146e6
|
replace StreamFieldPanel
|
meinberlin/apps/cms/models/pages.py
|
meinberlin/apps/cms/models/pages.py
|
from django.db import models
from wagtail import blocks
from wagtail import fields
from wagtail.admin import edit_handlers
from wagtail.images.edit_handlers import ImageChooserPanel
from wagtail.models import Page
from wagtail.snippets.edit_handlers import SnippetChooserPanel
from meinberlin.apps.actions import blocks as actions_blocks
from meinberlin.apps.cms import blocks as cms_blocks
class SimplePage(Page):
body = fields.RichTextField(blank=True)
content_panels = [
edit_handlers.FieldPanel('title'),
edit_handlers.FieldPanel('body'),
]
subpage_types = []
class StreamfieldSimplePage(Page):
body = fields.StreamField([
('paragraph', blocks.RichTextBlock()),
('html', blocks.RawHTMLBlock())
], blank=True)
content_panels = [
edit_handlers.FieldPanel('title'),
edit_handlers.StreamFieldPanel('body'),
]
subpage_types = []
class HomePage(Page):
body = fields.StreamField([
('paragraph', blocks.RichTextBlock(
template='meinberlin_cms/blocks/richtext_block.html'
)),
('call_to_action', cms_blocks.CallToActionBlock()),
('image_call_to_action', cms_blocks.ImageCallToActionBlock()),
('columns_text', cms_blocks.ColumnsBlock()),
('activities', actions_blocks.PlatformActivityBlock()),
('accordion', cms_blocks.DocsBlock()),
('infographic', cms_blocks.InfographicBlock()),
('map_teaser', cms_blocks.MapTeaserBlock())
])
subtitle = models.CharField(max_length=120)
header_image = models.ForeignKey(
'meinberlin_cms.CustomImage',
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name='+'
)
storefront = models.ForeignKey(
'meinberlin_cms.Storefront',
on_delete=models.SET_NULL,
null=True,
related_name='+'
)
content_panels = Page.content_panels + [
edit_handlers.FieldPanel('subtitle'),
ImageChooserPanel('header_image'),
edit_handlers.StreamFieldPanel('body'),
SnippetChooserPanel('storefront')
]
class DocsPage(Page):
body = fields.StreamField([
('documents_list', cms_blocks.DocsBlock()),
('header', blocks.CharBlock(
template='meinberlin_cms/blocks/header.html'))
])
description = fields.RichTextField(blank=True)
content_panels = Page.content_panels + [
edit_handlers.FieldPanel('description'),
edit_handlers.StreamFieldPanel('body'),
]
class Meta:
verbose_name = 'Documents'
subpage_types = []
|
Python
| 0
|
@@ -850,38 +850,32 @@
edit_handlers.
-Stream
FieldPanel('body
@@ -2033,38 +2033,32 @@
edit_handlers.
-Stream
FieldPanel('body
@@ -2489,22 +2489,16 @@
andlers.
-Stream
FieldPan
|
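Decoded, the diff drops the Stream prefix in all three panel lists. In Wagtail 3 FieldPanel handles StreamField values directly and StreamFieldPanel is deprecated, so each list ends up as, for example:

content_panels = [
    edit_handlers.FieldPanel('title'),
    edit_handlers.FieldPanel('body'),
]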
3e8e59656892b8ba8e46affb5987a2fbf633ae00
|
Add some authentication reference documentation.
|
googleanalytics/auth/__init__.py
|
googleanalytics/auth/__init__.py
|
# encoding: utf-8
"""
Convenience functions for authenticating with Google
and asking for authorization with Google, with
`authenticate` at its core.
`authenticate` will do what it says on the tin, but unlike
the basic `googleanalytics.oauth.authenticate`, it also tries
to get existing credentials from the keyring, from environment
variables, it prompts for information when required and so on.
"""
from . import keyring
from . import oauth
from .oauth import Flow, Credentials
def navigate(accounts, account=None, webproperty=None, profile=None):
scope = accounts
if account:
scope = scope[account]
if webproperty:
if account:
scope = scope.webproperties[webproperty].profile
else:
raise KeyError("Cannot navigate to a webproperty or profile without knowing the account.")
if profile:
if account and webproperty:
scope = scope.profiles[profile]
else:
raise KeyError("Cannot navigate to a profile without knowing account and webproperty.")
return scope
def find(**kwargs):
return oauth.Credentials.find(**kwargs)
def identity(name):
return find(identity=name)
def authenticate(
client_id=None, client_secret=None,
client_email=None, private_key=None,
access_token=None, refresh_token=None,
account=None, webproperty=None, profile=None,
identity=None, prefix=None, suffix=None,
interactive=False, save=False):
credentials = oauth.Credentials.find(
valid=True,
interactive=interactive,
prefix=prefix,
suffix=suffix,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
access_token=access_token,
refresh_token=refresh_token,
identity=identity,
)
if credentials.incomplete:
if interactive:
credentials = authorize(
client_id=credentials.client_id,
client_secret=credentials.client_secret,
save=save,
identity=credentials.identity,
prefix=prefix,
suffix=suffix,
)
elif credentials.type == 2:
credentials = authorize(
client_email=credentials.client_email,
private_key=credentials.private_key,
identity=credentials.identity,
save=save,
)
else:
raise KeyError("Cannot authenticate: enable interactive authorization, pass a token or use a service account.")
accounts = oauth.authenticate(credentials)
scope = navigate(accounts, account=account, webproperty=webproperty, profile=profile)
return scope
def authorize(client_id=None, client_secret=None, client_email=None, private_key=None, save=False, identity=None, prefix=None, suffix=None):
base_credentials = oauth.Credentials.find(
valid=True,
interactive=True,
identity=identity,
client_id=client_id,
client_secret=client_secret,
client_email=client_email,
private_key=private_key,
prefix=prefix,
suffix=suffix,
)
if base_credentials.incomplete:
credentials = oauth.authorize(base_credentials.client_id, base_credentials.client_secret)
credentials.identity = base_credentials.identity
else:
credentials = base_credentials
if save:
keyring.set(credentials.identity, credentials.serialize())
return credentials
def revoke(client_id, client_secret, access_token=None, refresh_token=None, identity=None, prefix=None, suffix=None):
credentials = oauth.Credentials.find(
complete=True,
interactive=False,
identity=identity,
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
refresh_token=refresh_token,
prefix=prefix,
suffix=suffix,
)
retval = credentials.revoke()
keyring.delete(credentials.identity)
return retval
|
Python
| 0
|
@@ -1487,24 +1487,591 @@
False):%0A
+%22%22%22%0A The %60authenticate%60 function will authenticate the user with the Google Analytics API, %0A using a variety of strategies: keyword arguments provided to this function, credentials%0A stored in in environment variables, credentials stored in the keychain and, finally, by %0A asking for missing information interactively in a command-line prompt.%0A%0A If necessary (but only if %60interactive=True%60) this function will also allow the user %0A to authorize this Python module to access Google Analytics data on their behalf, %0A using an OAuth2 token.%0A %22%22%22%0A
%0A credent
@@ -4289,24 +4289,241 @@
ffix=None):%0A
+ %22%22%22%0A Given a client id, client secret and either an access token or a refresh token, %0A revoke OAuth access to the Google Analytics data and remove any stored credentials %0A that use these tokens.%0A %22%22%22%0A%0A
credenti
|
e3ac422da8a0c873a676b57ff796d15fac6fc532
|
change haproxy config formatting
|
bin/haproxy_get_ssl.py
|
bin/haproxy_get_ssl.py
|
#!/usr/bin/env python
import redis, sys, os, json, jinja2
from jinja2 import Template
r_server = redis.StrictRedis('127.0.0.1', db=2)
check = r_server.get("need_CSR")
if check == "1":
i_key = "owner-info"
data=json.loads (r_server.get(i_key))
email = data['Email']
hostname = data['Hostname']
frontend_conf = ""
backend_conf = ""
#### get certificate
os.system("mkdir -p /opt/certs/letsencrypt")
os.system("cd /opt/certs && openssl req -inform pem -outform der -in server.csr -out ./letsencrypt/server.der")
os.system("service haproxy stop")
request = ("cd /opt/certs/letsencrypt && letsencrypt certonly --csr server.der --standalone --non-interactive --agree-tos --email %s --standalone-supported-challenges http-01" % email)
os.system(request)
os.system(" cd /opt/certs/letsencrypt && cat 0001_chain.pem ../server.key > ../server.bundle.pem")
os.system("rm -rf /opt/certs/letsencrypt")
r_server.set("need_CSR", "0")
r_server.bgsave
### reconfigure haproxy
app_key="apps"
data_apps=r_server.get(app_key)
os.system("rm -rf /opt/haproxy/haproxy.cfg")
config_template=open('/opt/controlbox/bin/templates/haproxy.cfg').read()
if data_apps:
apps=json.loads(data_apps)
for app in apps:
i = "use_backend %s if { hdr_end(host) -i %s }\n " % (app["name"], app["name"] + "." + data['Hostname'])
frontend_conf = frontend_conf + i
ii = ("backend %s\n balance roundrobin\n server %s 127.0.0.1:%s check\n " % (app["name"], app["name"], app["port"]))
backend_conf = backend_conf + ii
template = Template(config_template)
config = (template.render(hostname=hostname, crt_path="/opt/certs/server.bundle.pem", subdomain1=frontend_conf, backend2=backend_conf))
else:
template = Template(config_template)
config = (template.render(hostname=hostname, crt_path="/opt/certs/server.bundle.pem"))
open("/opt/haproxy/haproxy.cfg", "w").write(config)
os.system("service haproxy start")
else:
print "Don't need new certificate"
|
Python
| 0
|
@@ -1495,26 +1495,24 @@
%25s check%5Cn
-
%22 %25 (app%5B%22na
|
cc6c80ad64fe7f4d4cb2b4e367c595f1b08f9d3b
|
Remove script crash when no sonos is found
|
i3blocks-sonos.py
|
i3blocks-sonos.py
|
#!/usr/bin/env python3
#
# By Henrik Lilleengen (mail@ithenrik.com)
#
# Released under the MIT License: https://opensource.org/licenses/MIT
import soco, sys
speakers = list(soco.discover())
state = speakers[0].get_current_transport_info()['current_transport_state']
if state == 'PLAYING':
if len(sys.argv) > 1 and sys.argv[1] == "1":
speakers[0].stop()
print("")
else:
track = speakers[0].get_current_track_info()
print(" " + track['title'] + " - " + track['artist'])
else:
if len(sys.argv) > 1 and sys.argv[1] == "1":
speakers[0].play()
track = speakers[0].get_current_track_info()
print(" " + track['title'] + " - " + track['artist'])
else:
print("")
|
Python
| 0
|
@@ -186,17 +186,42 @@
ver())%0A%0A
-%0A
+if len(speakers) %3E 0:%0A
state =
@@ -289,16 +289,20 @@
tate'%5D%0A%0A
+
if state
@@ -316,24 +316,28 @@
AYING':%0A
+
+
if len(sys.a
@@ -369,32 +369,36 @@
== %221%22:%0A
+
speakers%5B0%5D.stop
@@ -400,16 +400,20 @@
.stop()%0A
+
@@ -423,16 +423,20 @@
nt(%22%EF%81%8C%22)%0A
+
else
@@ -429,32 +429,36 @@
)%0A else:%0A
+
track =
@@ -494,32 +494,36 @@
_info()%0A
+
+
print(%22%EF%81%8B %22 + tra
@@ -561,22 +561,30 @@
tist'%5D)%0A
+
else:%0A
+
if l
@@ -624,32 +624,36 @@
== %221%22:%0A
+
speakers%5B0%5D.play
@@ -655,16 +655,20 @@
.play()%0A
+
@@ -712,32 +712,36 @@
_info()%0A
+
+
print(%22%EF%81%8B %22 + tra
@@ -771,32 +771,36 @@
rack%5B'artist'%5D)%0A
+
else:%0A
@@ -785,32 +785,36 @@
)%0A else:%0A
+
print(%22%EF%81%8C
|
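Decoded, the diff is one guard plus re-indentation: the whole body moves under a length check so discovery returning an empty list no longer raises IndexError on speakers[0]. Reconstructed outline (inner logic unchanged, one level deeper); note that as committed a None return from soco.discover() would still fail at the list() call:

speakers = list(soco.discover())

if len(speakers) > 0:
    state = speakers[0].get_current_transport_info()['current_transport_state']

    if state == 'PLAYING':
        ...
    else:
        ...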
74084defad8222ba69340d0d983acdf33ddef17c
|
Correct the test of assertEqual failing.
|
calexicon/dates/tests/test_dates.py
|
calexicon/dates/tests/test_dates.py
|
import unittest
from datetime import date, timedelta
from calexicon.dates import DateWithCalendar
class TestDateWithCalendar(unittest.TestCase):
def setUp(self):
date_dt = date(2010, 8, 1)
self.date_wc = DateWithCalendar(None, date_dt)
def test_equality(self):
self.assertTrue(self.date_wc != date(2010, 8, 1))
def test_not_equal(self):
""" Check that assertNotEqual works correctly.
The main purpose of this test is to have code
coverage for the false branch of the
custom assertEqual. """
self.assertNotEqual(self.date_wc, date(2010, 8, 1))
def test_comparisons(self):
self.assertTrue(self.date_wc < date(2010, 8, 2))
self.assertFalse(self.date_wc < date(2010, 7, 31))
self.assertTrue(self.date_wc > date(2010, 7, 2))
self.assertFalse(self.date_wc > date(2010, 8, 31))
def test_nonstrict_comparisons(self):
self.assertTrue(self.date_wc <= date(2010, 8, 2))
self.assertFalse(self.date_wc <= date(2010, 7, 31))
self.assertTrue(self.date_wc >= date(2010, 7, 2))
self.assertFalse(self.date_wc >= date(2010, 8, 31))
self.assertTrue(self.date_wc <= date(2010, 8, 1))
self.assertTrue(self.date_wc >= date(2010, 8, 1))
def test_subtraction(self):
self.assertEqual(self.date_wc - date(2012, 10, 30), timedelta(days=-821))
|
Python
| 0.000021
|
@@ -570,16 +570,33 @@
al. %22%22%22%0A
+ try:%0A
@@ -602,27 +602,24 @@
self.assert
-Not
Equal(self.d
@@ -635,32 +635,80 @@
ate(2010, 8, 1))
+%0A except AssertionError:%0A pass
%0A%0A def test_c
|
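Decoded, the diff replaces assertNotEqual with assertEqual wrapped in a try/except, so the test deliberately drives the custom assertEqual down its false branch (the stated purpose of the test) and swallows the expected failure:

def test_not_equal(self):
    try:
        self.assertEqual(self.date_wc, date(2010, 8, 1))
    except AssertionError:
        pass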
a51231f4a9e588718f77c06481d20eb6d090e996
|
fix authorization policy a bit
|
caliopen/api/user/authentication.py
|
caliopen/api/user/authentication.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import base64
from zope.interface import implements, implementer
from pyramid.interfaces import IAuthenticationPolicy, IAuthorizationPolicy
from pyramid.security import Everyone, NO_PERMISSION_REQUIRED
from pyramid.httpexceptions import HTTPOk
from caliopen.base.user.core import User
log = logging.getLogger(__name__)
class _NotAuthenticated(Exception):
"""Raised when the authenticated user cannot be built."""
class AuthenticatedUser(object):
"""Represent an authenticated user."""
def __init__(self, request):
if 'Authorization' not in request.headers:
raise _NotAuthenticated
# Case sensitive ?
authorization = request.headers['Authorization'].split()
if authorization[0] != 'Bearer' and len(authorization) != 2:
raise _NotAuthenticated
log.debug('Authentication via Access Token')
auth = base64.decodestring(authorization[1])
if ':' not in auth:
raise _NotAuthenticated
user_id, token = auth.split(':')
infos = request.cache.get(user_id)
if infos.get('access_token') != token:
raise _NotAuthenticated
self.user_id = user_id
self.access_token = token
self._user = None
def _load_user(self):
if self._user:
return
self._user = User.get(self.user_id)
@property
def id(self):
self._load_user()
return self._user.user_id
@property
def username(self):
self._load_user()
return self._user.name
class AuthenticationPolicy(object):
"""Global authentication policy."""
implements(IAuthenticationPolicy)
def authenticated_userid(self, request):
if hasattr(request, '_CaliopenUser'):
return request._CaliopenUser
try:
request._CaliopenUser = AuthenticatedUser(request)
except _NotAuthenticated:
return None
return request._CaliopenUser
def effective_principals(self, request):
account = self.authenticated_userid(request)
if not account:
return [Everyone]
return ["%s:%s" % (account.user_id, account.access_token)]
def unauthenticated_userid(self, request):
try:
return AuthenticatedUser(request)
except _NotAuthenticated:
return None
def remember(self, request, principal, **kw):
"""Token Key mechanism can't remember anyone."""
return []
def forget(self, request):
return [('WWW-Authenticate', 'Bearer realm="Caliopen"')]
@implementer(IAuthorizationPolicy)
class AuthorizationPolicy(object):
"""Basic authorization policy."""
def permits(self, context, principals, permission):
""" Return an instance of
:class:`pyramid.security.ACLAllowed` instance if the policy
permits access, return an instance of
:class:`pyramid.security.ACLDenied` if not."""
if permission == NO_PERMISSION_REQUIRED:
return True
if not principals:
False
token = principals[0]
result = token
log.info('principals %r, result %r' % (principals, result))
return True
def principals_allowed_by_permission(self, context, permission):
raise NotImplementedError
|
Python
| 0.000001
|
@@ -279,50 +279,8 @@
RED%0A
-from pyramid.httpexceptions import HTTPOk%0A
%0A%0Afr
@@ -659,35 +659,8 @@
ed%0A%0A
- # Case sensitive ?%0A
@@ -932,16 +932,65 @@
ion%5B1%5D)%0A
+ # authentication values is user_id:token%0A
@@ -3172,90 +3172,184 @@
-result = token%0A log.info('principals %25r, result %25r' %25 (principals, result))
+if ':' in token and permission == 'authenticated':%0A # All managed objects belong to authenticated user%0A # no other policy to apply%0A return True
%0A
@@ -3356,27 +3356,28 @@
return
-Tru
+Fals
e%0A%0A def p
|
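Decoded, the diff drops the unused HTTPOk import, documents the user_id:token authentication format, and rewrites the tail of permits() to allow access only for a ':'-separated token under the 'authenticated' permission, denying everything else. Reconstructed tail (note that the bare False under "if not principals" still lacks a return, so empty principals simply fall through to the final return False):

token = principals[0]
if ':' in token and permission == 'authenticated':
    # All managed objects belong to authenticated user
    # no other policy to apply
    return True
return False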
aea456e92c434e56ae589e996a9b775af6e1f6eb
|
Update client
|
calplus/v1/object_storage/client.py
|
calplus/v1/object_storage/client.py
|
import logging
import calplus.conf
from calplus.base import BaseClient
from calplus.exceptions import DriverException
CONF = calplus.conf.CONF
LOG = logging.getLogger(__name__)
class Client(BaseClient):
"""Top-level object to access CAL API
    This class must extend the base.Singleton class to make
    sure only one instance of it is ever created."""
def __init__(self, provider, cloud_config):
BaseClient.__init__(self, CONF.object_storage.driver_path,
provider, cloud_config)
def create_container(self, container, **kwargs):
"""Create container
:param container(string): container name.
:param **kwargs(dict): extend args for specific driver.
"""
try:
LOG.debug('create_container() with %s is success.', self.driver)
return self.driver.create_container(container, **kwargs)
except DriverException as e:
LOG.exception('create_container() with %s raised\
an exception %s.', self.driver, e)
def delete_container(self, container):
"""Delete container
:param container: container name.
"""
try:
LOG.debug('delete_container() with %s is success.', self.driver)
return self.driver.delete_container(container)
except DriverException as e:
LOG.exception('delete_container() with %s raised\
an exception %s.', self.driver, e)
def list_containers(self):
"""List owned containers
"""
LOG.debug('list_buckets() with %s is success.', self.driver)
return self.driver.list_container()
def stat_container(self, container):
"""Stat container metadata
:param container: container name.
"""
LOG.debug('stat_container() with %s is success.', self.driver)
return self.driver.stat_container(container)
def update_container(self, container, headers, **kwargs):
"""Update container metadata
:param container: container name.
:param headers(dict): additional headers to include in the request.
:param **kwargs(dict): extend args for specific driver.
"""
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_container(container, headers, **kwargs)
def upload_object(self, container, obj, contents,
content_length=None, **kwargs):
"""Upload object
:param container: container name.
:param obj: object name.
:param contents: object content.
:param content_length(int): content length.
:param **kwargs(dict): extend args for specific driver.
"""
try:
LOG.debug('upload_object() with %s is success.', self.driver)
            return self.driver.upload_object(container, obj,
                                             contents=contents,
                                             content_length=content_length,
                                             **kwargs)
except DriverException as e:
LOG.exception('upload_object() with %s raised\
an exception %s.', self.driver, e)
def download_object(self, container, obj, **kwargs):
"""Download specific object
:param container: container name.
:param obj: object name.
"""
try:
LOG.debug('download_object() with %s is success.', self.driver)
return self.driver.download_object(container, obj, **kwargs)
except DriverException as e:
LOG.exception('download_object() with %s raised\
an exception %s.', self.driver, e)
def stat_object(self, container, obj):
"""Stat object metadata
:param container: container name.
:param obj: object name.
"""
LOG.debug('stat_object() with %s is success.', self.driver)
return self.driver.stat_object(container, obj)
def delete_object(self, container, obj, **kwargs):
"""Delete object in container
:param container: container name.
:param obj: obj name.
"""
try:
LOG.debug('delete_object() with %s is success.', self.driver)
return self.driver.delete_object(container, obj, **kwargs)
except DriverException as e:
            LOG.exception('delete_object() with %s raised\
an exception %s.', self.driver, e)
def list_container_objects(self, container):
"""List container objects
:param container: container name.
"""
LOG.debug('list_container_objects() with %s is success.', self.driver)
return self.driver.list_container_objects(container)
def update_object(self, container, obj, headers, **kwargs):
"""Update object metadata
:param container: container name.
:param obj: object name.
:param headers(dict): additional headers to include in the request.
"""
try:
LOG.debug('update_object() with %s is success.', self.driver)
return self.driver.update_object(container, obj, headers, **kwargs)
except DriverException as e:
            LOG.exception('update_object() with %s raised\
an exception %s.', self.driver, e)
def copy_object(self, container, obj, **kwargs):
"""Copy object
:param container: container name.
:param obj: object name.
"""
try:
LOG.debug('copy_object() with %s is success.', self.driver)
return self.driver.copy_object(container, obj, **kwargs)
except DriverException as e:
LOG.exception('copy_object() with %s raised\
an exception %s.', self.driver, e)
|
Python
| 0.000001
|
@@ -1979,39 +1979,40 @@
elf, container,
-headers
+metadata
, **kwargs):%0A
@@ -2095,39 +2095,40 @@
%0A :param
-headers
+metadata
(dict): addition
@@ -2122,39 +2122,40 @@
ct): additional
-headers
+metadata
to include in t
@@ -2367,23 +2367,24 @@
tainer,
-headers
+metadata
, **kwar
@@ -4882,39 +4882,40 @@
container, obj,
-headers
+metadata
, **kwargs):%0A
@@ -5036,23 +5036,24 @@
:param
-headers
+metadata
(dict):
@@ -5063,23 +5063,24 @@
itional
-headers
+metadata
to incl
@@ -5262,16 +5262,62 @@
obj,
- headers
+%0A metadata
, **
@@ -5515,32 +5515,85 @@
container, obj,
+ metadata=None,%0A destination=None,
**kwargs):%0A
@@ -5629,32 +5629,44 @@
param container:
+ destination
container name.
@@ -5677,32 +5677,44 @@
:param obj:
+ destination
object name.%0A
@@ -5703,32 +5703,445 @@
on object name.%0A
+ :param destination: The container and object name of the destination%0A object in the form of /container/object; if None,%0A the copy will use the source as the destination.%0A :param metadata(dict): additional metadata(headers)%0A to include in the request%0A :param **kwargs(dict): extend args for specific driver.%0A
%22%22%22%0A
@@ -6271,32 +6271,119 @@
(container, obj,
+ metadata=metadata,%0A destination=destination,
**kwargs)%0A
|
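Decoded, the diff renames the headers parameter to metadata in update_container and update_object (matching the driver-side vocabulary) and extends copy_object with explicit metadata and destination arguments that are forwarded to the driver. Reconstructed signature and call:

def copy_object(self, container, obj, metadata=None,
                destination=None, **kwargs):
    ...
    return self.driver.copy_object(container, obj,
                                   metadata=metadata,
                                   destination=destination,
                                   **kwargs)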