commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
|---|---|---|---|---|---|---|---|
2a63c3cc4a795e23ff00d7c2273ee40939ec3dea
|
mark string literal as regex to avoid runtime warning in python 3
|
custom/aaa/urls.py
|
custom/aaa/urls.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from django.conf.urls import url, include
from custom.aaa.views import (
AggregationScriptPage,
LocationFilterAPI,
ProgramOverviewReport,
ProgramOverviewReportAPI,
UnifiedBeneficiaryReport,
UnifiedBeneficiaryReportAPI,
UnifiedBeneficiaryDetailsReport,
UnifiedBeneficiaryDetailsReportAPI,
)
dashboardurls = [
url('^program_overview/', ProgramOverviewReport.as_view(), name='program_overview'),
url('^unified_beneficiary/$', UnifiedBeneficiaryReport.as_view(), name='unified_beneficiary'),
url(
'^unified_beneficiary/(?P<details_type>[\w-]+)/(?P<beneficiary_id>[\w-]+)/$',
UnifiedBeneficiaryDetailsReport.as_view(),
name='unified_beneficiary_details'
),
]
dataurls = [
url('^program_overview/', ProgramOverviewReportAPI.as_view(), name='program_overview_api'),
url('^unified_beneficiary/', UnifiedBeneficiaryReportAPI.as_view(), name='unified_beneficiary_api'),
url(
'^unified_beneficiary_details/',
UnifiedBeneficiaryDetailsReportAPI.as_view(),
name='unified_beneficiary_details_api'
),
url('^location_api/', LocationFilterAPI.as_view(), name='location_api'),
url(r'^aggregate/', AggregationScriptPage.as_view(), name=AggregationScriptPage.urlname),
]
urlpatterns = [
url(r'^aaa_dashboard/', include(dashboardurls)),
url(r'^aaa_dashboard_data/', include(dataurls)),
]
|
Python
| 0.000008
|
@@ -611,32 +611,33 @@
url(%0A
+r
'%5Eunified_benefi
|
0240627e799672a3386202523015949588834ae2
|
Update Sanctions test to reflect new _on_reject behavior
|
tests/test_registrations/test_models.py
|
tests/test_registrations/test_models.py
|
# -*- coding: utf-8 -*-
"""Unit tests for models and their factories."""
from nose.tools import * # noqa (PEP8 asserts)
import mock
from modularodm import Q
import datetime as dt
from website.models import MetaSchema, DraftRegistrationApproval
from tests.factories import (
UserFactory, ApiOAuth2ApplicationFactory, NodeFactory, PointerFactory,
ProjectFactory, NodeLogFactory, WatchConfigFactory,
NodeWikiFactory, RegistrationFactory, UnregUserFactory,
ProjectWithAddonFactory, UnconfirmedUserFactory, CommentFactory, PrivateLinkFactory,
AuthUserFactory, DashboardFactory, FolderFactory,
NodeLicenseRecordFactory, DraftRegistrationFactory
)
from tests.test_registrations.base import RegistrationsTestBase
class TestDraftRegistrations(RegistrationsTestBase):
def test_factory(self):
draft = DraftRegistrationFactory()
assert_is_not_none(draft.branched_from)
assert_is_not_none(draft.initiator)
assert_is_not_none(draft.registration_schema)
user = AuthUserFactory()
draft = DraftRegistrationFactory(initiator=user)
assert_equal(draft.initiator, user)
node = ProjectFactory()
draft = DraftRegistrationFactory(branched_from=node)
assert_equal(draft.branched_from, node)
assert_equal(draft.initiator, node.creator)
# Pick an arbitrary v2 schema
schema = MetaSchema.find(
Q('schema_version', 'eq', 2)
)[0]
data = {'some': 'data'}
draft = DraftRegistrationFactory(registration_schema=schema, registration_metadata=data)
assert_equal(draft.registration_schema, schema)
assert_equal(draft.registration_metadata, data)
@mock.patch('website.project.model.Node.register_node')
def test_register(self, mock_register_node):
self.draft.register(self.auth)
mock_register_node.assert_called_with(
schema=self.draft.registration_schema,
auth=self.auth,
data=self.draft.registration_metadata,
)
def test_update_metadata_tracks_changes(self):
self.draft.registration_metadata = {
'foo': {
'value': 'bar',
},
'a': {
'value': 1,
},
'b': {
'value': True
},
}
changes = self.draft.update_metadata({
'foo': {
'value': 'foobar',
},
'a': {
'value': 1,
},
'b': {
'value': True,
},
'c': {
'value': 2,
},
})
self.draft.save()
for key in ['foo', 'c']:
assert_in(key, changes)
def test_update_metadata_interleaves_comments_by_created_timestamp(self):
now = dt.datetime.today()
comments = []
times = (now + dt.timedelta(minutes=i) for i in range(6))
for time in times:
comments.append({
'created': time.isoformat(),
'value': 'Foo'
})
orig_data = {
'foo': {
'value': 'bar',
'comments': [comments[i] for i in range(0, 6, 2)]
}
}
self.draft.update_metadata(orig_data)
self.draft.save()
assert_equal(
self.draft.registration_metadata['foo']['comments'],
[comments[i] for i in range(0, 6, 2)]
)
new_data = {
'foo': {
'value': 'bar',
'comments': [comments[i] for i in range(1, 6, 2)]
}
}
self.draft.update_metadata(new_data)
self.draft.save()
assert_equal(
self.draft.registration_metadata['foo']['comments'],
comments,
)
class TestDraftRegistrationApprovals(RegistrationsTestBase):
def setUp(self):
super(TestDraftRegistrationApprovals, self).setUp()
self.approval = DraftRegistrationApproval(
initiated_by=self.user,
meta={
'registration_choice': 'immediate'
}
)
self.authorizer1 = AuthUserFactory()
self.authorizer2 = AuthUserFactory()
self.approval.save()
self.draft.registration_schema = MetaSchema.find_one(
Q('name', 'eq', 'Prereg Challenge') &
Q('schema_version', 'eq', 2)
)
self.draft.approval = self.approval
self.draft.save()
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_on_complete_immediate_creates_registration_for_draft_initiator(self, mock_enquque):
self.approval._on_complete(self.user)
registered_node = self.draft.registered_node
assert_is_not_none(registered_node)
assert_true(registered_node.is_pending_registration)
assert_equal(registered_node.registered_user, self.draft.initiator)
@mock.patch('framework.tasks.handlers.enqueue_task')
def test_on_complete_embargo_creates_registration_for_draft_initiator(self, mock_enquque):
end_date = dt.datetime.now() + dt.timedelta(days=366) # <- leap year
self.approval = DraftRegistrationApproval(
initiated_by=self.user,
meta={
'registration_choice': 'embargo',
'embargo_end_date': end_date.isoformat()
}
)
self.authorizer1 = AuthUserFactory()
self.authorizer2 = AuthUserFactory()
self.approval.save()
self.draft.approval = self.approval
self.draft.save()
self.approval._on_complete(self.user)
registered_node = self.draft.registered_node
assert_is_not_none(registered_node)
assert_true(registered_node.is_pending_embargo)
assert_equal(registered_node.registered_user, self.draft.initiator)
def test_approval_requires_only_a_single_authorizer(self):
with mock.patch.object(self.approval, '_on_complete') as mock_on_complete, mock.patch.object(self.draft, 'get_authorizers', mock.Mock(return_value=[self.authorizer1._id])):
self.approval.approve(self.authorizer1)
assert_true(mock_on_complete.called)
assert_true(self.approval.is_approved)
@mock.patch('website.mails.send_mail')
def test_on_reject(self, mock_send_mail):
self.approval._on_reject(self.user)
assert_equal(self.approval.meta, {})
assert_is_none(self.draft.approval)
assert_false(self.draft.is_pending_review)
assert_true(mock_send_mail.called_once)
|
Python
| 0
|
@@ -6464,103 +6464,8 @@
%7B%7D)%0A
- assert_is_none(self.draft.approval)%0A assert_false(self.draft.is_pending_review)%0A
|
b86d157579b4e625bbe73dfa2d8dd7f24a787394
|
debug fix fix
|
hyml/ext.py
|
hyml/ext.py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# Copyright (c) Marko Manninen <elonmedia@gmail.com>, 2017
#
# entry_points = """
# [babel.extractors]
# hyml = hyml.ext:babel_extract
# """
#
# should be added to distutils setup.py file and then:
#
# [hyml: **.hyml]
# [hyml: **.hy]
# extensions=hyml.ext.babel_extract
#
# to babel.cfg file
import hy, hy.importer as hyi
import itertools
# special reader macro symbol for translating string less verbose way.
# instead of (_ "message") in hy you can do ㎕"message" as long as you have
# defined: (defreader ㎕ [args] `(_ ~@args)) in your program
readermacro = hy.HySymbol("㎕")
# accepted gettext / babel keywords
keywords = [hy.HySymbol("_"),
hy.HySymbol("gettext"),
hy.HySymbol("ngettext"),
hy.HySymbol("lgettext"),
hy.HySymbol("lngettext")]
# string and int are accepted as gettext messages
def is_message(e):
return not isinstance(e, hy.HySymbol) and (isinstance(e, hy.HyString) or isinstance(e, hy.HyInteger))
# create message dictionary
def message(e, f):
singular, plural, context = None, None, None
if f == 0:
singular = str(e)
elif f == 1:
plural = str(e)
else:
context = int(e)
return {"context":context, "singular":singular, "plural":plural}
def extract_from_ast(ast):
d, f = None, 0
def filter_hy(e):
# basicly we are searching for babel keyword expressions here
# and when one is found, it is returned along with:
# linenumber, keyword itself, and message string
global d, f
if isinstance(e, hy.HyExpression) or isinstance(e, list):
if isinstance(e, hy.HyExpression):
d, f = e[0], 0
# recursively filter expression so that only gettext and _ parts are returned
x, f = list(itertools.chain(*filter(None, map(filter_hy, e)))), 0
# reset keyword
d = None
return x
print(1, e, readermacro)
if e == readermacro:
d, f = e, 0
elif is_message(e):
print(2, d, readermacro)
# reader macro is regarded as singular form gettext function
if d == readermacro:
# we dont accept any more argument for gettext / readermacro
d = None
return e.start_line, "gettext", message(e, 0)
# possible keys are:
# ngettext, pgettext, ungettext, dngettext, dgettext, ugettext, gettext, _, N_, npgettext
# but only gettext and _ are supported at the moment
if d in keywords:
# there are no comments available in ast, thus only three items are returned
# mark singular and plural forms. later in chunks
# plural and singular forms are combined. this is not particularly genious
# way of doing it. other recursive parsing technique could handle everything
# more efficiently
msg = message(e, f)
f += 1
return e.start_line, str(d), msg
return filter_hy(ast)
# return list of message items
def items(l, i, n):
return l[i : i + n], l[i + n : i + n + n], l[i + n + n : i + n + n + n]
# detect plural and singular forms of messages
def message_form(t1, t2, t3):
if t2 and "plural" in t2[2] and t2[2]["plural"] != None:
return [t1[2]["singular"], t2[2]["plural"], t3[2]["context"]]
else:
return [t1[2]["singular"]]
# make extracted message list to 4 item chunks
def chunks(l, n):
print(l, n)
for i in range(0, len(l), n):
t1, t2, t3 = items(l, i, n)
if t1[2]["singular"] != None:
# add empty keyword list to the tuple for babel
yield tuple(t1[:2]+[message_form(t1, t2, t3)]+[[]])
def babel_extract(fileobj, *args, **kw):
byte = fileobj.read()
# unfortunately line breaks (line numbers) are lost at this point...
text = "".join(map(chr, byte))
node = hyi.import_buffer_to_hst(text)
tpls = extract_from_ast(node)
return chunks(tpls, 3)
|
Python
| 0.000002
|
@@ -1977,19 +1977,28 @@
rint(1,
-e,
+str(e), str(
readerma
@@ -1993,32 +1993,33 @@
str(readermacro)
+)
%0A if e ==
@@ -2106,25 +2106,17 @@
int(
-2, d, readermacro
+1, str(d)
)%0A
|
e7ae8140beb50e3091c0bd7ad1db4535540c95df
|
remove copyright
|
cwready/cwready.py
|
cwready/cwready.py
|
"""
Clan War Readiness
"""
# -*- coding: utf-8 -*-
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import argparse
import itertools
import os
from collections import defaultdict
from random import choice
import discord
from cogs.utils import checks
from cogs.utils.chat_formatting import box
from cogs.utils.chat_formatting import pagify
from cogs.utils.dataIO import dataIO
from discord.ext import commands
from discord.ext.commands import Context
PATH = os.path.join("data", "cwready")
JSON = os.path.join(PATH, "settings.json")
def nested_dict():
"""Recursively nested defaultdict."""
return defaultdict(nested_dict)
class CWReadiness:
"""Clan War Readinesx"""
def __init__(self, bot):
"""Init."""
self.bot = bot
self.settings = nested_dict()
self.settings.update(dataIO.load_json(JSON))
def check_folder():
"""Check folder."""
os.makedirs(PATH, exist_ok=True)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, {})
def setup(bot):
"""Setup."""
check_folder()
check_file()
n = CWReadiness(bot)
bot.add_cog(n)
|
Python
| 0
|
@@ -25,1112 +25,8 @@
%22%22%0A%0A
-# -*- coding: utf-8 -*-%0A%0A%22%22%22%0AThe MIT License (MIT)%0A%0ACopyright (c) 2017 SML%0A%0APermission is hereby granted, free of charge, to any person obtaining a%0Acopy of this software and associated documentation files (the %22Software%22),%0Ato deal in the Software without restriction, including without limitation%0Athe rights to use, copy, modify, merge, publish, distribute, sublicense,%0Aand/or sell copies of the Software, and to permit persons to whom the%0ASoftware is furnished to do so, subject to the following conditions:%0A%0AThe above copyright notice and this permission notice shall be included in%0Aall copies or substantial portions of the Software.%0A%0ATHE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS%0AOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0AFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0AAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0ALIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING%0AFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER%0ADEALINGS IN THE SOFTWARE.%0A%22%22%22%0A%0A
impo
|
a8339f5a1393745f17eae5d287a85c7212d47789
|
Remove break in def
|
tictactoe.py
|
tictactoe.py
|
from logishort import *
from getch import *
from logipy import logi_led
from logimap import logimap
import time
class TicTacToe:
def __init__(self):
init()
time.sleep(1)
self.accepted_keys = {
't':[0x14, 0, 0],
'y':[0x15, 0, 1],
'u':[0x16, 0, 2],
'g':[0x22, 1, 0],
'h':[0x23, 1, 1],
'j':[0x24, 1, 2],
'b':[0x30, 2, 0],
'n':[0x31, 2, 1],
',':[0x32, 2, 2]
}
self.board = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
all(20, 20, 100)
self.turn_count = 0
self.someone_won = 0
self.draw()
self.ended = False
def getKey(self, c):
if self.ended: break
if self.accepted_keys.get(c) != None and c != b'\x1b':
self.someone_won = self.play(c)
self.turn_count += 1
if self.someone_won == 0 and (c == b'\x1b' or self.turn_count >= 9):
self.someone_won = -1
if self.someone_won != 0:
self.ended = True
shutdown()
def draw(self):
for key in self.accepted_keys:
k = self.accepted_keys[key]
if(self.board[k[1]][k[2]] == 0):
one(k[0], 0, 0, 0)
if(self.board[k[1]][k[2]] == 1):
one(k[0], 100, 0, 0)
if(self.board[k[1]][k[2]] == 2):
one(k[0], 0, 100, 0)
def checkWin(self):
b = self.board
won = 0
#print(str(b[0][0]) + " " + str(b[0][1]) + " " + str(b[0][2]))
#print(str(b[0][0] == b[0][1] and b[0][0] == b[0][2]))
if (b[0][0] == b[0][1] and b[0][0] == b[0][2]) or (b[0][0] == b[1][0] and b[0][0] == b[2][0]) or (b[0][0] == b[1][1] and b[0][0] == b[2][2]):
won = int(b[0][0])
elif (b[1][1] == b[2][1] and b[1][1] == b[1][2]) or (b[1][1] == b[1][0] and b[1][1] == b[1][2]) or (b[1][1] == b[0][1] and b[1][1] == b[2][1]):
won = int(b[1][1])
elif (b[2][2] == b[2][0] and b[2][2] == b[2][1]) or (b[2][2] == b[0][2] and b[2][2] == b[1][2]):
won = int(b[2][2])
return won
def play(self, c):
k = self.accepted_keys.get(c)
player = self.turn_count % 2 + 1
self.board[k[1]][k[2]] = player
self.draw()
return self.checkWin()
|
Python
| 0.00005
|
@@ -712,24 +712,28 @@
%0A if
+not
self.ended:
@@ -735,15 +735,13 @@
ded:
- break%0A
+%0A
@@ -803,32 +803,36 @@
b':%0A
+
self.someone_won
@@ -851,32 +851,36 @@
(c)%0A
+
+
self.turn_count
@@ -884,16 +884,20 @@
nt += 1%0A
+
@@ -969,32 +969,36 @@
9):%0A
+
self.someone_won
@@ -1003,32 +1003,36 @@
on = -1%0A
+
+
if self.someone_
@@ -1045,32 +1045,36 @@
0:%0A
+
self.ended = Tru
@@ -1075,16 +1075,20 @@
= True%0A
+
|
48e66346dd12faa5cbecc052b877a847b5285e43
|
disable a unit test for python 2.7
|
_unittests/ut_pycode/test_pip_helper.py
|
_unittests/ut_pycode/test_pip_helper.py
|
"""
@brief test tree node (time=2s)
"""
import sys
import os
import unittest
import re
import shutil
import warnings
import pandas
try:
import src
except ImportError:
path = os.path.normpath(
os.path.abspath(
os.path.join(
os.path.split(__file__)[0],
"..",
"..")))
if path not in sys.path:
sys.path.append(path)
import src
from src.pyquickhelper import fLOG
from src.pyquickhelper.pycode.pip_helper import get_packages_list, get_package_info, package2dict
class TestPipHelper(unittest.TestCase):
def test_pip_list(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
li = get_packages_list()
dt = package2dict(li[0])
for k, v in dt.items():
fLOG(k, v)
assert len(li) > 0
def test_pip_show(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
info = get_package_info("pandas")
# if "license" not in info:
# raise Exception(str(info))
if "version" not in info:
raise Exception(str(info))
if "travis" not in sys.executable:
info = get_package_info("sphinx-py3doc-enhanced-theme")
# if "license" not in info:
# raise Exception(str(info))
if "version" not in info:
raise Exception(str(info))
def test_pip_show_all(self):
fLOG(
__file__,
self._testMethodName,
OutputPrint=__name__ == "__main__")
info = get_package_info(start=0, end=2)
df = pandas.DataFrame(info)
assert len(info) > 0
assert isinstance(info[0], dict)
if __name__ == "__mahin__":
info = get_package_info(fLOG=fLOG)
df = pandas.DataFrame(info)
df.to_excel("out_packages.xlsx")
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000003
|
@@ -1270,16 +1270,45 @@
ecutable
+ and sys.version_info%5B0%5D %3E= 3
:%0A
|
0e13bf2b23df3584109fcf9b62710efbb81b2226
|
exit with help if no subcommand is supplied.
|
iacli/ia.py
|
iacli/ia.py
|
#!/usr/bin/env python
"""A command line interface for Archive.org.
usage:
ia [--debug] <command> [<args>...]
ia --help
ia --version
options:
-h, --help
-v, --version
-d, --debug [default: True]
commands:
help Retrieve help for subcommands.
configure Configure `ia`.
metadata Retrieve and modify metadata for items on Archive.org.
upload Upload items to Archive.org.
download Download files from Archive.org.
delete Delete files from Archive.org.
search Search Archive.org.
mine Download item metadata from Archive.org concurrently.
catalog Retrieve information about your Archive.org catalog tasks.
list List files in a given item.
See 'ia help <command>' for more information on a specific command.
"""
from sys import stderr, exit
from subprocess import call
from docopt import docopt
from internetarchive import __version__
# main()
#_________________________________________________________________________________________
def main():
"""This script is the CLI driver for ia-wrapper. It dynamically
imports and calls the subcommand specified on the command line. It
depends on the ``internetarchive`` and ``iacli`` packages.
Subcommands can be arbitrarily added to the ``iacli`` package as
modules, and can be dynamically executed via this script, ``ia``.
"""
args = docopt(__doc__, version=__version__, options_first=True)
# Get subcommand.
cmd = args['<command>']
aliases = dict(
md = 'metadata',
up = 'upload',
do = 'download',
rm = 'delete',
se = 'search',
mi = 'mine',
ca = 'catalog',
ls = 'list',
)
if cmd in aliases:
cmd = aliases[cmd]
argv = [cmd] + args['<args>']
if cmd == 'help':
if not args['<args>']:
call(['ia', '--help'])
else:
call(['ia', args['<args>'][0], '--help'])
exit(0)
if cmd == 'help':
if not args['<args>']:
call(['ia', '--help'])
else:
call(['ia', args['<args>'][0], '--help'])
exit(0)
# Dynamically import and call subcommand module specified on the
# command line.
module = 'iacli.ia_{0}'.format(cmd)
try:
globals()['ia_module'] = __import__(module, fromlist=['iacli'])
except ImportError:
stderr.write('error: "{0}" is not an `ia` command!\n'.format(cmd))
exit(1)
try:
ia_module.main(argv)
except KeyboardInterrupt:
exit(1)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -83,18 +83,40 @@
%5B--debug
+ %7C --help %7C --version
%5D
+%5B
%3Ccommand
@@ -116,16 +116,17 @@
command%3E
+%5D
%5B%3Cargs%3E
@@ -133,39 +133,8 @@
...%5D
-%0A ia --help%0A ia --version
%0A%0Aop
@@ -1850,181 +1850,19 @@
elp'
-:%0A if not args%5B'%3Cargs%3E'%5D:%0A call(%5B'ia', '--help'%5D)%0A else:%0A call(%5B'ia', args%5B'%3Cargs%3E'%5D%5B0%5D, '--help'%5D)%0A exit(0)%0A%0A if cmd == 'help'
+ or not cmd
:%0A
|
f46731c1bfd3be6e7d66b4a1078ca09460d25af5
|
Add OCA as author of OCA addons
|
account_partner_required/__openerp__.py
|
account_partner_required/__openerp__.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Account partner required module for OpenERP
# Copyright (C) 2014 Acsone (http://acsone.eu).
# @author Stéphane Bidoul <stephane.bidoul@acsone.eu>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Account partner required',
'version': '0.1',
'category': 'Generic Modules/Accounting',
'license': 'AGPL-3',
'description': """This module adds an option "partner policy"
on account types.
You have the choice between 3 policies : optional (the default),
always (require a partner), and never (forbid a partner).
This module is useful to enforce a partner on account move lines on
customer and supplier accounts.
Module developed by Stéphane Bidoul <stephane.bidoul@acsone.eu>,
inspired by Alexis de Lattre <alexis.delattre@akretion.com>'s
account_analytic_required module.
""",
'author': 'ACSONE SA/NV',
'website': 'http://acsone.eu/',
'depends': ['account'],
'data': ['account_view.xml'],
'installable': True,
}
|
Python
| 0
|
@@ -1651,17 +1651,17 @@
uthor':
-'
+%22
ACSONE S
@@ -1664,17 +1664,50 @@
NE SA/NV
-'
+,Odoo Community Association (OCA)%22
,%0A 'w
|
8660650210eee8e7b2784cff5aad4e14c1edd4fc
|
Rename _apply_validator_chain argument
|
chassis/util/params.py
|
chassis/util/params.py
|
"""Utility Parameter Tools for Chassis Applications."""
import six
from tornado import web
from chassis.util import decorators
from chassis.util import validators
def _fetch_arguments(handler, method):
"""Get the arguments depending on the type of HTTP method."""
if method.__name__ == 'get':
arguments = {}
for key, value in six.iteritems(handler.request.arguments):
# Tornado supports comma-separated lists of values in
# parameters. We're undoing that here, and if a list
# is expected the _validate method can handle it.
if isinstance(value, list):
arguments[key] = ','.join(value)
else:
arguments[key] = value
else: # post, put, patch, delete?
arguments = handler.get_post_arguments()
return arguments
def _apply_validator_chain(_validators, value, handler):
"""Apply validators in sequence to a value."""
if hasattr(_validators, 'validate'): # not a list
_validators = [_validators, ]
for _validators in _validators:
if hasattr(_validators, 'validate'):
value = _validators.validate(value, handler)
else:
raise web.HTTPError(500)
return value
def parse(parameters):
"""Decorator to parse parameters according to a set of criteria.
This outer method is called to set up the decorator.
Arguments:
parameters: An array of parameter declarations tuples in the format:
('<param_name>', {'validate': [<ValidatorClass>,...], <options...>})
Usage:
@chassis.util.parameters.parse([
('email', {'validators': [validators.Email], 'required': True}),
('password', {'validators': [validators.Password], 'required': True})
])
def post(self, email=None, password=None):
# Render JSON for the provided parameters
self.render_json({'email': email, 'password': password})
"""
# pylint: disable=protected-access
@decorators.include_original
def decorate(method):
"""Setup returns this decorator, which is called on the method."""
def call(self, *args):
"""This is called whenever the decorated method is invoked."""
arguments = _fetch_arguments(self, method)
kwargs = {}
errors = []
for key, properties in parameters:
if key in arguments:
value = arguments[key]
try:
kwargs[key] = _apply_validator_chain(
properties.get('validators', []), value, self)
except validators.ValidationError as err:
errors.append(err)
else:
if properties.get('required', False):
raise web.HTTPError(
400,
('Missing required parameter: %s'
% (key, ))
)
else:
if properties.get('default', None) is not None:
kwargs[key] = properties['default']
else:
kwargs[key] = None
if errors:
raise web.HTTPError(400, 'There were %s errors' % len(errors))
return method(self, *args, **kwargs)
# TODO: Autogenerate documentation data for parameters.
return call
return decorate
|
Python
| 0.000065
|
@@ -868,27 +868,21 @@
r_chain(
-_validators
+chain
, value,
@@ -955,35 +955,29 @@
if hasattr(
-_validators
+chain
, 'validate'
@@ -1005,34 +1005,22 @@
-_validators = %5B_validators
+chain = %5Bchain
, %5D%0A
@@ -1028,17 +1028,16 @@
for
-_
validato
@@ -1041,24 +1041,17 @@
ator
-s
in
-_validators
+chain
:%0A
@@ -1067,17 +1067,16 @@
hasattr(
-_
validato
@@ -1076,17 +1076,16 @@
alidator
-s
, 'valid
@@ -1111,17 +1111,16 @@
value =
-_
validato
@@ -1120,17 +1120,16 @@
alidator
-s
.validat
|
8bcc13c082fd366b3e4f7e93d4cf31a99846418d
|
Add comment about folsom compatibility change
|
ceilometer/service.py
|
ceilometer/service.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 eNovance <licensing@enovance.com>
#
# Author: Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from nova import flags
try:
from nova import config as nova_config
except ImportError:
nova_config = False
from ceilometer.openstack.common import cfg
from ceilometer.openstack.common import context
from ceilometer.openstack.common import log
from ceilometer.openstack.common.rpc import service as rpc_service
cfg.CONF.register_opts([
cfg.IntOpt('periodic_interval',
default=600,
help='seconds between running periodic tasks')
])
CLI_OPTIONS = [
cfg.StrOpt('os-username',
default=os.environ.get('OS_USERNAME', 'glance'),
help='Username to use for openstack service access'),
cfg.StrOpt('os-password',
default=os.environ.get('OS_PASSWORD', 'admin'),
help='Password to use for openstack service access'),
cfg.StrOpt('os-tenant-id',
default=os.environ.get('OS_TENANT_ID', ''),
help='Tenant ID to use for openstack service access'),
cfg.StrOpt('os-tenant-name',
default=os.environ.get('OS_TENANT_NAME', 'admin'),
help='Tenant name to use for openstack service access'),
cfg.StrOpt('os-auth-url',
default=os.environ.get('OS_AUTH_URL',
'http://localhost:5000/v2.0'),
help='Auth URL to use for openstack service access'),
]
cfg.CONF.register_cli_opts(CLI_OPTIONS)
cfg.CONF.register_cli_opts(flags.core_opts)
cfg.CONF.register_cli_opts(flags.global_opts)
class PeriodicService(rpc_service.Service):
def start(self):
super(PeriodicService, self).start()
admin_context = context.RequestContext('admin', 'admin', is_admin=True)
self.tg.add_timer(cfg.CONF.periodic_interval,
self.manager.periodic_tasks,
context=admin_context)
def _sanitize_cmd_line(argv):
"""Remove non-nova CLI options from argv."""
cli_opt_names = ['--%s' % o.name for o in CLI_OPTIONS]
return [a for a in argv if a in cli_opt_names]
def _init_nova_config(argv):
if nova_config:
nova_config.parse_args(argv)
else:
flags.parse_args(argv)
def prepare_service(argv=[]):
cfg.CONF(argv[1:])
# FIXME(dhellmann): We must set up the nova.flags module in order
# to have the RPC and DB access work correctly because we are
# still using the Service object out of nova directly. We need to
# move that into openstack.common.
_init_nova_config(_sanitize_cmd_line(argv))
log.setup('ceilometer')
|
Python
| 0.000001
|
@@ -793,16 +793,261 @@
tError:%0A
+ # NOTE(dhellmann): We want to try to maintain compatibility%0A # with folsom for the time being, so set the name nova_config%0A # to a sentinal we can use to trigger different behavior%0A # when we try to set up the configuration object.%0A
nova
@@ -3005,24 +3005,267 @@
nfig(argv):%0A
+ # NOTE(dhellmann): We want to try to maintain compatibility%0A # with folsom for the time being, so this function is%0A # just here to isolate the rest of the module from having%0A # to know how to configure different versions of nova.%0A
if nova_
|
15785b306557bb81643270415944ca1bda3ae0a8
|
Remove database_project_name from api
|
dbaas_zabbix/dbaas_api.py
|
dbaas_zabbix/dbaas_api.py
|
# -*- coding: utf-8 -*-
class DatabaseAsAServiceApi(object):
def __init__(self, databaseinfra, credentials):
self.databaseinfra = databaseinfra
self.credentials = credentials
@property
def user(self):
return self.credentials.user
@property
def password(self):
return self.credentials.password
@property
def endpoint(self):
return self.credentials.endpoint
@property
def main_clientgroup(self):
return self.credentials.get_parameter_by_name("main_clientgroup")
@property
def extra_clientgroup(self):
return self.credentials.get_parameter_by_name("extra_clientgroup")
def extra_parameters(self, group):
return self.credentials.get_parameters_by_group(group)
@property
def alarm_notes(self):
return self.credentials.get_parameter_by_name("alarm_notes")
@property
def instances(self):
return self.databaseinfra.instances.all()
@property
def driver(self):
return self.databaseinfra.get_driver()
@property
def database_instances(self):
return self.driver.get_database_instances()
@property
def non_database_instances(self):
return self.driver.get_non_database_instances()
@property
def hosts(self):
return list(set([instance.hostname for instance in self.instances]))
@property
def databaseifra_name(self):
return self.databaseinfra.name
@property
def secondary_ips(self):
return self.databaseinfra.cs_dbinfra_attributes.all()
@property
def is_ha(self):
return self.databaseinfra.plan.is_ha
@property
def engine_name(self):
return self.databaseinfra.engine.engine_type.name
@property
def database_project_name(self):
return self.databaseinfra.databases.get().project.name
|
Python
| 0.000019
|
@@ -1755,119 +1755,4 @@
ame%0A
-%0A @property%0A def database_project_name(self):%0A return self.databaseinfra.databases.get().project.name%0A
|
fac00c413d446a875000d11707755ccc6111b25b
|
Allow allura_import.py to find the dependent libraries in ../Allura
|
scripts/allura_import.py
|
scripts/allura_import.py
|
import os
import sys
import urllib
import urllib2
import urlparse
import hmac
import hashlib
import json
from optparse import OptionParser
from pprint import pprint
from datetime import datetime
from allura.lib.import_api import AlluraImportApiClient
def main():
optparser, options, args = parse_options()
import_options = {}
for s in options.import_opts:
k, v = s.split('=', 1)
if v == 'false':
v = False
import_options[k] = v
user_map = {}
if options.user_map_file:
f = open(options.user_map_file)
try:
user_map = json.load(f)
if type(user_map) is not type({}):
raise ValueError
for k, v in user_map.iteritems():
print k, v
if not isinstance(k, basestring) or not isinstance(v, basestring):
raise ValueError
except ValueError:
optparser.error('--user-map should specify JSON file with format {"original_user": "sf_user", ...}')
finally:
f.close()
import_options['user_map'] = user_map
cli = AlluraImportApiClient(options.base_url, options.api_key, options.secret_key, options.verbose)
doc_txt = open(args[0]).read()
# import the tracker (if any)
if options.tracker:
import_tracker(cli, options.project, options.tracker, import_options, doc_txt,
validate=options.validate,
verbose=options.verbose)
elif options.forum:
import_forum(cli, options.project, options.forum, user_map, doc_txt, validate=options.validate)
def import_tracker(cli, project, tool, import_options, doc_txt, validate=True, verbose=False):
url = '/rest/p/' + project + '/' + tool
if validate:
url += '/validate_import'
else:
url += '/perform_import'
existing_map = {}
if options.cont:
existing_tickets = cli.call('/rest/p/' + options.project + '/' + options.tracker + '/')['tickets']
for t in existing_tickets:
existing_map[t['ticket_num']] = t['summary']
doc = json.loads(doc_txt)
tickets_in = doc['trackers']['default']['artifacts']
doc['trackers']['default']['artifacts'] = []
if options.verbose:
print "Processing %d tickets" % len(tickets_in)
for cnt, ticket_in in enumerate(tickets_in):
if ticket_in['id'] in existing_map:
if options.verbose:
print 'Ticket id %d already exists, skipping' % ticket_in['id']
continue
doc['trackers']['default']['artifacts'] = [ticket_in]
res = cli.call(url, doc=json.dumps(doc), options=json.dumps(import_options))
assert res['status'] and not res['errors']
if options.validate:
if res['warnings']:
print "Ticket id %s warnings: %s" % (ticket_in['id'], res['warnings'])
else:
print "Imported ticket id %s" % (ticket_in['id'])
def import_forum(cli, project, tool, user_map, doc_txt, validate=True):
url = '/rest/p/' + project + '/' + tool
if validate:
url += '/validate_import'
print cli.call(url, doc=doc_txt, user_map=json.dumps(user_map))
else:
url += '/perform_import'
print cli.call(url, doc=doc_txt, user_map=json.dumps(user_map))
def parse_options():
optparser = OptionParser(usage='''%prog [options] <JSON dump>
Import project data dump in JSON format into an Allura project.''')
optparser.add_option('-a', '--api-ticket', dest='api_key', help='API ticket')
optparser.add_option('-s', '--secret-key', dest='secret_key', help='Secret key')
optparser.add_option('-p', '--project', dest='project', help='Project to import to')
optparser.add_option('-t', '--tracker', dest='tracker', help='Tracker to import to')
optparser.add_option('-f', '--forum', dest='forum', help='Forum tool to import to')
optparser.add_option('-u', '--base-url', dest='base_url', default='https://sourceforge.net', help='Base Allura URL (%default)')
optparser.add_option('-o', dest='import_opts', default=[], action='append', help='Specify import option(s)', metavar='opt=val')
optparser.add_option('--user-map', dest='user_map_file', help='Map original users to SF.net users', metavar='JSON_FILE')
optparser.add_option('--validate', dest='validate', action='store_true', help='Validate import data')
optparser.add_option('-v', '--verbose', dest='verbose', action='store_true', help='Verbose operation')
options, args = optparser.parse_args()
if len(args) != 1:
optparser.error("Wrong number of arguments")
if not options.api_key or not options.secret_key:
optparser.error("Keys are required")
if not options.project:
optparser.error("Target project is required")
return optparser, options, args
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -189,16 +189,124 @@
tetime%0A%0A
+import os.path%0AscriptDir=os.path.dirname(__file__)%0Asys.path.append(os.path.join(scriptDir, '..', 'Allura'))%0A
from all
|
8fb8dc0734e91f2a2037c6446e36174e9878a4b6
|
fix EndOfInfo
|
merc/features/rfc1459/info.py
|
merc/features/rfc1459/info.py
|
import collections
import datetime
from merc import util
from merc import feature
from merc import message
INFO_TEMPLATE = """\
____
__/ / /___ _ ___ ________
/_ . __/ ' \/ -_) __/ __/
/_ __/_/_/_/\__/_/ \__/
/_/_/
The Modern Extensible Relay Chat daemon, version {version}.
Copyright (C) {year}, #merc-devel
This software is licensed under the terms of the MIT license. The LICENSE file
in the source root contains full details and usage terms.
Visit us: http://merc-devel.com
Visit us on IRC: #merc @ irc.merc-devel.com
Get the merc source code at: https://github.com/merc-devel/merc
The following people have contributed significantly to merc, in
nickname-alphabetical order:
rfw, Tony Young <tony@rfw.name>
Shiz <hi@shiz.me>
This merc instance has been online since {online_since}, meaning it has been up
for {online_for}!
"""
class InfoFeature(feature.Feature):
NAME = __name__
install = InfoFeature.install
@InfoFeature.register_server_command
class InfoReply(message.Reply):
NAME = "371"
FORCE_TRAILING = True
MIN_ARITY = 1
def __init__(self, line):
self.line = line
def as_reply_params(self):
return [self.line]
@InfoFeature.register_server_command
class EndOfInfo(message.Reply):
NAME = "374"
MIN_ARITY = 2
def __init__(self, line, reason="End of /INFO list", *args):
self.line = line
self.reason = reason
def as_reply_params(self):
return [self.reason]
@InfoFeature.register_user_command
class Info(message.Command):
NAME = "INFO"
MIN_ARITY = 0
@message.Command.requires_registration
def handle_for(self, app, user, prefix):
year = datetime.date.today().year
online_since = app.creation_time.strftime("%c")
online_for = friendly_timespan(datetime.datetime.now() -
app.creation_time)
lines = INFO_TEMPLATE.format(
version=app.version,
year=year,
online_since=online_since,
online_for=online_for)
for line in lines.splitlines():
user.send_reply(InfoReply(line))
app.run_hooks("server.info", user)
user.send_reply(EndOfInfo())
def friendly_timespan(diff, range=3):
UNITS = collections.OrderedDict([
('year', 31536000),
('month', 2592000),
('week', 604800),
('day', 86400),
('hour', 3600),
('minute', 60),
('second', 1)
])
seconds = round(diff.total_seconds())
indications = []
for unit, amount in UNITS.items():
n, seconds = divmod(seconds, amount)
if n == 0:
continue
elif n > 1:
unit += "s"
indications.append('{} {}'.format(n, unit))
if range is not None:
range -= 1
if range == 0:
break
if len(indications) > 0:
if len(indications) > 1:
return ", ".join(indications[:-1]) + " and " + indications[-1]
return indications[0]
else:
return "a small while"
|
Python
| 0.000001
|
@@ -1267,17 +1267,17 @@
ARITY =
-2
+1
%0A%0A def
@@ -1294,14 +1294,8 @@
elf,
- line,
rea
@@ -1327,37 +1327,16 @@
*args):%0A
- self.line = line%0A
self
|
4cbcb3ede718249b78b46ac0572d0e6c6c8c88b9
|
Fix currency compute
|
bin/addons/base/res/res_currency.py
|
bin/addons/base/res/res_currency.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2004-2006 TINY SPRL. (http://tiny.be) All Rights Reserved.
#
# $Id: account.py 1005 2005-07-25 08:41:42Z nicoe $
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsability of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# garantees and support are strongly adviced to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import time
import netsvc
from osv import fields, osv
import ir
from tools.misc import currency
import mx.DateTime
from mx.DateTime import RelativeDateTime, now, DateTime, localtime
class res_currency(osv.osv):
def _current_rate(self, cr, uid, ids, name, arg, context):
res={}
for id in ids:
cr.execute("SELECT currency_id, rate FROM res_currency_rate WHERE currency_id = %d AND name <= '%s' ORDER BY name desc LIMIT 1" % (id, time.strftime('%Y-%m-%d')))
if cr.rowcount:
id, rate=cr.fetchall()[0]
res[id]=rate
else:
res[id]=0
return res
_name = "res.currency"
_description = "Currency"
_columns = {
'name': fields.char('Currency', size=32, required=True),
'code': fields.char('Code', size=3),
'rate': fields.function(_current_rate, method=True, string='Current rate',digits=(12,6)),
'rate_ids': fields.one2many('res.currency.rate', 'currency_id', 'Rates'),
'accuracy': fields.integer('Computational Accuracy'),
'rounding': fields.float('Rounding factor', digits=(12,6)),
'active': fields.boolean('Active'),
}
_defaults = {
'active': lambda *a: 1,
}
_order = "code"
def round(self, cr, uid, currency, amount):
return round(amount / currency.rounding) * currency.rounding
def compute(self, cr, uid, from_currency_id, to_currency_id, from_amount):
if to_currency_id==from_currency_id:
return from_amount
[from_currency]=self.read(cr, uid, [from_currency_id])
[to_currency] = self.read(cr, uid, [to_currency_id])
if from_currency['rate'] == 0 or to_currency['rate'] == 0:
raise osv.except_osv('Error', 'No rate found for the currency')
return self.round(cr, uid, to_currency, from_amount * from_currency['rate']/to_currency['rate'])
res_currency()
class res_currency_rate(osv.osv):
_name = "res.currency.rate"
_description = "Currency Rate"
_columns = {
'name': fields.date('Date', required=True, select=True),
'rate': fields.float('Rate', digits=(12,6), required=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
}
_defaults = {
'name': lambda *a: time.strftime('%Y-%m-%d'),
}
_order = "name desc"
res_currency_rate()
|
Python
| 0.056095
|
@@ -2692,19 +2692,31 @@
m_amount
+, round=True
):%0A
-
%09%09if to_
@@ -2770,17 +2770,16 @@
mount%0A%09%09
-%5B
from_cur
@@ -2787,19 +2787,20 @@
ency
-%5D
=self.
-read
+browse
(cr,
@@ -2828,12 +2828,14 @@
id%5D)
+%5B0%5D
%0A%09%09
-%5B
to_c
@@ -2845,17 +2845,16 @@
ency
-%5D
= self.
read
@@ -2849,20 +2849,22 @@
= self.
-read
+browse
(cr, uid
@@ -2882,16 +2882,19 @@
ncy_id%5D)
+%5B0%5D
%0A%09%09if fr
@@ -3014,16 +3014,29 @@
rency')%0A
+%09%09if round:%0A%09
%09%09return
@@ -3096,24 +3096,91 @@
currency
-%5B'rate'%5D
+.rate/to_currency.rate)%0A%09%09else:%0A%09%09%09return (from_amount * from_currency.rate
/to_curr
@@ -3183,24 +3183,21 @@
currency
-%5B'
+.
rate
-'%5D
)%0Ares_cu
|
f5c34b40c42490e5cf1bc5a67178a5fe9e0e635c
|
refactor _add_quicklink_context
|
myuw/views/page.py
|
myuw/views/page.py
|
import re
import logging
import traceback
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.contrib.auth import logout as django_logout
from django.conf import settings
from myuw.dao.term import get_current_quarter
from myuw.dao.affiliation import get_all_affiliations
from myuw.dao.user import is_oldmyuw_user, get_netid_of_current_user,\
is_oldmyuw_mobile_user
from myuw.dao.emaillink import get_service_url_for_address
from myuw.dao.exceptions import EmailServiceUrlException
from myuw.dao.quicklinks import get_quicklink_data
from myuw.logger.timer import Timer
from myuw.logger.logback import log_exception
from myuw.logger.logresp import log_invalid_netid_response
from myuw.logger.logresp import log_success_response_with_affiliation
from myuw.logger.session_log import log_session
from myuw.views.error import invalid_session
from myuw.dao.uwemail import get_email_forwarding_for_current_user
from myuw.dao.card_display_dates import get_card_visibilty_date_values
from myuw.views import prefetch_resources, get_enabled_features
from restclients_core.exceptions import DataFailureException
from myuw.dao.messages import get_current_messages
logger = logging.getLogger(__name__)
LOGOUT_URL = "/user_logout"
def page(request,
context=None,
template='index.html',
prefetch=True,
add_quicklink_context=False):
if context is None:
context = {}
timer = Timer()
netid = get_netid_of_current_user()
if not netid:
log_invalid_netid_response(logger, timer)
return invalid_session()
context["user"] = {
"netid": netid,
"session_key": request.session.session_key,
}
if prefetch:
# Some pages need to prefetch before this point
failure = try_prefetch(request)
if failure:
return failure
log_session(netid, request.session.session_key, request)
if _is_mobile(request):
# On mobile devices, all students get the current myuw. Non-students
# are sent to the legacy site.
try:
if is_oldmyuw_mobile_user():
logger.info("mobile user %s, redirect to legacy!" % netid)
return redirect_to_legacy_site()
except Exception:
log_exception(logger,
'%s is_oldmyuw_mobile_user' % netid,
traceback.format_exc())
logger.info("%s, redirected to legacy!" % netid)
return redirect_to_legacy_site()
else:
if is_oldmyuw_user():
return redirect_to_legacy_site()
context["home_url"] = "/"
context["err"] = None
context["user"]["affiliations"] = get_all_affiliations(request)
context["banner_messages"] = get_current_messages(request)
context["card_display_dates"] = get_card_visibilty_date_values(request)
try:
my_uwemail_forwarding = get_email_forwarding_for_current_user()
if my_uwemail_forwarding.is_active():
c_user = context["user"]
try:
(c_user['email_forward_url'],
c_user['email_forward_title'],
c_user['email_forward_icon']) = get_service_url_for_address(
my_uwemail_forwarding.fwd)
except EmailServiceUrlException:
c_user['login_url'] = None
c_user['title'] = None
c_user['icon'] = None
logger.info('No Mail Url: %s' % (
my_uwemail_forwarding.fwd))
except Exception:
log_exception(logger,
'get_email_forwarding_for_current_user',
traceback.format_exc())
pass
if ('year' not in context or context['year'] is None or
'quarter' not in context and context['quarter'] is None):
cur_term = get_current_quarter(request)
if cur_term is None:
context["err"] = "No current quarter data!"
else:
context["year"] = cur_term.year
context["quarter"] = cur_term.quarter
else:
pass
context['enabled_features'] = get_enabled_features()
context['google_search_key'] = getattr(
settings, "GOOGLE_SEARCH_KEY", None)
if add_quicklink_context:
_add_quicklink_context(request, context)
log_success_response_with_affiliation(logger, timer, request)
return render(request, template, context)
def try_prefetch(request):
try:
prefetch_resources(request,
prefetch_email=True,
prefetch_enrollment=True)
except DataFailureException:
log_exception(logger,
"prefetch_resources",
traceback.format_exc())
context["webservice_outage"] = True
return render(request, template, context)
return
def _is_mobile(request):
user_agent = request.META.get("HTTP_USER_AGENT")
if not user_agent:
return False
# This is the check we were doing in our apache config...
if re.match('.*iPhone.*', user_agent):
return True
if re.match('.*Android.*Mobile.*', user_agent):
return True
return False
def redirect_to_legacy_site():
legacy_url = getattr(settings,
"MYUW_USER_SERVLET_URL",
"https://myuw.washington.edu/servlet/user")
return HttpResponseRedirect(legacy_url)
def logout(request):
# Expires current myuw session
django_logout(request)
# Redirects to weblogin logout page
return HttpResponseRedirect(LOGOUT_URL)
def _add_quicklink_context(request, context):
link_data = get_quicklink_data(get_all_affiliations(request))
for key in link_data:
context[key] = link_data[key]
|
Python
| 0.001025
|
@@ -2672,24 +2672,73 @@
rr%22%5D = None%0A
+ affiliations = get_all_affiliations(request)%0A
context%5B
@@ -2763,24 +2763,16 @@
ons%22%5D =
-get_all_
affiliat
@@ -2775,25 +2775,16 @@
liations
-(request)
%0A%0A co
@@ -4352,39 +4352,44 @@
icklink_context(
-request
+affiliations
, context)%0A%0A
@@ -5700,23 +5700,28 @@
context(
-request
+affiliations
, contex
@@ -5759,24 +5759,16 @@
nk_data(
-get_all_
affiliat
@@ -5771,25 +5771,16 @@
liations
-(request)
)%0A%0A f
|
518ee3265b619c763c20394baa046f18dbfb08d9
|
Fix bug
|
my-ACG/util/ani_gamer_com_tw_animeVideo.py
|
my-ACG/util/ani_gamer_com_tw_animeVideo.py
|
import argparse
import logging
import re
import pywikibot
import requests
from bs4 import BeautifulSoup
class AniGamerComTwAnimeVideo:
RATING_IMG = {
'ALL': 0,
'6TO12': 6,
'12TO18': 12,
'15TO18': 15,
'18UP': 18,
}
RATING_ITEM = {
0: 'Q46',
6: 'Q47',
12: 'Q48',
15: 'Q49',
18: 'Q50',
}
def getData(self, url):
text = requests.get(url).text
data = {}
if '目前無此動畫或動畫授權已到期!' in text:
data['removed'] = True
return data
soup = BeautifulSoup(text, 'html.parser')
season = soup.find('section', {'class': 'season'})
if season is None:
data['episodes'] = 1
else:
data['episodes'] = len(season.find('ul').findAll('li'))
rating = soup.find('div', {'class': 'rating'})
if rating:
src = rating.find('img').get('src')
m = re.search(r'TW-(.+?)\.gif', src)
if m:
data['rating'] = self.RATING_IMG[m.group(1)]
data_intro = soup.find('div', {'class': 'data_intro'})
if data_intro:
linkdiv = data_intro.find('div', {'class': 'link'})
if linkdiv:
for link in linkdiv.findAll('a'):
if link.text == '作品資料':
data['acg_link'] = link.get('href')
if data['acg_link'].startswith('//'):
data['acg_link'] = 'https:' + data['acg_link']
return data
def updateItem(self, datasite, item):
itemlabel = item.get()['labels']['zh-tw']
logging.info('%s %s', item.id, itemlabel)
claims = item.get()['claims']
if 'P34' not in claims:
logging.error('No anime gamer claims')
return
url = claims['P34'][0].getTarget()
data = self.getData(url)
# 移除巴哈姆特動畫瘋連結
if 'removed' in data and data['removed']:
logging.info('\tRemove anime gamer link')
item.removeClaims(claims['P34'], summary='影片已移除')
return
# 從巴哈姆特動畫瘋匯入巴哈姆特作品資料
if 'acg_link' in data and 'P1' not in claims:
new_claim = pywikibot.page.Claim(datasite, 'P1')
new_claim.setTarget(data['acg_link'])
logging.info('\t Add acg gamer link %s', data['acg_link'])
item.addClaim(new_claim)
# 台灣分級
if 'rating' in data:
rating_exists = False
if 'P23' in claims:
for claim in claims['P23']:
if claim.getTarget().id == self.RATING_ITEM[data['rating']]:
rating_exists = True
if len(claim.sources) == 0:
rating_source = pywikibot.page.Claim(datasite, 'P34')
rating_source.setTarget(url)
logging.info('\t Add source to rating')
claim.addSource(rating_source)
if not rating_exists:
new_claim = pywikibot.page.Claim(datasite, 'P23')
new_claim.setTarget(pywikibot.ItemPage(datasite, self.RATING_ITEM[data['rating']]))
rating_source = pywikibot.page.Claim(datasite, 'P34')
rating_source.setTarget(url)
new_claim.addSource(rating_source)
logging.info('\t Add new rating %s', data['rating'])
item.addClaim(new_claim, summary='新增台灣分級')
# 總集數
if 'episodes' in data:
if 'P27' in claims:
new_episodes = data['episodes']
episodesValue = claims['P27'][0].getTarget()
old_episodes = episodesValue.amount
if new_episodes > old_episodes:
episodesValue.amount = new_episodes
logging.info('\t Update episodes from %s to %s', old_episodes, new_episodes)
claims['P27'][0].changeTarget(episodesValue, summary='更新總集數')
else:
new_claim = pywikibot.page.Claim(datasite, 'P27')
new_claim.setTarget(pywikibot.WbQuantity(new_episodes, site=datasite))
logging.info('\t Add new episodes %s', new_episodes)
item.addClaim(new_claim, summary='新增總集數')
# 播放狀態
if 'P31' in claims and claims['P31'][0].getTarget().id == 'Q57':
logging.info('\t Update status to playing')
statusValue = pywikibot.ItemPage(datasite, 'Q56')
claims['P31'][0].changeTarget(statusValue, summary='更新播放狀態')
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('url')
args = parser.parse_args()
print(AniGamerComTwAnimeVideo().getData(args.url))
|
Python
| 0.000001
|
@@ -1793,16 +1793,19 @@
.error('
+%5Ct
No anime
@@ -3586,75 +3586,71 @@
-if 'P27' in claims:%0A new_episodes = data%5B'episodes'%5D
+new_episodes = data%5B'episodes'%5D%0A if 'P27' in claims:
%0A
|
3b89f8df57fd345e873d87df7f342fe6a01b49ce
|
Fix config import from __init__
|
darwin/__init__.py
|
darwin/__init__.py
|
from .logger import setup_logging
setup_logging()
|
Python
| 0
|
@@ -1,14 +1,20 @@
%0Afrom
+.utils
.logger
|
75a2c762ac9714363955530c6908efcc1df95567
|
Improve consistency in the python example
|
bindings/python/test-integration.py
|
bindings/python/test-integration.py
|
#!/usr/bin/env python
import gtk
from igemacintegration import *
class MainWindow(gtk.Window):
def __init__(self):
gtk.Window.__init__(self)
self.set_default_size(400, 300)
vbox = gtk.VBox(False, 0)
self.add(vbox)
vbox.pack_start(gtk.Label("Some content here"), True, True, 0)
# Setup a menu bar with GTK+
menubar = gtk.MenuBar()
menu = gtk.Menu()
item = gtk.MenuItem("Open")
item.connect("activate", self.activate_cb)
menu.add(item)
item = gtk.MenuItem("Save")
item.connect("activate", self.activate_cb)
menu.add(item)
quit_item = gtk.MenuItem("Quit")
quit_item.connect("activate", lambda d: gtk.main_quit())
menu.add(quit_item)
item = gtk.MenuItem("File")
item.set_submenu(menu)
menubar.add(item)
menubar.show_all()
vbox.pack_start(menubar)
menubar.hide()
# Set up the menu bar integration
macmenu = MacMenu()
macmenu.set_menu_bar(menubar)
# Take care of the Quit item, the integration code will put it
# in the standard place
macmenu.set_quit_menu_item(quit_item)
# Add two groups with items in the application menu
group = macmenu.add_app_menu_group()
item = gtk.MenuItem("About")
item.connect("activate", self.activate_cb)
group.add_app_menu_item(item, None)
item = gtk.MenuItem("Check for updates...")
item.connect("activate", self.activate_cb)
group.add_app_menu_item(item, None)
group = macmenu.add_app_menu_group()
item = gtk.MenuItem("Preferences")
item.connect("activate", self.activate_cb)
group.add_app_menu_item(item, None)
# Set up the dock integration
dock = MacDock()
dock.connect('quit-activate', lambda d: gtk.main_quit())
dock.connect('clicked', self.dock_clicked_cb)
# Keep the reference so it's not GC:ed.
self.dock = dock
def dock_clicked_cb(self, dock):
print "Dock clicked"
def activate_cb(self, widget):
try:
print widget.child.get_text()
except:
print widget
if __name__ == '__main__':
window = MainWindow()
window.connect("destroy", gtk.main_quit)
window.show()
gtk.main()
|
Python
| 0.000008
|
@@ -1830,24 +1830,27 @@
ion%0A
+mac
dock = MacDo
@@ -1854,32 +1854,35 @@
cDock()%0A
+mac
dock.connect('qu
@@ -1930,16 +1930,19 @@
+mac
dock.con
@@ -2041,23 +2041,29 @@
self.
+mac
dock =
+mac
dock%0A
|
e972a2436807ff0f5af4282a7842451e07807e5e
|
bump to 0.0.8
|
chacractl/__init__.py
|
chacractl/__init__.py
|
config = {'verbosity': 'info'}
__version__ = '0.0.7'
|
Python
| 0
|
@@ -48,7 +48,7 @@
0.0.
-7
+8
'%0A
|
a31f4ff2fe25b2ac9844590912f4c851db131aa4
|
Update brocade_fastiron_telnet.py
|
netmiko/brocade/brocade_fastiron_telnet.py
|
netmiko/brocade/brocade_fastiron_telnet.py
|
from __future__ import unicode_literals
import re
import time
from netmiko.cisco_base_connection import CiscoBaseConnection
class BrocadeFastironTelnet(CiscoBaseConnection):
"""Brocade FastIron aka ICX support."""
def session_preparation(self):
self.protocol = 'telnet'
"""FastIron requires to be enable mode to disable paging."""
self._test_channel_read()
self.set_base_prompt()
self.enable()
self.disable_paging(command="skip-page-display")
@staticmethod
def normalize_linefeeds(a_string):
"""Convert '\r\n\r\n', '\r\r\n','\r\n', '\n\r' to '\n."""
newline = re.compile(r'(\r\n\r\n|\r\r\n|\r\n|\n\r|\r)')
return newline.sub('\n', a_string)
def telnet_login(self, pri_prompt_terminator='#', alt_prompt_terminator='>',
username_pattern=r"Username:", pwd_pattern=r"assword:",
delay_factor=1, max_loops=60):
"""Telnet login. Can be username/password or just password."""
super(BrocadeFastironTelnet, self).telnet_login(
pri_prompt_terminator=pri_prompt_terminator,
alt_prompt_terminator=alt_prompt_terminator,
username_pattern=username_pattern,
pwd_pattern=pwd_pattern,
delay_factor=delay_factor,
max_loops=max_loops)
def _test_channel_read(self, count=40, pattern=""):
"""Try to read the channel (generally post login) verify you receive data back."""
def _increment_delay(main_delay, increment=1.1, maximum=8):
"""Increment sleep time to a maximum value."""
main_delay = main_delay * increment
if main_delay >= maximum:
main_delay = maximum
return main_delay
i = 0
delay_factor = self.select_delay_factor(delay_factor=0)
main_delay = delay_factor * .1
time.sleep(main_delay * 10)
new_data = ""
while i <= count:
new_data += self._read_channel_timing()
if new_data and pattern:
if re.search(pattern, new_data):
break
elif new_data:
break
else:
"""Must send \r\n for ICX"""
self.write_channel('\r\n')
main_delay = _increment_delay(main_delay)
time.sleep(main_delay)
i += 1
# check if data was ever present
if new_data:
return ""
else:
raise NetMikoTimeoutException("Timed out waiting for data")
def find_prompt(self, delay_factor=1):
"""Finds the current network device prompt, last line only."""
debug = False
delay_factor = self.select_delay_factor(delay_factor)
self.clear_buffer()
"""Must send \r\n for ICX"""
self.write_channel("\r\n")
time.sleep(delay_factor * .1)
# Initial attempt to get prompt
prompt = self.read_channel()
if self.ansi_escape_codes:
prompt = self.strip_ansi_escape_codes(prompt)
if debug:
print("prompt1: {}".format(prompt))
# Check if the only thing you received was a newline
count = 0
prompt = prompt.strip()
while count <= 10 and not prompt:
prompt = self.read_channel().strip()
if prompt:
if debug:
print("prompt2a: {}".format(repr(prompt)))
print("prompt2b: {}".format(prompt))
if self.ansi_escape_codes:
prompt = self.strip_ansi_escape_codes(prompt).strip()
else:
"""Must send \r\n for ICX"""
self.write_channel("\r\n")
time.sleep(delay_factor * .1)
count += 1
if debug:
print("prompt3: {}".format(prompt))
# If multiple lines in the output take the last line
prompt = self.normalize_linefeeds(prompt)
prompt = prompt.split('\n')[-1]
prompt = prompt.strip()
if not prompt:
raise ValueError("Unable to find prompt: {}".format(prompt))
time.sleep(delay_factor * .1)
self.clear_buffer()
return prompt
@staticmethod
def normalize_cmd(command):
"""Normalize CLI commands to have a single trailing newline."""
command = command.rstrip("\n")
"""Must send \r\n for ICX"""
command += '\r\n'
return command
def check_enable_mode(self, check_string=''):
"""Check if in enable mode. Return boolean."""
debug = False
"""Must send \r\n for ICX"""
self.write_channel('\r\n')
output = self.read_until_prompt()
if debug:
print(output)
return check_string in output
def check_enable_mode(self, check_string=''):
"""Check if in enable mode. Return boolean."""
debug = False
"""Must send \r\n for CER"""
self.write_channel('\r\n')
output = self.read_until_prompt()
if debug:
print(output)
return check_string in output
def check_config_mode(self, check_string=')#', pattern=''):
"""Checks if the device is in configuration mode or not."""
debug = True
if not pattern:
pattern = re.escape(self.base_prompt)
if debug:
print("pattern: {}".format(pattern))
"""Must send \r\n for ICX"""
self.write_channel('\r\n')
output = self.read_until_pattern(pattern=pattern)
if debug:
print("check_config_mode: {}".format(repr(output)))
return check_string in output
def config_mode(self, config_command='config term', pattern=''):
if not pattern:
pattern = re.escape(self.base_prompt)
return super(CiscoBaseConnection, self).config_mode(config_command=config_command,
pattern=pattern)
def exit_config_mode(self, exit_config='end', pattern=''):
"""Exit from configuration mode."""
if not pattern:
pattern = re.escape(self.base_prompt)
return super(CiscoBaseConnection, self).exit_config_mode(exit_config=exit_config,
pattern=pattern)
|
Python
| 0.000718
|
@@ -5339,11 +5339,12 @@
g =
-Tru
+Fals
e%0A
|
fe47ba3a235f5b7e265e1e7cb3f1769467ca29db
|
Handle configuration better
|
cog/config.py
|
cog/config.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Activision Publishing, Inc.
# the cog project is free software under 3-clause BSD licence
# see the LICENCE file in the project root for copying terms
# process configuration files
import os, sys
import yaml
import cog.util as util
user_settings_dir = os.environ['HOME'] + os.sep + '.cog'
sys_settings_dir = '/etc/cog'
def read_yaml(file):
data = dict()
try:
fh = open(file)
data = yaml.safe_load(fh)
fh.close()
except (IOError, yaml.YAMLError), e:
print e
return data
def merge_data(*files):
data = dict()
for file in files:
if os.path.exists(file):
data = util.merge(data, read_yaml(file))
return data
def expand_inheritances(template_data, section):
template = dict()
for k, v in template_data.get(section).iteritems():
if v.has_key('inherits'):
base = v.get('inherits')
template[k] = util.merge(template_data.get(section).get(base).get('default'), v.get('default'))
else:
template[k] = v.get('default')
return template
class Profiles(dict):
__metaclass__ = util.Singleton
def __init__(self):
super(self.__class__, self).__init__({})
user_settings_file = user_settings_dir + os.sep + 'settings'
config_files = [sys_settings_dir + os.sep + 'settings']
self.defaults = {
'ldap_uri': 'ldap://ldap/',
'ldap_encryption': True,
'bind_dn': None,
'bind_pass': None,
'user_rdn': 'uid',
'user_query': '(&(%s=%s)(|(objectClass=posixAccount)(objectClass=inetOrgPerson)))',
'group_query': '(&(cn=%s)(objectClass=posixGroup))',
'netgroup_query': '(&(cn=%s)(objectClass=nisNetgroup))',
'min_uidnumber': 420000,
'max_uidnumber': 1000000,
'min_gidnumber': 420000,
'max_gidnumber': 1000000,
'rfc2307bis_group_object_class': 'groupOfMembers',
'rfc2307bis_group_member_attribute': 'member',
'rfc2307bis_group_sync_attributes': True,
'user_config': read_yaml(config_files[0]).get('user_config', True),
}
if self.defaults.get('user_config'):
config_files.append(user_settings_dir + os.sep + 'settings')
settings_data = merge_data(*config_files)
self.user_config = settings_data.pop('user_config')
self.profile = settings_data.pop('profile')
for k, v in settings_data.iteritems():
self[k] = v
def list(self):
return self.keys()
def current(self, name=None):
return util.merge(self.defaults, self.get(name or self.profile))
def use(self, name):
if name in self.keys():
self.profile = name
user_template_file = user_settings_dir + os.sep + 'templates.yaml'
sys_template_file = sys_settings_dir + os.sep + 'templates.yaml'
template_data = merge_data(sys_template_file, user_template_file)
objects = dict()
for object in ['accounts', 'groups', 'netgroups']:
objects[object] = expand_inheritances(template_data, object)
|
Python
| 0.000001
|
@@ -277,32 +277,30 @@
util%0A%0Auser_
-settings
+config
_dir = os.en
@@ -335,37 +335,250 @@
og'%0A
-sys_settings_dir = '/etc/cog'
+config_files = %5B%5D%0Atemplate_files = %5B%5D%0Afor config_dir in %5B'/etc/cog', '/usr/local/etc/cog'%5D:%0A if os.path.exists(config_dir):%0A config_files.append(config_dir + %22/settings%22)%0A template_files.append(config_dir + %22/templates.yaml%22)
%0A%0Ade
@@ -1496,79 +1496,14 @@
ser_
-settings_dir + os.sep + 'settings'%0A config_files = %5Bsys_settings
+config
_dir
@@ -1524,17 +1524,16 @@
ettings'
-%5D
%0A%0A
@@ -2462,33 +2462,12 @@
ngs_
-dir + os.sep + 'settings'
+file
)%0A
@@ -2944,22 +2944,16 @@
= name%0A%0A
-%0Auser_
template
@@ -2961,89 +2961,28 @@
file
- = user_settings_dir + os.sep + 'templates.yaml'%0Asys_template_file = sys_settings
+s.append(user_config
_dir
@@ -3009,17 +3009,17 @@
es.yaml'
-%0A
+)
%0Atemplat
@@ -3042,12 +3042,9 @@
ata(
-sys_
+*
temp
@@ -3056,28 +3056,9 @@
file
-, user_template_file
+s
)%0A%0Ao
|
d88dcaa6e1256452715aa5071cbe326233f03195
|
format model args
|
rest_framework_tracking/base_models.py
|
rest_framework_tracking/base_models.py
|
from django.db import models
from django.conf import settings
from django.utils.six import python_2_unicode_compatible
from .managers import PrefetchUserManager
@python_2_unicode_compatible
class BaseAPIRequestLog(models.Model):
""" Logs Django rest framework API requests """
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.SET_NULL, null=True,
blank=True,
)
requested_at = models.DateTimeField(db_index=True)
response_ms = models.PositiveIntegerField(default=0)
path = models.CharField(
max_length=getattr(settings, 'DRF_TRACKING_PATH_LENGTH', 200),
db_index=True,
)
view = models.CharField(
max_length=getattr(settings, 'DRF_TRACKING_VIEW_LENGTH', 200),
null=True,
blank=True,
db_index=True,
)
view_method = models.CharField(
max_length=getattr(settings, 'DRF_TRACKING_VIEW_METHOD_LENGTH', 27),
null=True,
blank=True,
db_index=True,
)
remote_addr = models.GenericIPAddressField()
host = models.URLField()
method = models.CharField(max_length=10)
query_params = models.TextField(null=True, blank=True)
data = models.TextField(null=True, blank=True)
response = models.TextField(null=True, blank=True)
errors = models.TextField(null=True, blank=True)
status_code = models.PositiveIntegerField(null=True, blank=True)
objects = PrefetchUserManager()
class Meta:
abstract = True
verbose_name = 'API Request Log'
def __str__(self):
return '{} {}'.format(self.method, self.path)
|
Python
| 0.000001
|
@@ -375,16 +375,24 @@
ET_NULL,
+%0A
null=Tr
|
5db261f29cd654605df856d221ce511eeac0aad0
|
remove metadata-root-ca.cer
|
setup_app/installers/fido.py
|
setup_app/installers/fido.py
|
import os
import glob
import shutil
from setup_app import paths
from setup_app.utils import base
from setup_app.static import AppType, InstallOption
from setup_app.config import Config
from setup_app.installers.jetty import JettyInstaller
class FidoInstaller(JettyInstaller):
def __init__(self):
setattr(base.current_app, self.__class__.__name__, self)
self.service_name = 'fido2'
self.needdb = True
self.app_type = AppType.SERVICE
self.install_type = InstallOption.OPTONAL
self.install_var = 'installFido2'
self.register_progess()
self.source_files = [
(os.path.join(Config.distGluuFolder, 'fido2.war'), Config.maven_root + '/maven/org/gluu/fido2-server/{0}/fido2-server-{0}.war'.format(Config.oxVersion))
]
self.fido2ConfigFolder = os.path.join(Config.configFolder, 'fido2')
self.output_folder = os.path.join(Config.outputFolder, 'fido2')
self.template_folder = os.path.join(Config.templateFolder, 'fido2')
self.fido2_dynamic_conf_json = os.path.join(self.output_folder, 'dynamic-conf.json')
self.fido2_static_conf_json = os.path.join(self.output_folder, 'static-conf.json')
self.ldif_fido2 = os.path.join(self.output_folder, 'fido2.ldif')
def install(self):
self.installJettyService(self.jetty_app_configuration[self.service_name], True)
self.logIt("Copying fido.war into jetty webapps folder...")
jettyServiceWebapps = os.path.join(self.jetty_base, self.service_name, 'webapps')
self.copyFile(self.source_files[0][0], jettyServiceWebapps)
self.war_for_jetty10(os.path.join(jettyServiceWebapps, os.path.basename(self.source_files[0][0])))
self.enable()
def render_import_templates(self, do_import=True):
Config.templateRenderingDict['fido2ConfigFolder'] = self.fido2ConfigFolder
self.renderTemplateInOut(self.fido2_dynamic_conf_json, self.template_folder, self.output_folder)
self.renderTemplateInOut(self.fido2_static_conf_json, self.template_folder, self.output_folder)
Config.templateRenderingDict['fido2_dynamic_conf_base64'] = self.generate_base64_file(self.fido2_dynamic_conf_json, 1)
Config.templateRenderingDict['fido2_static_conf_base64'] = self.generate_base64_file(self.fido2_static_conf_json, 1)
self.renderTemplateInOut(self.ldif_fido2, self.template_folder, self.output_folder)
if do_import:
ldif_files = [self.ldif_fido2]
self.dbUtils.import_ldif(ldif_files)
def create_folders(self):
for d in ('authenticator_cert', 'mds/cert', 'mds/toc', 'server_metadata'):
dpath = os.path.join(self.fido2ConfigFolder, d)
self.run([paths.cmd_mkdir, '-p', dpath])
def copy_static(self):
# Fido2 authenticator certs
target_dir = os.path.join(self.fido2ConfigFolder, 'authenticator_cert')
for f in ('yubico-u2f-ca-cert.crt', 'HyperFIDO_CA_Cert_V1.pem', 'HyperFIDO_CA_Cert_V2.pem'):
src = os.path.join(Config.install_dir, 'static/fido2/authenticator_cert/', f)
self.copyFile(src, target_dir)
# Fido2 MDS TOC cert
self.copyFile(
os.path.join(Config.install_dir, 'static/fido2/mds_toc_cert/metadata-root-ca.cer'),
os.path.join(self.fido2ConfigFolder, 'mds/cert')
)
# copy Apple_WebAuthn_Root_CA
apple_webauthn = os.path.join(Config.distAppFolder, 'Apple_WebAuthn_Root_CA.pem')
if os.path.exists(apple_webauthn):
target_dir = os.path.join(self.fido2ConfigFolder, 'apple')
self.run([paths.cmd_mkdir, '-p', target_dir])
self.copyFile(apple_webauthn, target_dir)
# copy external files
self.copyTree(
os.path.join(Config.distAppFolder, 'fido2'),
self.fido2ConfigFolder
)
|
Python
| 0.000459
|
@@ -3177,232 +3177,8 @@
r)%0A%0A
- # Fido2 MDS TOC cert%0A self.copyFile(%0A os.path.join(Config.install_dir, 'static/fido2/mds_toc_cert/metadata-root-ca.cer'),%0A os.path.join(self.fido2ConfigFolder, 'mds/cert')%0A )%0A%0A
|
1b0b96d78d03af813b10359e1ee7d7dd47045307
|
Correct URL to include entire base for API client
|
changes/api/client.py
|
changes/api/client.py
|
import json
from flask import current_app
class APIError(Exception):
pass
class APIClient(object):
"""
An internal API client.
>>> client = APIClient(version=0)
>>> response = client.get('/projects/')
>>> print response
"""
def __init__(self, version):
self.version = version
def dispatch(self, url, method, data=None):
url = '/api/%d/%s' % (self.version, url.lstrip('/'))
client = current_app.test_client()
response = client.open(url, method, data)
if not (200 <= response.status_code < 300):
raise APIError('Request returned invalid status code: %d' % (response.status_code,))
if response.headers['Content-Type'] != 'application/json':
raise APIError('Request returned invalid content type: %s' % (response.headers['Content-Type'],))
# TODO(dcramer): ideally we wouldn't encode + decode this
return json.loads(response.data)
def delete(self, *args, **kwargs):
return self.dispatch(method='DELETE', *args, **kwargs)
def get(self, *args, **kwargs):
return self.dispatch(method='GET', *args, **kwargs)
def head(self, *args, **kwargs):
return self.dispatch(method='HEAD', *args, **kwargs)
def options(self, *args, **kwargs):
return self.dispatch(method='OPTIONS', *args, **kwargs)
def patch(self, *args, **kwargs):
return self.dispatch(method='PATCH', *args, **kwargs)
def post(self, *args, **kwargs):
return self.dispatch(method='POST', *args, **kwargs)
def put(self, *args, **kwargs):
return self.dispatch(method='PUT', *args, **kwargs)
api_client = APIClient(version=0)
|
Python
| 0
|
@@ -379,16 +379,18 @@
url = '
+%25s
/api/%25d/
@@ -396,16 +396,48 @@
/%25s' %25 (
+current_app.config%5B'BASE_URI'%5D,
self.ver
@@ -467,24 +467,20 @@
-client =
+with
current
@@ -497,25 +497,39 @@
client()
-%0A
+ as client:%0A
response
@@ -520,16 +520,17 @@
+
response
@@ -544,16 +544,21 @@
nt.open(
+path=
url, met
@@ -556,26 +556,38 @@
=url, method
-,
+=method, data=
data)%0A
|
81fb181fc3bc7a6866340ac9004004ef438608d7
|
remove trailing period, fix grammar
|
boxoffice/models/discount_policy.py
|
boxoffice/models/discount_policy.py
|
# -*- coding: utf-8 -*-
import string
import random
from datetime import datetime
from werkzeug import cached_property
from itsdangerous import Signer, BadSignature
from sqlalchemy import event
from baseframe import __
from coaster.utils import LabeledEnum, uuid1mc, buid
from boxoffice.models import db, IdMixin, BaseScopedNameMixin
from boxoffice.models import Organization
__all__ = ['DiscountPolicy', 'DiscountCoupon', 'item_discount_policy', 'DISCOUNT_TYPE']
class DISCOUNT_TYPE(LabeledEnum):
AUTOMATIC = (0, __("Automatic"))
COUPON = (1, __("Coupon"))
item_discount_policy = db.Table('item_discount_policy', db.Model.metadata,
db.Column('item_id', None, db.ForeignKey('item.id'), primary_key=True),
db.Column('discount_policy_id', None, db.ForeignKey('discount_policy.id'), primary_key=True),
db.Column('created_at', db.DateTime, default=datetime.utcnow, nullable=False))
class DiscountPolicy(BaseScopedNameMixin, db.Model):
"""
Consists of the discount rules applicable on items
`title` has a GIN index to enable trigram matching.
"""
__tablename__ = 'discount_policy'
__uuid_primary_key__ = True
__table_args__ = (db.UniqueConstraint('organization_id', 'name'),
db.UniqueConstraint('organization_id', 'discount_code_base'),
db.CheckConstraint('percentage > 0 and percentage <= 100', 'discount_policy_percentage_check'),
db.CheckConstraint('discount_type = 0 or (discount_type = 1 and bulk_coupon_usage_limit IS NOT NULL)', 'discount_policy_bulk_coupon_usage_limit_check'))
organization_id = db.Column(None, db.ForeignKey('organization.id'), nullable=False)
organization = db.relationship(Organization, backref=db.backref('discount_policies', order_by='DiscountPolicy.created_at.desc()', lazy='dynamic', cascade='all, delete-orphan'))
parent = db.synonym('organization')
discount_type = db.Column(db.Integer, default=DISCOUNT_TYPE.AUTOMATIC, nullable=False)
# Minimum number of a particular item that needs to be bought for this discount to apply
item_quantity_min = db.Column(db.Integer, default=1, nullable=False)
percentage = db.Column(db.Integer, nullable=True)
# price-based discount
is_price_based = db.Column(db.Boolean, default=False, nullable=False)
discount_code_base = db.Column(db.Unicode(20), nullable=True)
secret = db.Column(db.Unicode(50), nullable=True)
items = db.relationship('Item', secondary=item_discount_policy)
# Coupons generated in bulk are not stored in the database during generation.
# This field allows specifying the number of times a coupon, generated in bulk, can be used
# This is particularly useful for generating referral discount coupons. For instance, one could generate
# a signed coupon and provide it to a user such that the user can share the coupon `n` times
# `n` here is essentially bulk_coupon_usage_limit.
bulk_coupon_usage_limit = db.Column(db.Integer, nullable=True, default=1)
@cached_property
def is_automatic(self):
return self.discount_type == DISCOUNT_TYPE.AUTOMATIC
@cached_property
def is_coupon(self):
return self.discount_type == DISCOUNT_TYPE.COUPON
def gen_signed_code(self, identifier=None):
"""Generates a signed code in the format discount_code_base.randint.signature"""
if not identifier:
identifier = buid()
signer = Signer(self.secret)
key = "{base}.{identifier}".format(base=self.discount_code_base, identifier=identifier)
return signer.sign(key)
@staticmethod
def is_signed_code_format(code):
"""Checks if the code is in the {x.y.z} format"""
return len(code.split('.')) == 3 if code else False
@classmethod
def get_from_signed_code(cls, code):
"""Returns a discount policy given a valid signed code, returns None otherwise"""
if not cls.is_signed_code_format(code):
return None
discount_code_base = code.split('.')[0]
policy = cls.query.filter_by(discount_code_base=discount_code_base).one_or_none()
if not policy:
return None
signer = Signer(policy.secret)
try:
signer.unsign(code)
return policy
except BadSignature:
return None
def set_secret(self, secret=None):
"""Sets a given value or buid as the secret for the discount policy object"""
self.secret = secret if secret else buid()
return self.secret
@classmethod
def make_bulk(cls, discount_code_base, **kwargs):
"""
Returns a discount policy for the purpose of issuing signed discount coupons in bulk.
"""
return cls(discount_type=DISCOUNT_TYPE.COUPON, discount_code_base=discount_code_base, secret=buid(), **kwargs)
@event.listens_for(DiscountPolicy, 'before_update')
@event.listens_for(DiscountPolicy, 'before_insert')
def validate_price_based_discount(mapper, connection, target):
if target.is_price_based and len(target.items) > 1:
raise ValueError("Price-based discount MUST have only one associated item.")
def generate_coupon_code(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
class DiscountCoupon(IdMixin, db.Model):
__tablename__ = 'discount_coupon'
__uuid_primary_key__ = True
__table_args__ = (db.UniqueConstraint('discount_policy_id', 'code'),)
def __init__(self, *args, **kwargs):
self.id = uuid1mc()
super(DiscountCoupon, self).__init__(*args, **kwargs)
code = db.Column(db.Unicode(100), nullable=False, default=generate_coupon_code)
usage_limit = db.Column(db.Integer, nullable=False, default=1)
used_count = db.Column(db.Integer, nullable=False, default=0)
discount_policy_id = db.Column(None, db.ForeignKey('discount_policy.id'), nullable=False)
discount_policy = db.relationship(DiscountPolicy, backref=db.backref('discount_coupons', cascade='all, delete-orphan'))
|
Python
| 0.999901
|
@@ -5091,16 +5091,17 @@
discount
+s
MUST ha
@@ -5127,17 +5127,16 @@
ted item
-.
%22)%0A%0A%0Adef
|
fa19f7bee1f8b0cd19790b12779ad362feec26a4
|
Fix Brand.has_products() so it's not always True
|
satchmo/apps/satchmo_ext/brand/models.py
|
satchmo/apps/satchmo_ext/brand/models.py
|
from django.conf import settings
from django.contrib.sites.models import Site
from django.db import models
from django.utils.translation import ugettext_lazy as _
from l10n.mixins import TranslatedObjectMixin
import product
from product.models import Product
from satchmo_utils.thumbnail.field import ImageWithThumbnailField
from satchmo_utils.signals import collect_urls
import logging
log = logging.getLogger('brand.models')
class BrandManager(models.Manager):
def active(self, site=None):
if not site:
site = Site.objects.get_current()
return self.filter(site=site, active=True)
def by_slug(self, slug):
site = Site.objects.get_current()
return self.get(slug=slug, site=site)
class Brand(models.Model, TranslatedObjectMixin):
"""A product brand"""
site = models.ForeignKey(Site)
slug = models.SlugField(_("Slug"), unique=True,
help_text=_("Used for URLs"))
products = models.ManyToManyField(Product, blank=True, verbose_name=_("Products"), through='BrandProduct')
ordering = models.IntegerField(_("Ordering"))
active = models.BooleanField(default=True)
objects = BrandManager()
def _active_categories(self):
return [cat for cat in self.categories.all() if cat.has_content()]
active_categories = property(fget=_active_categories)
def _translation(self):
return self._find_translation()
translation = property(fget=_translation)
def _get_absolute_url(self):
return ('satchmo_brand_view', None, {'brandname' : self.slug})
get_absolute_url = models.permalink(_get_absolute_url)
def active_products(self):
return self.products.filter(site=self.site, active=True)
def has_categories(self):
return self.active_categories().count() > 0
def has_content(self):
return self.has_products() or self.has_categories()
def has_products(self):
return self.active_products().count > 0
def __unicode__(self):
return u"%s" % self.slug
class Meta:
ordering=('ordering', 'slug')
verbose_name = _('Brand')
verbose_name_plural = _('Brands')
class BrandProduct(models.Model):
brand = models.ForeignKey(Brand)
product = models.ForeignKey(Product)
class Meta:
verbose_name=_("Brand Product")
verbose_name_plural=_("Brand Products")
class BrandTranslation(models.Model):
brand = models.ForeignKey(Brand, related_name="translations")
languagecode = models.CharField(_('language'), max_length=10, choices=settings.LANGUAGES)
name = models.CharField(_('title'), max_length=100, blank=False)
short_description = models.CharField(_('Short Description'), blank=True, max_length=200)
description = models.TextField(_('Full Description'), blank=True)
picture = ImageWithThumbnailField(verbose_name=_('Picture'),
upload_to="__DYNAMIC__",
name_field="_filename",
null=True, blank=True,
max_length=200) #Media root is automatically prepended
def _get_filename(self):
if self.brand:
return '%s-%s' % (self.brand.slug, self.id)
else:
return 'default'
_filename = property(_get_filename)
class Meta:
ordering=('languagecode', )
verbose_name = _('Brand Translation')
verbose_name_plural = _('Brand Translations')
class BrandCategoryManager(models.Manager):
def by_slug(self, brandname, slug):
brand = Brand.objects.by_slug(brandname)
return brand.categories.get(slug=slug)
class BrandCategory(models.Model, TranslatedObjectMixin):
"""A category within a brand"""
slug = models.SlugField(_("Slug"),
help_text=_("Used for URLs"))
brand = models.ForeignKey(Brand, related_name="categories")
products = models.ManyToManyField(Product, blank=True, verbose_name=_("Products"), through='BrandCategoryProduct')
ordering = models.IntegerField(_("Ordering"))
active = models.BooleanField(default=True)
objects = BrandCategoryManager()
def _translation(self):
return self._find_translation()
translation = property(fget=_translation)
def _get_absolute_url(self):
return ('satchmo_brand_category_view', None, {'brandname' : self.brand.slug, 'catname' : self.slug})
get_absolute_url = models.permalink(_get_absolute_url)
def active_products(self):
return self.products.filter(site=self.brand.site).filter(active=True)
def has_categories(self):
return False
def has_content(self):
return self.active_products()
def has_products(self):
return self.active_products().count > 0
def __unicode__(self):
return u"%s: %s" % (self.brand.slug, self.slug)
class Meta:
ordering=('ordering', 'slug')
verbose_name = _('Brand Category')
verbose_name_plural = _('Categories')
class BrandCategoryProduct(models.Model):
brandcategory = models.ForeignKey(BrandCategory)
product = models.ForeignKey(Product)
class Meta:
verbose_name = _('Brand Category Product')
verbose_name_plural = _('Brand Category Products')
class BrandCategoryTranslation(models.Model):
brandcategory = models.ForeignKey(BrandCategory, related_name="translations")
languagecode = models.CharField(_('language'), max_length=10, choices=settings.LANGUAGES)
name = models.CharField(_('title'), max_length=100, blank=False)
short_description = models.CharField(_('Short Description'), blank=True, max_length=200)
description = models.TextField(_('Description'), blank=True)
picture = ImageWithThumbnailField(verbose_name=_('Picture'),
upload_to="__DYNAMIC__",
name_field="_filename",
null=True, blank=True,
max_length=200) #Media root is automatically prepended
def _get_filename(self):
if self.brandcategory:
return '%s-%s' % (self.brandcategory.brand.slug, self.id)
else:
return 'default'
_filename = property(_get_filename)
class Meta:
ordering=('languagecode', )
verbose_name_plural = _('Brand Category Translations')
import config
from urls import add_brand_urls
collect_urls.connect(add_brand_urls, sender=product)
|
Python
| 0.003399
|
@@ -2001,24 +2001,26 @@
ucts().count
+()
%3E 0%0A
|
70f7e516755063864b7e3a52816a64eb071436ce
|
Change ExtendedUser to CustomUser & set default content type back to text/html.
|
restfulwebapisite/settings/defaults.py
|
restfulwebapisite/settings/defaults.py
|
"""Django settings for restfulwebapisite project."""
import os
# To help us extend the defaults instead of overriding them with hardcoded values.
from django.conf import global_settings as DEFAULT_SETTINGS
PROJECT_PATH = os.path.abspath(os.path.join(os.path.join(os.path.dirname(__file__).replace('\\','/'), '..'), '..'))
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.path.join(PROJECT_PATH, 'sqlite3_restfulwebapi.db'), # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': '',
'PASSWORD': '',
'HOST': '', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
#TIME_ZONE = 'America/Chicago'
TIME_ZONE = 'Europe/London'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
#LANGUAGE_CODE = 'en-us'
LANGUAGE_CODE = 'en-gb'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'pgo-!&m@m+2vbzu*2e4csf8+(+*g^egw_7c^r-!ddprc2^je7!'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
'restfulwebapi.middleware.MIMETypeMiddleware',
)
ROOT_URLCONF = 'restfulwebapisite.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'restfulwebapisite.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_PATH, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'accounts',
'restfulwebapi',
'cinema',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
##################
# AUTHENTICATION #
##################
AUTH_USER_MODEL = 'accounts.ExtendedUser'
LOGIN_URL = 'accounts:login'
LOGOUT_URL = 'accounts:logout'
LOGIN_REDIRECT_URL = 'accounts:profile'
####################
# CORE #
####################
DEFAULT_CONTENT_TYPE = 'application/xhtml+xml'
# Add new context processors here
TEMPLATE_CONTEXT_PROCESSORS = DEFAULT_SETTINGS.TEMPLATE_CONTEXT_PROCESSORS
# First day of week, to be used on calendars
# 0 means Sunday, 1 means Monday...
FIRST_DAY_OF_WEEK = 1
|
Python
| 0
|
@@ -5949,16 +5949,14 @@
nts.
-Extended
+Custom
User
@@ -6126,16 +6126,17 @@
######%0A%0A
+#
DEFAULT_
|
2ab57fd58150d9416986eb089430fcd090c9cc31
|
Fix ipynb tests
|
binstar_client/inspect_package/ipynb.py
|
binstar_client/inspect_package/ipynb.py
|
import os
import re
import time
from ..utils.notebook.inflection import parameterize
from ..utils.notebook.data_uri import data_uri_from
class IPythonNotebook(object):
_name = None
_version = None
thumbnail_file = None
def __init__(self, filename, fileobj, *args, **kwargs):
self.filename = filename
self.thumbnail_file = kwargs.get('thumbnail_file', None)
@property
def basename(self):
return os.path.basename(self.filename)
@property
def name(self):
if self._name is None:
return re.sub('\-ipynb$', '', parameterize(os.path.basename(self.filename)))
return self._name
@property
def version(self):
if self._version is None:
self._version = time.strftime('%Y.%m.%d-%H%M')
return self._version
@property
def thumbnail(self):
if self.thumbnail_file is None:
return None
return data_uri_from(self.thumbnail_file)
def get_package_data(self):
if self.thumbnail_file is None:
return {
'name': self.name,
'summary': 'IPython notebook'
}
else:
return {
'name': self.name,
'summary': 'IPython notebook',
'thumbnail': self.thumbnail
}
def inspect_ipynb_package(filename, fileobj, *args, **kwargs):
if 'parser_args' in kwargs:
thumbnail_file = kwargs['parser_args'].thumbnail
ipython_notebook = IPythonNotebook(filename, fileobj, thumbnail_file=thumbnail_file)
package_data = ipython_notebook.get_package_data()
release_data = {
'version': ipython_notebook.version,
'description': ''
}
file_data = {
'basename': ipython_notebook.basename,
'attrs': {}
}
return package_data, release_data, file_data
|
Python
| 0.000002
|
@@ -1572,24 +1572,96 @@
mbnail_file)
+%0A else:%0A ipython_notebook = IPythonNotebook(filename, fileobj)
%0A%0A packag
|
6672e556ecd82725cff5a3920bc5a537ccd91fab
|
Update radio.py
|
cogs/radio.py
|
cogs/radio.py
|
from .utils import config, checks, formats
import discord
from discord.ext import commands
import discord.utils
from .utils.api.pycopy import Copy
import random, json, asyncio
from urllib.parse import unquote
class Radio:
"""The radio-bot related commands."""
def __init__(self, bot):
self.bot = bot
if not discord.opus.is_loaded():
discord.opus.load_opus('/usr/local/lib/libopus.so') #FreeBSD path
self.player = None
self.stopped = True
self.q = asyncio.Queue()
self.play_next_song = asyncio.Event()
self.current_song = None
copy_creds = self.load_copy_creds()
self.copycom = Copy(copy_creds['login'], copy_creds['passwd'])
self.songs = []
self.update_song_list()
def load_copy_creds(self):
with open('copy_creds.json') as f:
return json.load(f)
@property
def is_playing(self):
return self.player is not None and self.player.is_playing() and not self.stopped
def toggle_next_song(self):
if not self.stopped:
self.bot.loop.call_soon_threadsafe(self.play_next_song.set)
def update_song_list(self):
self.songs = self.copycom.list_files('radio/')
@commands.command()
async def join(self, *, channel : discord.Channel = None):
"""Зайти на указанный голосовой канал."""
if channel is None or channel.type is not discord.ChannelType.voice:
await self.bot.say('Нет такого голосового канала.')
return
await self.bot.join_voice_channel(channel)
@commands.command(pass_context=True)
async def leave(self, ctx):
"""Покинуть текущий голосовой каналю"""
await ctx.invoke(self.stop)
await self.bot.voice.disconnect()
@commands.command()
async def pause(self):
"""Приостановить воспроизведение."""
if self.player is not None:
self.player.pause()
@commands.command()
async def resume(self):
"""Продолжить воспроизведение."""
if self.player is not None and not self.is_playing:
self.player.resume()
@commands.command()
async def skip(self):
"""Перейти к следующей песне в очереди."""
if self.player is not None and self.is_playing:
self.player.stop()
self.toggle_next_song()
@commands.command()
async def stop(self):
"""Остановить воспроизведение."""
if self.is_playing:
self.stopped = True
self.player.stop()
@commands.command(pass_context=True)
async def play(self, ctx):
"""Начать воспроизведение песен из очереди."""
if self.player is not None and not self.stopped:
if not self.is_playing:
await ctx.invoke(self.resume)
return
else:
await self.bot.say('Уже играю песенку.')
return
while True:
if not self.bot.is_voice_connected():
await ctx.invoke(self.join, channel=ctx.message.author.voice_channel)
continue
if self.q.empty():
await self.q.put(random.choice(self.songs))
self.play_next_song.clear()
self.current = await self.q.get()
self.player = self.bot.voice.create_ffmpeg_player(
self.copycom.direct_link('radio/' + self.current),
after=self.toggle_next_song,
#options="-loglevel debug -report",
headers = dict(self.copycom.session.headers))
self.stopped = False
self.player.start()
song_name = unquote(self.current.split('/')[-1])
await self.bot.change_status(discord.Game(name=song_name))
await self.play_next_song.wait()
@commands.command(aliases=['c'])
async def current(self):
"""Что там на радио?"""
if self.is_playing:
song_name = unquote(self.current.split('/')[-1])
await self.bot.say(song_name)
@commands.command()
async def update(self):
"""Обновить список песен."""
self.update_song_list()
await self.bot.say("Найдено {} песенок".format(len(self.files)))
@commands.command()
async def list(self):
"""Вывести список всех доступных песен."""
song_list = ""
id = 1
for song in self.songs:
song_list += "{}. {}\n".format(id, song)
id += 1
if len(song_list) > 1800:
await self.bot.say(song_list)
song_list = ''
await self.bot.say(song_list)
@commands.command()
async def add(self, song_num : int):
"""Добавить в конец очереди песню с данным номером."""
await self.q.put(self.songs[song_num-1])
await self.bot.say("{} будет следующей песенкой".format(self.songs[song_num-1]))
def setup(bot):
bot.add_cog(Radio(bot))
|
Python
| 0.000001
|
@@ -4501,20 +4501,20 @@
en(self.
-file
+song
s)))%0D%0A
|
9cdd74a6f94ec2f7272cc66c5f5e9ff3e0999b40
|
Extend bb_component test
|
bluebottle/common/tests/templatetags.py
|
bluebottle/common/tests/templatetags.py
|
from django.template import Template, Context, TemplateSyntaxError
import unittest
class BlockVerbatimTestCase(unittest.TestCase):
    """
    Tests for the block_verbatim template tag.

    block_verbatim parses other template tags while leaving {{foo}} structures
    untouched. {% block %} inside block_verbatim DOES render context variables.
    """

    def test_render(self):
        """block_verbatim accepts the block name in the closing tag."""
        template = Template(
            '{% load bb_ember %}'  # load the tag library
            '{% block_verbatim test %}'
            '{{verbatim node}}'
            '{% endblock_verbatim test %}'
        )
        output = template.render(Context())
        self.assertEqual(output, u'{{verbatim node}}')

    def test_render_no_name_closing_tag(self):
        """block_verbatim also works without a name in the closing tag."""
        template = Template(
            '{% load bb_ember %}'
            '{% block_verbatim test %}'
            '{{verbatim node}}'
            '{% endblock_verbatim %}'
        )
        output = template.render(Context())
        self.assertEqual(output, u'{{verbatim node}}')

    def test_block_in_block(self):
        """A nested {% block %} is parsed and its body rendered."""
        template = Template(
            '{% load bb_ember %}'
            '{% block_verbatim test %}'
            '{{verbatim node}}'
            '{% block foo %}'
            '\nfoo'
            '{% endblock %}'
            '{% endblock_verbatim %}'
        )
        output = template.render(Context())
        self.assertEqual(output, u'{{verbatim node}}\nfoo')

    def test_block_in_block_with_context(self):
        """A nested {% block %} renders context variables."""
        template = Template(
            '{% load bb_ember %}'
            '{% block_verbatim test %}'
            '{{verbatim node}}'
            '{% block foo %}'
            '\n{{ foo }}'
            '{% endblock %}'
            '{% endblock_verbatim %}'
        )
        output = template.render(Context({'foo': 'bar'}))
        self.assertEqual(output, u'{{verbatim node}}\nbar')

    def test_tag_not_loaded(self):
        """Using the tag without loading its library is a syntax error."""
        with self.assertRaises(TemplateSyntaxError):
            Template(
                '{% block_verbatim test %}'
                '{{verbatim node}}'
                '{% endblock_verbatim %}'
            )
class BBComponentTestCase(unittest.TestCase):
    """
    Tests for the bb_component tag.

    bb_component takes an arbitrary number of keyword arguments and translates
    strings marked for translation.
    """

    def setUp(self):
        self.load_statement = "{% load bb_ember %}"

    def test_no_component_args(self):
        """A bare component renders as a simple handlebars expression."""
        template = Template(
            self.load_statement + '{% bb_component \'my-component\' %}'
        )
        output = template.render(Context())
        self.assertEqual(output, u'{{my-component}}')

    def test_no_args(self):
        """A TemplateSyntaxError is raised when the component name is missing
        or not given as a quoted string."""
        with self.assertRaises(TemplateSyntaxError):
            Template(
                self.load_statement + '{% bb_component %}'
            ).render(Context())

        with self.assertRaises(TemplateSyntaxError):
            Template(
                self.load_statement + '{% bb_component foo %}'
            ).render(Context())

    def test_kwargs(self):
        """All keyword arguments appear inside the rendered component."""
        template = Template(
            self.load_statement +
            '{% bb_component \'my-component\' value1=\'foo\' name=\'bar\' errors=\'foobar\' %}'
        )
        result = template.render(Context())
        self.assertTrue(result.startswith('{{my-component '))
        self.assertIn('value1=foo', result)
        self.assertIn('name=\'bar\'', result)
        self.assertIn('errors=foobar', result)
        self.assertTrue(result.endswith('}}'))
|
Python
| 0.000001
|
@@ -3809,16 +3809,39 @@
=%5C'bar%5C'
+ valueBinding=%5C'title%5C'
errors=
@@ -4051,32 +4051,88 @@
bar%5C'', result)%0A
+ self.assertIn('valueBinding=%5C'title%5C'', result)%0A
self.ass
|
b85b2da345be12be400ebbdd33c7f04aaf3e51cf
|
Fix processing script to handle compound nodes with ports
|
scripts/graph-process.py
|
scripts/graph-process.py
|
#!/usr/bin/env python3
import sys
import re
import collections
import argparse
import json
# Command-line interface: both inputs are required positional file paths.
parser = argparse.ArgumentParser(description='Merge connection graph and forest data.')
parser.add_argument('forest', type=str, help='json file containing forest')
parser.add_argument('graph', type=str, help='graphviz dot file containing graph')
args = parser.parse_args()
# Global id sequence for graphviz "cluster_<n>" subgraph names;
# incremented by CompoundNode.print.
counter = 0
class CompoundNode:
    """A named group of child nodes rendered as a graphviz cluster subgraph."""

    def __init__(self, name=''):
        self.nodes = []
        self.name = name

    def print(self, indent):
        """Render this cluster (and, recursively, its children) as dot text."""
        global counter
        cluster_id = counter
        counter += 1  # claim our id before the children claim theirs
        pad = ' ' * indent
        parts = [
            pad + 'subgraph cluster_%d {\n' % cluster_id,
            pad + ' ' + 'label="%s";\n' % self.name,
        ]
        for child in self.nodes:
            parts.append(child.print(indent + 1))
        parts.append(pad + '}\n')
        return ''.join(parts)

    def add(self, node):
        """Attach a child (a Node or another CompoundNode)."""
        self.nodes.append(node)
class Node:
    """A single graph vertex, identified by its dot node number."""

    def __init__(self, num):
        self.num = num

    def print(self, indent):
        """Render the node as an indented dot statement."""
        return '{}{};\n'.format(' ' * indent, self.num)
def region_to_color(region):
    """Map a region id (hex string, e.g. "0x12ab34cd") to a graphviz color.

    Only the low 24 bits are used, giving a "#RRGGBB" value.
    """
    reg_int = int(region, 16)
    rgb = reg_int & 0xffffff
    # BUG FIX: '%x' produced short strings such as '#1' for small region
    # values, which graphviz does not accept as a color; always zero-pad
    # to six hex digits.
    return '#%06x' % rgb
def line_to_node(line):
    """Parse one dot node line into (num, label, uuid, region) strings."""
    head = re.match(r'(\d+)\[label="([^"]+)"', line)
    num, label = head.group(1), head.group(2)
    # uuid and region are quoted attributes further along the same line.
    uuid = re.search(r'uuid="([^"]+)"', line).group(1)
    region = re.search(r'region="([^"]+)"', line).group(1)
    return num, label, uuid, region
# Read the whole dot file; line 0 is the graph header, the node lines follow.
with open(args.graph) as file_:
    graph_lines = file_.readlines()

# root_nodes: top-level clusters to emit; nodes: uuid -> Node for every vertex.
# defaultdict(CompoundNode) silently creates an empty cluster for unknown keys.
root_nodes = collections.defaultdict(CompoundNode)
nodes = collections.defaultdict(CompoundNode)
lines = []
# Node definition lines start with '<num>['; stop at the first non-node line.
for line in graph_lines[1:]:
    if not re.match(r'\d+\[', line):
        break
    num, name, uuid, region = line_to_node(line)
    nodes[uuid] = Node(num)
    # Re-emit the node with a fill color derived from its region id.
    lines.append('%s[label="%s", fillcolor="%s", style="filled"];' % (num, name,
        region_to_color(region)))

with open(args.forest) as file_:
    forest = json.load(file_)

# forest maps node uuid -> [name, parent1, parent2, ...].
for node,parents in sorted(forest.items()):
    # first entry in parents array is name of node
    name, *parents = parents
    if len(parents) == 0:
        root_nodes[node] = nodes[node]
    else:
        first_parent = parents[0]
        # NOTE(review): if first_parent maps to a plain Node (not a
        # CompoundNode) this raises AttributeError, since Node has no
        # add() method — verify the forest only parents onto clusters.
        nodes[first_parent].add(nodes[node])
    nodes[node].name = name

# Emit: original header, the cluster tree, recolored node lines, then the
# untouched remainder of the original dot file (edges, closing brace).
print(graph_lines[0],end='')
for root_node in root_nodes.values():
    print(root_node.print(0))
for line in lines:
    print(line)
for line in graph_lines[len(lines)+1:]:
    print(line,end='')
|
Python
| 0.000021
|
@@ -446,18 +446,21 @@
nodes =
-%5B%5D
+set()
%0A
@@ -764,24 +764,31 @@
out += node
+s%5Bnode%5D
.print(inden
@@ -803,112 +803,354 @@
-out += ' ' * indent + '%7D%5Cn'%0A return out%0A%0A def add(self, node):%0A self.nodes.append(node)
+if not self.nodes:%0A out += '%25d%5Blabel=%22%25s%22%5D%5Cn' %25 (counter + len(lines), self.name)%0A out += ' ' * indent + '%7D%5Cn'%0A return out%0A%0A def add(self, node):%0A self.nodes.add(node)%0A%0A @staticmethod%0A def from_node(node):%0A compound_node = CompoundNode()%0A compound_node.add(node)%0A return compound_node%0A
%0A%0Acl
@@ -2208,16 +2208,59 @@
ad(file_
+, object_pairs_hook=collections.OrderedDict
)%0A%0Afor n
@@ -2278,15 +2278,8 @@
in
-sorted(
fore
@@ -2288,17 +2288,16 @@
.items()
-)
:%0A #
@@ -2479,16 +2479,371 @@
ents%5B0%5D%0A
+ try:%0A nodes%5Bfirst_parent%5D.add(node)%0A except AttributeError:%0A # HACK - subgraphs can't have connections so add the _old_ node as%0A # a subnode to the new compound node%0A nodes%5Bfirst_parent + 'x'%5D = nodes%5Bfirst_parent%5D%0A nodes%5Bfirst_parent%5D = CompoundNode.from_node(first_parent + 'x')%0A
@@ -2870,23 +2870,16 @@
add(node
-s%5Bnode%5D
)%0A no
|
acc0cf7a9e44ca11384d7d2b0dcd743af6e99ef9
|
Update version to 1.0.3
|
chatterbot/__init__.py
|
chatterbot/__init__.py
|
"""
ChatterBot is a machine learning, conversational dialog engine.
"""
from .chatterbot import ChatBot
# Package metadata, read by setup tooling and available at runtime.
__version__ = '1.0.2'
__author__ = 'Gunther Cox'
__email__ = 'gunthercx@gmail.com'
__url__ = 'https://github.com/gunthercox/ChatterBot'
# Public API of the package: only the ChatBot class is exported.
__all__ = (
    'ChatBot',
)
|
Python
| 0
|
@@ -121,9 +121,9 @@
1.0.
-2
+3
'%0A__
|
52ab9eeaca097890e58c89acb4d85e5e0be2e86b
|
Refactor a method to perform only one functionality
|
sqlitebiter/_table_creator.py
|
sqlitebiter/_table_creator.py
|
#!/usr/bin/env python
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
from __future__ import absolute_import
import simplesqlite
from sqliteschema import SqliteSchemaExtractor
class TableCreator(object):
    """Writes in-memory table data into a destination SQLite database,
    renaming the table when an incompatible one of the same name exists."""

    def __init__(self, logger, dst_con):
        self.__logger = logger
        self.__dst_con = dst_con

    def create(self, tabledata, index_list):
        """Store ``tabledata`` in the destination and create the indexes."""
        need_rename, mem_con = self.__require_rename_table(tabledata)
        src_name = mem_con.get_table_name_list()[0]
        dst_name = src_name

        if need_rename:
            dst_name = self.__make_unique_table_name(src_name)
            self.__logger.debug(u"rename table from '{}' to '{}'".format(
                src_name, dst_name))
            simplesqlite.copy_table(
                src_con=mem_con, dst_con=self.__dst_con,
                src_table_name=src_name,
                dst_table_name=dst_name)
        else:
            simplesqlite.append_table(
                src_con=mem_con, dst_con=self.__dst_con,
                table_name=dst_name)

        sanitized_attrs = [
            simplesqlite.sqlquery.SqlQuery.sanitize_attr(index)
            for index in index_list
        ]
        self.__dst_con.create_index_list(dst_name, sanitized_attrs)

    def __require_rename_table(self, tabledata):
        """Load ``tabledata`` into a memory db and decide whether the
        destination already holds an incompatible table of the same name.

        Returns a (needs_rename, memory_connection) pair."""
        mem_con = simplesqlite.connect_sqlite_memdb()
        mem_con.create_table_from_tabledata(tabledata=tabledata)

        if not self.__dst_con.has_table(tabledata.table_name):
            return (False, mem_con)

        if self.__dst_con.get_attr_name_list(tabledata.table_name) != tabledata.header_list:
            return (True, mem_con)

        dst_schema = SqliteSchemaExtractor(
            self.__dst_con, verbosity_level=1).get_database_schema()
        mem_schema = SqliteSchemaExtractor(
            mem_con, verbosity_level=1).get_database_schema()
        # Same name and headers: rename only if the full schemas differ.
        return (dst_schema != mem_schema, mem_con)

    def __make_unique_table_name(self, table_name_base):
        """Return ``table_name_base`` or the first '<base>_<n>' not in use."""
        existing = self.__dst_con.get_table_name_list()
        if table_name_base not in existing:
            return table_name_base

        suffix_id = 1
        while True:
            candidate = u"{:s}_{:d}".format(table_name_base, suffix_id)
            if candidate not in existing:
                return candidate
            suffix_id += 1
|
Python
| 0.000007
|
@@ -404,16 +404,135 @@
+con_mem = simplesqlite.connect_sqlite_memdb()%0A con_mem.create_table_from_tabledata(tabledata=tabledata)%0A
is_renam
@@ -532,25 +532,16 @@
s_rename
-, con_mem
= self.
@@ -563,25 +563,45 @@
e_table(
-tabledata
+con_mem, tabledata.table_name
)%0A
@@ -1469,139 +1469,33 @@
lf,
-tabledata):%0A con_mem = simplesqlite.connect_sqlite_memdb()%0A con_mem.create_table_from_tabledata(tabledata=tabledata)%0A
+src_con, src_table_name):
%0A
@@ -1531,26 +1531,20 @@
s_table(
-tabledata.
+src_
table_na
@@ -1567,32 +1567,21 @@
return
-(
False
-, con_mem)
%0A%0A
@@ -1585,16 +1585,17 @@
if
+(
self.__d
@@ -1620,26 +1620,20 @@
me_list(
-tabledata.
+src_
table_na
@@ -1642,30 +1642,68 @@
) !=
- tabledata.header_list
+%0A src_con.get_attr_name_list(src_table_name))
:%0A
@@ -1715,39 +1715,28 @@
return
-(
True
-, con_mem)
%0A%0A co
@@ -1896,23 +1896,23 @@
-con_mem
+src_con
, verbos
@@ -1936,16 +1936,17 @@
if
+(
con_sche
@@ -1982,16 +1982,32 @@
ema() ==
+%0A
con_mem
@@ -2045,16 +2045,17 @@
schema()
+)
:%0A
@@ -2071,24 +2071,13 @@
urn
-(
False
-, con_mem)
%0A%0A
@@ -2093,23 +2093,12 @@
urn
-(
True
-, con_mem)
%0A%0A
|
287b7b57056201d1d68c6d3cc963f1b2af2f7e83
|
Update scripts/alerts/validate_alerts_format.py
|
scripts/alerts/validate_alerts_format.py
|
scripts/alerts/validate_alerts_format.py
|
import sys
import yaml
import json
from google.cloud import monitoring_v3
def check_json_in_metadata(path, file_id, file_version):
    """Verify the template at ``path`` has a matching entry (same id and
    version) in the sibling metadata.yaml.

    ``file_version`` is the 'v<N>' token from the file name.
    Raises Exception when no matching entry exists.
    """
    metadata_path = "/".join(path.split("/")[:-1]) + "/metadata.yaml"
    check_metadata_entries(metadata_path)
    # BUG FIX: parse the whole numeric suffix; file_version[1] looked only at
    # the first digit, so 'v12' was matched against version 1.
    version_number = int(file_version[1:])
    # Use a context manager so the file handle is closed instead of leaked.
    with open(metadata_path) as f:
        data = yaml.safe_load(f)
    templates_metadata = data.get("alert_policy_templates")
    for template_metadata in templates_metadata:
        if (template_metadata.get("id") == file_id
                and template_metadata.get("version") == version_number):
            return
    raise Exception("{} does not have an entry in {}".format(path, metadata_path))
def check_metadata_entries(path):
    """Validate that metadata.yaml at ``path`` declares alert_policy_templates
    and that every entry carries the required fields.

    Raises Exception on the first violation found.
    """
    # Use a context manager so the file handle is closed instead of leaked.
    with open(path) as f:
        data = yaml.safe_load(f)
    templates_metadata = data.get("alert_policy_templates")
    if not templates_metadata:
        raise Exception("alert_policy_templates not defined in {}".format(path))
    required_fields = ["id", "version", "display_name", "description"]
    for template_metadata in templates_metadata:
        for field in required_fields:
            # Dicts support membership directly; no need for .keys().
            if field not in template_metadata:
                raise Exception("{} missing {}".format(path, field))
def check_json_file_name(path, file_name_parts):
    """Validate that a template file name follows '<name>.v<N>.json'.

    ``file_name_parts`` is the file name already split on '.'.
    Raises Exception on any violation.
    """
    if len(file_name_parts) != 3:
        raise Exception("{} file name not in <name>.<version>.json format".format(path))
    file_version = file_name_parts[1]
    if file_version[0] != "v":
        raise Exception("{} version does not start with 'v'".format(path))
    # BUG FIX: validate everything after the 'v'; file_version[1] checked only
    # the first character, so 'v1x' passed and a bare 'v' raised IndexError.
    if not file_version[1:].isnumeric():
        raise Exception("{} 'v' is not followed by numeric version number".format(path))
def check_is_alert_policy_json(path):
    """Ensure the file at ``path`` is valid JSON that parses as an
    AlertPolicy.

    Raises Exception when the content cannot be loaded as JSON.
    """
    # Use a context manager so the file handle is closed instead of leaked.
    with open(path) as f:
        try:
            policy_json = json.dumps(json.load(f))
        # BUG FIX: a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catch only real errors and keep the cause.
        except Exception as err:
            raise Exception("{} content could not be loaded".format(path)) from err
    monitoring_v3.AlertPolicy.from_json(policy_json)
def main():
    """Validate the single file path passed as argv[1], if it is an alert."""
    path = sys.argv[1]

    path_parts = path.split("/")
    # Only files inside the alerts/ tree are subject to validation.
    if path_parts[0] != "alerts":
        sys.exit()

    file_name = path_parts[-1]

    # A changed metadata.yaml is itself checked for the expected fields.
    if file_name == "metadata.yaml":
        check_metadata_entries(path)

    # Every .json under alerts/ is implicitly an alert policy template and
    # must follow the naming scheme, appear in metadata.yaml, and parse.
    if path.split(".")[-1] == "json":
        name_parts = file_name.split(".")
        check_json_file_name(path, name_parts)
        check_json_in_metadata(path, name_parts[0], name_parts[1])
        check_is_alert_policy_json(path)

if __name__ == '__main__':
    main()
|
Python
| 0
|
@@ -1,12 +1,24 @@
+import json%0A
import sys%0Ai
@@ -28,28 +28,16 @@
rt yaml%0A
-import json%0A
from goo
|
d7df17b2f5bb8ce2d66eab8e23a9bdedcbb631bf
|
update IGDomain.__init__() for no element groups
|
sfepy/discrete/iga/domain.py
|
sfepy/discrete/iga/domain.py
|
"""
Computational domain for isogeometric analysis.
"""
import os.path as op
import numpy as nm
from sfepy.base.base import assert_, Struct
from sfepy.discrete.common.domain import Domain
import sfepy.discrete.iga as iga
import sfepy.discrete.iga.io as io
from sfepy.discrete.iga.extmods.igac import eval_in_tp_coors
class NurbsPatch(Struct):
"""
Single NURBS patch data.
"""
def __init__(self, knots, degrees, cps,
weights, cs, conn):
degrees = nm.asarray(degrees, dtype=nm.int32)
cs = [nm.asarray(cc, dtype=nm.float64) for cc in cs]
if cs[0].ndim == 3:
cs = [nm.ascontiguousarray(cc[:, None, ...]) for cc in cs]
Struct.__init__(self, name='nurbs', knots=knots, degrees=degrees,
cps=cps, weights=weights, cs=cs, conn=conn)
self.n_els = [len(ii) for ii in cs]
self.dim = len(self.n_els)
def _get_ref_coors_1d(self, pars, axis):
uk = nm.unique(self.knots[axis])
indices = nm.searchsorted(uk[1:], pars)
ref_coors = nm.empty_like(pars)
for ii in xrange(len(uk) - 1):
ispan = nm.where(indices == ii)[0]
pp = pars[ispan]
ref_coors[ispan] = (pp - uk[ii]) / (uk[ii+1] - uk[ii])
return uk, indices, ref_coors
def __call__(self, u=None, v=None, w=None, field=None):
"""
Igakit-like interface for NURBS evaluation.
"""
pars = [u]
if v is not None: pars += [v]
if w is not None: pars += [w]
indices = []
rcs = []
for ia, par in enumerate(pars):
uk, indx, rc = self._get_ref_coors_1d(par, ia)
indices.append(indx.astype(nm.uint32))
rcs.append(rc)
out = eval_in_tp_coors(field, indices,
rcs, self.cps, self.weights,
self.degrees,
self.cs, self.conn)
return out
def evaluate(self, field, u=None, v=None, w=None):
"""
Igakit-like interface for NURBS evaluation.
"""
return self(u, v, w, field)
def _to_igakit(self):
import igakit.cad as cad
n_efuns = self.degrees + 1
nks = nm.array([len(ii) for ii in self.knots])
shape = tuple(nks - n_efuns)
cps = self.cps.reshape(shape + (-1,))
weights = self.weights.reshape(shape)
return cad.NURBS(self.knots, cps, weights=weights)
def _from_igakit(self, inurbs):
cs = iga.compute_bezier_extraction(inurbs.knots, inurbs.degree)
n_els = [len(ii) for ii in cs]
conn, bconn = iga.create_connectivity(n_els, inurbs.knots,
inurbs.degree)
cps = inurbs.points[..., :self.dim].copy()
cps = cps.reshape((-1, self.dim))
return NurbsPatch(inurbs.knots, inurbs.degree, cps,
inurbs.weights.ravel(), cs, conn)
def elevate(self, times=0):
"""
Elevate the patch degrees several `times` by one.
Returns
-------
nurbs : NurbsPatch instance
Either `self` if `times` is zero, or a new instance.
"""
if times is 0: return self
aux = self._to_igakit()
for ia in range(self.dim):
aux.elevate(ia, times)
assert_(nm.isfinite(aux.points).all(),
'igakit degree elevation failed for axis %d!' % ia)
return self._from_igakit(aux)
class IGDomain(Domain):
"""
Bezier extraction based NURBS domain for isogeometric analysis.
"""
@staticmethod
def from_file(filename):
"""
filename : str
The name of the IGA domain file.
"""
(knots, degrees, cps, weights, cs, conn,
bcps, bweights, bconn, regions) = io.read_iga_data(filename)
nurbs = NurbsPatch(knots, degrees, cps, weights, cs, conn)
bmesh = Struct(name='bmesh', cps=bcps, weights=bweights, conn=bconn)
name = op.splitext(filename)[0]
domain = IGDomain(name, nurbs=nurbs, bmesh=bmesh, regions=regions)
return domain
def __init__(self, name, nurbs, bmesh, regions=None, **kwargs):
"""
Create an IGA domain.
Parameters
----------
name : str
The domain name.
"""
Domain.__init__(self, name, nurbs=nurbs, bmesh=bmesh, regions=regions,
**kwargs)
from sfepy.discrete.fem.geometry_element import create_geometry_elements
from sfepy.discrete.fem import Mesh
from sfepy.discrete.fem.extmods.cmesh import CMesh
from sfepy.discrete.fem.utils import prepare_remap
tconn = iga.get_bezier_topology(bmesh.conn, nurbs.degrees)
itc = nm.unique(tconn)
remap = prepare_remap(itc, bmesh.conn.max() + 1)
ltcoors = bmesh.cps[itc]
ltconn = remap[tconn]
n_nod, dim = ltcoors.shape
n_el = ltconn.shape[0]
self.shape = Struct(n_nod=n_nod, dim=dim, tdim=0, n_el=n_el, n_gr=1)
desc = '%d_%d' % (dim, 2**dim)
mat_id = nm.zeros(ltconn.shape[0], dtype=nm.int32)
self.mesh = Mesh.from_data(self.name + '_topo', ltcoors, None, [ltconn],
[mat_id], [desc])
self.cmesh = CMesh.from_mesh(self.mesh)
gels = create_geometry_elements()
self.cmesh.set_local_entities(gels)
self.cmesh.setup_entities()
self.shape.tdim = self.cmesh.tdim
self.gel = gels[desc]
if regions is not None:
self.vertex_set_bcs = {}
for key, val in self.regions.iteritems():
self.vertex_set_bcs[key] = remap[val]
self.cell_offsets = {0 : 0}
self.reset_regions()
|
Python
| 0.000012
|
@@ -4621,67 +4621,8 @@
esh%0A
- from sfepy.discrete.fem.extmods.cmesh import CMesh%0A
@@ -4676,16 +4676,16 @@
e_remap%0A
+
%0A
@@ -5035,16 +5035,8 @@
n_el
-, n_gr=1
)%0A%0A
@@ -5292,34 +5292,23 @@
h =
-CMesh.from_mesh(self.
+self.mesh.c
mesh
-)
%0A
@@ -5624,16 +5624,16 @@
tems():%0A
+
@@ -5683,45 +5683,8 @@
l%5D%0A%0A
- self.cell_offsets = %7B0 : 0%7D%0A%0A
|
a1ea4cde0e82a882cb3c1c4a5d011092e4bae2e9
|
fix flake8 errors
|
boards/opencm904/dist/robotis-loader.py
|
boards/opencm904/dist/robotis-loader.py
|
#!/usr/bin/env python
'''
MIT License
Copyright (c) 2014 Gregoire Passault
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# This script sends a program on a robotis board (OpenCM9.04 or CM900)
# using the robotis bootloader (used in OpenCM IDE)
#
# Usage:
# python robotis-loader.py <serial port> <binary>
#
# Example:
# python robotis-loader.py /dev/ttyACM0 firmware.bin
#
# https://github.com/Gregwar/robotis-loader
import serial
import sys
import os
import time
print('~~ Robotis loader ~~')
print('')
print('Please, make sure to connect the USB cable WHILE holding down the "USER SW" button.')
print('Status LED should stay lit and the board should be able to load the program.')
print('')
# Helper function for bytes conversion
if sys.version_info[:1][0] == 3:
def to_ord(val):
return ord(chr(val))
else:
def to_ord(val):
return ord(val)
# Reading command line
#if len(sys.argv) != 3:
# exit('! Usage: robotis-loader.py <serial-port> <binary>')
#pgm, port, binary = sys.argv
pgm = sys.argv[0]
port = os.environ["PORT"]
binary = os.environ["HEXFILE"]
def progressBar(percent, precision=65):
"""Prints a progress bar."""
threshold = precision*percent / 100.0
sys.stdout.write('[ ')
for x in range(precision):
if x < threshold:
sys.stdout.write('#')
else:
sys.stdout.write(' ')
sys.stdout.write(' ] ')
sys.stdout.flush()
# Opening the firmware file
try:
stat = os.stat(binary)
size = stat.st_size
firmware = open(binary, 'rb')
print('* Opening %s, size=%d' % (binary, size))
except:
exit('! Unable to open file %s' % binary)
# Opening serial port
try:
s = serial.Serial(port, baudrate=115200)
except:
exit('! Unable to open serial port %s' % port)
print('* Resetting the board')
s.setRTS(True)
s.setDTR(False)
time.sleep(0.1)
s.setRTS(False)
s.write(b'CM9X')
s.close()
time.sleep(1.0);
print('* Connecting...')
s = serial.Serial(port, baudrate=115200)
s.write(b'AT&LD')
print('* Download signal transmitted, waiting...')
# Entering bootloader sequence
while True:
line = s.readline().strip()
if line.endswith(b'Ready..'):
print('* Board ready, sending data')
cs = 0
pos = 0
while True:
c = firmware.read(2048)
if len(c):
pos += len(c)
sys.stdout.write("\r")
progressBar(100 * float(pos) / float(size))
s.write(c)
for k in range(0, len(c)):
cs = (cs + to_ord(c[k])) % 256
else:
firmware.close()
break
print('')
s.setDTR(True)
print('* Checksum: %d' % (cs))
import struct
s.write(struct.pack('B', cs))
# s.write('{0}'.format(chr(cs)).encode('ascii'))
print('* Firmware was sent')
else:
if line == b'Success..':
print('* Success, running the code')
print('')
s.write(b'AT&RST')
s.close()
exit()
else:
print('Board -> {}'.format(line))
|
Python
| 0.999995
|
@@ -1874,16 +1874,17 @@
d line%0A#
+
if len(s
@@ -1899,16 +1899,17 @@
!= 3:%0A#
+
exit
@@ -1963,16 +1963,17 @@
ary%3E')%0A#
+
pgm, por
@@ -2575,32 +2575,46 @@
, size))%0Aexcept:
+ # noqa: E722
%0A exit('! Una
@@ -2724,16 +2724,30 @@
%0Aexcept:
+ # noqa: E722
%0A exi
@@ -2931,9 +2931,8 @@
1.0)
-;
%0A%0Apr
|
fac1e1bbe26e0fb7b82d65e48619cacc742ea747
|
Update default path
|
notebooks/utils/data_loading.py
|
notebooks/utils/data_loading.py
|
"""Wrappers to simplify data loading."""
import pandas as pd
# Set default path
DEFAULT_PATH = '../datasets/raw/'
def load_users_data(path=DEFAULT_PATH, preprocessed=False):
"""Load users data into train and test users.
Parameters
----------
path: str
Path of the folder containing the data.
Returns
-------
train_users, test_users: DataFrame, DataFrame
Loaded DataFrames.
"""
if not preprocessed:
train_users = pd.read_csv(path + 'train_users.csv')
test_users = pd.read_csv(path + 'test_users.csv')
else:
path = '../datasets/processed/'
train_users = pd.read_csv(path + 'preprocessed_train_users.csv')
test_users = pd.read_csv(path + 'preprocessed_test_users.csv')
return train_users, test_users
def load_sessions_data(path=DEFAULT_PATH):
"""Load the users sessions data.
Parameters
----------
path: str
Path of the folder containing the data.
Returns
-------
sessions: DataFrame
Loaded DataFrame.
"""
return pd.read_csv(path + 'sessions.csv')
|
Python
| 0.000001
|
@@ -98,20 +98,16 @@
'../data
-sets
/raw/'%0A%0A
|
95c1b7bb737db7ded2980ce2a378d3e5d3d5ed4f
|
Test admin create new user
|
billjobs/tests/tests_user_admin_api.py
|
billjobs/tests/tests_user_admin_api.py
|
from django.test import TestCase
from django.contrib.auth.models import User
from rest_framework import status
from rest_framework.test import APIClient, APIRequestFactory, \
force_authenticate
from billjobs.views import UserAdmin, UserAdminDetail
class UserAdminAPI(TestCase):
    """Exercise the user-administration REST endpoints with admin,
    regular-user and anonymous credentials."""

    fixtures = ['account_test.yaml']

    def setUp(self):
        self.client = APIClient()
        self.factory = APIRequestFactory()
        # In the fixture, pk=1 is the admin account and pk=2 a regular user.
        self.admin = User.objects.get(pk=1)
        self.user = User.objects.get(pk=2)

    def test_admin_list_user(self):
        """An admin may list all users."""
        req = self.factory.get('/billjobs/users/')
        force_authenticate(req, user=self.admin)
        resp = UserAdmin.as_view()(req)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_admin_retrieve_user(self):
        """An admin may retrieve a single user by pk."""
        req = self.factory.get('/billjobs/users/')
        force_authenticate(req, user=self.admin)
        resp = UserAdminDetail.as_view()(req, pk=1)
        self.assertEqual(resp.status_code, status.HTTP_200_OK)

    def test_admin_get_404_if_no_user_pk(self):
        """Retrieving a non-existent pk yields 404."""
        req = self.factory.get('/billjobs/users')
        force_authenticate(req, user=self.admin)
        resp = UserAdminDetail.as_view()(req, pk=123)
        self.assertEqual(resp.status_code, status.HTTP_404_NOT_FOUND)

    def test_anonymous_do_not_list_user(self):
        """Unauthenticated requests may not list users."""
        req = self.factory.get('/billjobs/users/')
        resp = UserAdmin.as_view()(req)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

    def test_anonymous_do_not_retrieve_user(self):
        """Unauthenticated requests may not retrieve a user."""
        req = self.factory.get('/billjobs/users/')
        resp = UserAdmin.as_view()(req, pk=1)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

    def test_user_do_not_list_user(self):
        """A non-admin user may not list users."""
        req = self.factory.get('/billjobs/users/')
        force_authenticate(req, user=self.user)
        resp = UserAdmin.as_view()(req)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)

    def test_user_do_not_retrieve_user(self):
        """A non-admin user may not retrieve a user's details."""
        req = self.factory.get('/billjobs/users/')
        force_authenticate(req, user=self.user)
        resp = UserAdminDetail.as_view()(req, pk=1)
        self.assertEqual(resp.status_code, status.HTTP_403_FORBIDDEN)
|
Python
| 0
|
@@ -248,16 +248,28 @@
inDetail
+%0Aimport json
%0A%0Aclass
@@ -2549,9 +2549,538 @@
IDDEN)%0A%0A
+ def test_admin_create_user(self):%0A request = self.factory.post('/billjobs/users/',%0A json.dumps(%7B%0A 'username': 'new_user',%0A 'email': 'new@jobs.org',%0A 'password': 'foobar'%7D),%0A content_type='application/json')%0A force_authenticate(request, user=self.admin)%0A view = UserAdmin.as_view()%0A response = view(request)%0A print(response.data)%0A self.assertEqual(response.status_code, status.HTTP_201_CREATED)%0A%0A%0A
%0A
|
94f3b24fb711aa4554ededfc126e8f637e89acfc
|
use shell=True on Windows
|
build/util/lastchange.py
|
build/util/lastchange.py
|
#!/usr/bin/env python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
lastchange.py -- Chromium revision fetching utility.
"""
import optparse
import os
import subprocess
import sys
class VersionInfo(object):
  """Value object describing where a revision came from."""

  def __init__(self, url, root, revision):
    # Callers read these attributes directly; keep them exactly as given.
    self.url, self.root, self.revision = url, root, revision
def FetchGitRevision(directory):
  """
  Fetch the Git hash for a given directory.

  Errors are swallowed.

  Returns:
    a VersionInfo object or None on error.
  """
  # Force shell usage under cygwin & win32. This is a workaround for
  # mysterious loss of cwd while invoking cygwin's git.
  # We can't just pass shell=True to Popen, as under win32 this will
  # cause CMD to be used, while we explicitly want a cygwin shell.
  command = ['git', 'rev-parse', 'HEAD']
  if sys.platform in ('cygwin', 'win32'):
    command = ['sh', '-c', ' '.join(command)]
  try:
    proc = subprocess.Popen(command, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE, cwd=directory)
  except OSError:
    # git is missing or not executable.
    return None
  stdout = proc.communicate()[0]
  revision = stdout.strip()
  if proc.returncode != 0 or not revision:
    return None
  # Seven characters matches git's default abbreviated hash length.
  return VersionInfo('git', 'git', revision[:7])
def FetchSVNRevision(directory):
  """
  Fetch the Subversion URL, repository root and revision for a directory.

  Errors are swallowed.

  Returns:
    a VersionInfo object or None on error.
  """
  try:
    proc = subprocess.Popen(['svn', 'info'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            cwd=directory)
  except OSError:
    # svn is apparently either not installed or not executable.
    return None
  if not proc:
    return None

  # 'svn info' prints 'Key: value' lines; collect them into a dict.
  attrs = {}
  for line in proc.stdout:
    line = line.strip()
    if not line:
      continue
    key, val = line.split(': ', 1)
    attrs[key] = val

  try:
    return VersionInfo(attrs['URL'], attrs['Repository Root'],
                       attrs['Revision'])
  except KeyError:
    # Output did not contain the fields we need.
    return None
def FetchVersionInfo(default_lastchange, directory=None):
  """
  Returns the last change (in the form of a branch, revision tuple),
  from some appropriate revision control system.

  Falls back to the contents of default_lastchange, then to a fixed
  'unknown' value, when no VCS information is available.
  """
  version_info = FetchSVNRevision(directory) or FetchGitRevision(directory)
  if not version_info:
    if default_lastchange and os.path.exists(default_lastchange):
      # BUG FIX: close the file handle instead of leaking it
      # (try/finally keeps this compatible with old Python 2).
      f = open(default_lastchange, 'r')
      try:
        revision = f.read().strip()
      finally:
        f.close()
      version_info = VersionInfo(None, None, revision)
    else:
      version_info = VersionInfo('unknown', '', '0')
  return version_info
def WriteIfChanged(file_name, contents):
  """
  Writes the specified contents to the specified file_name
  iff the contents are different than the current contents.

  Leaving an unchanged file untouched preserves its timestamp for
  incremental builds.
  """
  try:
    # BUG FIX: close the read handle instead of leaking it
    # (try/finally keeps this compatible with old Python 2).
    f = open(file_name, 'r')
    try:
      old_contents = f.read()
    finally:
      f.close()
  except EnvironmentError:
    # Missing or unreadable file: fall through and (re)write it.
    pass
  else:
    if contents == old_contents:
      return
    os.unlink(file_name)
  # BUG FIX: close the write handle too.
  f = open(file_name, 'w')
  try:
    f.write(contents)
  finally:
    f.close()
def main(argv=None):
  """Parse options, fetch the last-change revision and emit it."""
  if argv is None:
    argv = sys.argv

  parser = optparse.OptionParser(usage="lastchange.py [options]")
  parser.add_option("-d", "--default-lastchange", metavar="FILE",
                    help="default last change input FILE")
  parser.add_option("-o", "--output", metavar="FILE",
                    help="write last change to FILE")
  parser.add_option("--revision-only", action='store_true',
                    help="just print the SVN revision number")
  opts, args = parser.parse_args(argv[1:])

  out_file = opts.output

  # A positional argument may also name the output file.  (The original
  # 'while' loop here could only ever run one iteration, so a plain 'if'
  # is equivalent.)
  if args and out_file is None:
    out_file = args.pop(0)

  if args:
    sys.stderr.write('Unexpected arguments: %r\n\n' % args)
    parser.print_help()
    sys.exit(2)

  version_info = FetchVersionInfo(opts.default_lastchange)

  if opts.revision_only:
    # BUG FIX (portability): 'print x' is Python-2-only syntax; the
    # single-argument call form works identically under Python 2 and 3.
    print(version_info.revision)
  else:
    contents = "LASTCHANGE=%s\n" % version_info.revision
    if out_file:
      WriteIfChanged(out_file, contents)
    else:
      sys.stdout.write(contents)

  return 0


if __name__ == '__main__':
  sys.exit(main())
|
Python
| 0.004337
|
@@ -1756,32 +1756,91 @@
cwd=directory
+,%0A shell=(sys.platform=='win32')
)%0A except OSErr
|
d03571b523ba125be94d68bc50cda74a9a934d6f
|
fix documents
|
chainer/functions/evaluation/r2_score.py
|
chainer/functions/evaluation/r2_score.py
|
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class R2_score(function.Function):
    """Chainer function computing the R^2 (coefficient of determination)
    regression score between predictions and ground-truth values."""

    def __init__(self, sample_weight, multioutput):
        # Per-sample weighting is not supported by this implementation.
        if sample_weight is not None:
            raise NotImplementedError()
        if multioutput in ['uniform_average', 'raw_values']:
            self.multioutput = multioutput
        else:
            raise ValueError("invalid multioutput argument")

    def check_type_forward(self, in_types):
        # Exactly two inputs are expected: (pred, true).
        type_check.expect(in_types.size() == 2)
        pred_type, true_type = in_types

        # Both inputs must be floating-point arrays.
        type_check.expect(
            pred_type.dtype.kind == 'f',
            true_type.dtype.kind == 'f'
        )

        # Shapes must agree exactly.
        type_check.expect(
            pred_type.ndim >= true_type.ndim,
            pred_type.shape == true_type.shape,
        )

    def forward(self, inputs):
        # xp is numpy or cupy depending on where the inputs live.
        xp = cuda.get_array_module(*inputs)
        pred, true = inputs
        # Residual and total sums of squares, computed per output column.
        SS_res = xp.sum((pred-true)**2, axis=0)
        SS_tot = xp.sum((true-xp.mean(true, axis=0))**2, axis=0)
        # NOTE(review): if a column of `true` is constant, SS_tot is 0 and
        # the division yields inf/nan — presumably callers avoid that case;
        # verify.
        if self.multioutput == 'uniform_average':
            return xp.asarray((1 - SS_res / SS_tot).mean(), dtype=pred.dtype),
        elif self.multioutput == 'raw_values':
            return xp.asarray((1 - SS_res / SS_tot), dtype=pred.dtype),
def r2_score(pred, true, sample_weight=None, multioutput='uniform_average'):
"""Computes R^2(coefficient of determination) regression score function.
Args:
pred(Variable): Variable holding a vector or matrix of estimated \
target values
true(Variable): Variable holding a vector or matrix of correct target \
values
sample_weight: NotImplemented
multioutput(string): ['uniform_average', 'raw_values']. if \
'uniform_average', this function return an average of R^2\
score of multiple output. If 'raw_average', this function \
return a set of R^2 score of multiple output.
Returns:
Variable: A Variable holding a scalar array of the R^2 score if \
'multioutput' is 'uniform_average' or a vector of R^2 \
scores if 'multioutput' is 'raw_values'.
.. note:: This function is non-differentiable
"""
return R2_score(sample_weight=sample_weight, multioutput=multioutput)\
(pred, true)
|
Python
| 0.000086
|
@@ -1546,24 +1546,25 @@
arget values
+.
%0A tru
@@ -1654,16 +1654,17 @@
values
+.
%0A
@@ -1681,28 +1681,19 @@
ight: No
-tImplemented
+ne.
%0A
@@ -2233,16 +2233,17 @@
entiable
+.
%0A%0A %22%22
|
9c7373a519a61d9a5511d212d94898f1a091e2e8
|
Add custom_vars_list argument for WebRTCFactory.
|
scripts/master/factory/webrtc_factory.py
|
scripts/master/factory/webrtc_factory.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from master.factory import chromium_factory
from master.factory import gclient_factory
from master.factory import chromium_commands
import config
class WebRTCFactory(chromium_factory.ChromiumFactory):
CUSTOM_VARS_ROOT_DIR = ('root_dir', 'src')
# Can't use the same Valgrind constant as in chromium_factory.py, since WebRTC
# uses another path (use_relative_paths=True in DEPS).
CUSTOM_DEPS_VALGRIND = ('third_party/valgrind',
config.Master.trunk_url + '/deps/third_party/valgrind/binaries')
def __init__(self, build_dir, target_platform, svn_root_url, branch,
custom_deps_list=None, nohooks_on_update=False, target_os=None):
"""Creates a WebRTC factory.
This factory can also be used to build stand-alone projects.
Args:
build_dir: Directory to perform the build relative to. Usually this is
trunk/build for WebRTC and other projects.
target_platform: Platform, one of 'win32', 'darwin', 'linux2'
svn_root_url: Subversion root URL (i.e. without branch/trunk part).
branch: Branch name to checkout.
custom_deps_list: Content to be put in the custom_deps entry of the
.gclient file for the default solution. The parameter must be a list
of tuples with two strings in each: path and remote URL.
nohooks_on_update: If True, no hooks will be executed in the update step.
target_os: Used to sync down OS-specific dependencies, if specified.
"""
chromium_factory.ChromiumFactory.__init__(
self, build_dir, target_platform=target_platform,
nohooks_on_update=nohooks_on_update, target_os=target_os)
svn_url = svn_root_url + '/' + branch
# Use root_dir=src since many Chromium scripts rely on that path.
custom_vars_list = [self.CUSTOM_VARS_ROOT_DIR]
# Overwrite solutions of ChromiumFactory since we sync WebRTC, not Chromium.
self._solutions = []
self._solutions.append(gclient_factory.GClientSolution(
svn_url, name='src', custom_vars_list=custom_vars_list,
custom_deps_list=custom_deps_list))
if config.Master.webrtc_internal_url:
self._solutions.append(gclient_factory.GClientSolution(
config.Master.webrtc_internal_url, name='webrtc-internal',
custom_vars_list=custom_vars_list))
def WebRTCFactory(self, target='Debug', clobber=False, tests=None, mode=None,
slave_type='BuilderTester', options=None,
compile_timeout=1200, build_url=None, project=None,
factory_properties=None, gclient_deps=None):
options = options or ''
tests = tests or []
factory_properties = factory_properties or {}
if factory_properties.get('needs_valgrind'):
self._solutions[0].custom_deps_list = [self.CUSTOM_DEPS_VALGRIND]
factory = self.BuildFactory(target, clobber, tests, mode, slave_type,
options, compile_timeout, build_url, project,
factory_properties, gclient_deps)
# Get the factory command object to create new steps to the factory.
cmds = chromium_commands.ChromiumCommands(factory, target, self._build_dir,
self._target_platform)
# Override test runner script paths with our own that can run any test and
# have our suppressions configured.
valgrind_script_path = cmds.PathJoin('src', 'tools', 'valgrind-webrtc')
cmds._posix_memory_tests_runner = cmds.PathJoin(valgrind_script_path,
'webrtc_tests.sh')
cmds._win_memory_tests_runner = cmds.PathJoin(valgrind_script_path,
'webrtc_tests.bat')
# Add tests.
gyp_defines = factory_properties['gclient_env'].get('GYP_DEFINES', '')
for test in tests:
if 'build_for_tool=memcheck' in gyp_defines:
cmds.AddMemoryTest(test, 'memcheck',
factory_properties=factory_properties)
elif 'build_for_tool=tsan' in gyp_defines:
cmds.AddMemoryTest(test, 'tsan', factory_properties=factory_properties)
else:
cmds.AddAnnotatedGTestTestStep(test, factory_properties)
return factory
|
Python
| 0.000002
|
@@ -778,16 +778,54 @@
st=None,
+ custom_vars_list=None,%0A
nohooks
@@ -1278,16 +1278,88 @@
eckout.%0A
+ custom_vars_list: List of tuples specifying custom GYP variables.%0A
cu
@@ -2031,33 +2031,83 @@
tom_vars_list =
-%5B
+custom_vars_list or %5B%5D%0A custom_vars_list.append(
self.CUSTOM_VARS
@@ -2115,17 +2115,17 @@
ROOT_DIR
-%5D
+)
%0A%0A #
|
d6c20476bebed1265ccd0ac46e3020fdb3804bdd
|
Add type to command serialization
|
changes/api/serializer/models/command.py
|
changes/api/serializer/models/command.py
|
from changes.api.serializer import Serializer, register
from changes.models import Command
@register(Command)
class CommandSerializer(Serializer):
def serialize(self, instance, attrs):
return {
'id': instance.id.hex,
'name': instance.label,
'status': instance.status,
'script': instance.script,
'returnCode': instance.return_code,
'env': dict(instance.env or {}),
'cwd': instance.cwd,
'artifacts': instance.artifacts or [],
'duration': instance.duration,
'dateCreated': instance.date_created,
'dateStarted': instance.date_started,
'dateFinished': instance.date_finished,
}
|
Python
| 0.000002
|
@@ -476,16 +476,51 @@
ce.cwd,%0A
+ 'type': instance.type,%0A
|
1c061f786cd702cc56a5e2fdc4ece9d1cb9c83b6
|
fix the print method and fix the docstring of the migrate script
|
scripts/migrate_github_oauth_settings.py
|
scripts/migrate_github_oauth_settings.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate nodes with invalid categories."""
import sys
import mock
from nose.tools import *
from framework.mongo import database
from website.app import init_app
from tests.base import OsfTestCase
from website.addons.github.api import GitHub
from website.addons.github.model import AddonGitHubOauthSettings, AddonGitHubUserSettings
# user_settings_collection = AddonGitHubUserSettings._storage[0].store
def do_migration(records):
# ... perform the migration ...
for raw_user_settings in records:
access_token = raw_user_settings['oauth_access_token']
token_type = raw_user_settings['oauth_token_type']
github_user_name = raw_user_settings['github_user']
if access_token and token_type and github_user_name:
gh = GitHub(access_token, token_type)
github_user = gh.user()
oauth_settings = AddonGitHubOauthSettings()
oauth_settings.github_user_id = str(github_user.id)
oauth_settings.save()
oauth_settings.oauth_access_token = access_token
oauth_settings.oauth_token_type = token_type
oauth_settings.github_user_name = github_user_name
oauth_settings.save()
AddonGitHubUserSettings._storage[0].store.update(
{'_id': raw_user_settings['_id']},
{
'$unset': {
'oauth_access_token': True,
'oauth_token_type': True,
'github_user': True,
},
'$set': {
'oauth_settings': oauth_settings.github_user_id,
}
}
)
AddonGitHubOauthSettings._storage[0].store.update(
{'github_user_id': oauth_settings.github_user_id},
{
'$push': {
'__backrefs.accessed.addongithubusersettings.oauth_settings': raw_user_settings['_id'],
}
}
)
def get_user_settings():
# ... return the StoredObjects to migrate ...
return database.addongithubusersettings.find()
def main():
init_app('website.settings', set_backends=True, routes=True) # Sets the storage backends on all models
user_settings = get_user_settings()
if 'dry' in sys.argv:
# print list of affected nodes, totals, etc.
for user_setting in user_settings:
print "===AddonGithubUserSettings==="
print "user_settings_id:"
print (user_setting['_id'])
else:
do_migration(get_user_settings())
for user_setting in user_settings:
print "===AddonGithubUserSettings==="
print "user_settings_id:"
print (user_setting['_id'])
print "Total affected user" + len(user_settings)
class TestMigrateGitHubOauthSettings(OsfTestCase):
def setUp(self):
super(TestMigrateGitHubOauthSettings, self).setUp()
self.mongo_collection = database.addongithubusersettings
self.user_settings = {
"__backrefs" : {
"authorized" : {
"addongithubnodesettings" : {
"user_settings" : [
"678910",
]
}
}
},
"_id" : "123456",
"_version" : 1,
"deletedAddonGitHubUserSettings" : False,
"github_user" : "testing user",
"oauth_access_token" : "testing acess token",
"oauth_state" : "no state",
"oauth_token_type" : "testing token type",
"owner" : "abcde"
}
self.mongo_collection.insert(self.user_settings)
def test_get_user_settings(self):
records = list(get_user_settings())
assert_equal(1, len(records))
assert_equal(
records[0]['github_user'],
self.user_settings['github_user']
)
assert_equal(
records[0]['oauth_state'],
self.user_settings['oauth_state']
)
assert_equal(
records[0]['oauth_access_token'],
self.user_settings['oauth_access_token']
)
assert_equal(
records[0]['oauth_token_type'],
self.user_settings['oauth_token_type']
)
@mock.patch('website.addons.github.api.GitHub.user')
def test_do_migration(self, mock_github_user):
user = mock.Mock()
user.id = "testing user id"
mock_github_user.return_value = user
do_migration(get_user_settings())
user_settings = AddonGitHubUserSettings.find()[0]
assert_true(user_settings.oauth_settings)
assert_true(user_settings.oauth_state)
assert_equal(
user_settings.oauth_settings.github_user_name,
"testing user"
)
assert_equal(
user_settings.oauth_settings.oauth_access_token,
"testing acess token"
)
assert_equal(
user_settings.oauth_settings.oauth_token_type,
"testing token type"
)
assert_equal(
user_settings.oauth_settings.github_user_id,
"testing user id"
)
def tearDown(self):
self.mongo_collection.remove()
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -64,36 +64,77 @@
ate
-nodes with invali
+addongithubusersettings an
d c
+re
ate
-gorie
+ and attach addongithuboauthsetting
s.%22%22
@@ -2573,33 +2573,33 @@
print
-
+(
%22===AddonGithubU
@@ -2604,33 +2604,36 @@
bUserSettings===
-%22
+%5Cn%22)
%0A pri
@@ -2626,33 +2626,33 @@
print
-
+(
%22user_settings_i
@@ -2653,36 +2653,30 @@
ings_id:
-%22%0A print
+ %7B%7D %5Cn%22.format
(user_se
@@ -2688,16 +2688,17 @@
%5B'_id'%5D)
+)
%0A%0A el
@@ -2803,17 +2803,17 @@
print
-
+(
%22===Addo
@@ -2834,17 +2834,20 @@
tings===
-%22
+%5Cn%22)
%0A
@@ -2856,17 +2856,17 @@
print
-
+(
%22user_se
@@ -2879,28 +2879,22 @@
_id:
-%22%0A print
+ %7B%7D %5Cn%22.format
(use
@@ -2910,25 +2910,18 @@
%5B'_id'%5D)
+)
%0A
-
%0A
@@ -2926,17 +2926,17 @@
print
-
+(
%22Total a
@@ -2951,12 +2951,21 @@
user
-%22 +
+: %7B%7D%22.format(
len(
@@ -2978,16 +2978,18 @@
ettings)
+))
%0A%0A%0Aclass
|
732d83b105ae27636bc3f97479732dff5c4b9331
|
Fix typo
|
D20/Server.py
|
D20/Server.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2018 Agalmic Ventures LLC (www.agalmicventures.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import binascii
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.backends import default_backend
import datetime
import flask
import hashlib
import os
import sys
##### Defaults #####
#Default TCP port
DEFAULT_PORT = 27184
#Size of entropy in 32 byte (256 bit) blocks
DEFAULT_ENTROPY_SIZE = 16 * 32
#How often to reseed the DRBG
DEFAULT_RESEED_INTERVAL = 1024 * 1024
##### Application #####
#TODO: move this to another file
#TODO: unit test
class RandomBitGenerator(object):
"""
Represents a random bit generator, combining a deterministic random bit generator and an optional entropy source.
"""
def __init__(self, arguments):
self._reseedInterval = arguments.reseed_interval
self._seedEntropy = arguments.seed_urandom
self._zeroBlock = b'\x00' * arguments.entropy_size
#OS randomness to use for seeds
if self._seedEntropy:
self._urandom = open('/dev/urandom', 'wb')
self.reseed()
#TODO: self-test
def reseed(self):
"""
Reseeds the internal AES-256-CTR-DRBG.
"""
secret = os.urandom(32) #256 bits @suppress
iv = os.urandom(16)
self._cipher = Cipher(algorithms.AES(secret), modes.CTR(iv), backend=default_backend())
self._encryptor = self._cipher2.encryptor()
self._n = 0
def entropy(self, **kwargs):
"""
API to return entropy to a client.
"""
#Reseed the DRBG after a while
self._n += 1
if self._n >= self._reseedInterval:
self.reseed()
#Get entropy from an AES-256-CTR-DRBG
entropy = self._encryptor.update(self._zeroBlock)
return entropy
rbg = None #Assigned in main()
#Setup Flask application
app = flask.Flask('D20')
##### Basic Routes #####
@app.errorhandler(404)
def error404(e):
"""
404 error handler.
"""
return ''.join([
'<html><body>',
'<h1>D20 - Page Not Found</h1>',
'<p>The only endpoint available on this entropy micro-service is <a href="/api/entropy">/api/entropy</a>.</p>',
'<p>For more information including the complete source code, visit <a href="https://github.com/AgalmicVentures/D20">the D20 repository</a>.</p>',
'</body></html>',
]), 404
##### API Routes #####
api = flask.Blueprint('api', __name__)
@api.route('/entropy', methods=['GET', 'POST'])
def entropy():
"""
API to return entropy to a client.
"""
challenge = flask.request.args.get('challenge')
if challenge is None:
return flask.jsonify({
'error': "No 'challenge' parameter provided (e.g. /api/entropy?challenge=0123456789ABCDEF)",
})
elif len(challenge) < 8:
return flask.jsonify({
'error': "'challenge' parameter provided is too short; must be at least 8 bytes",
})
#Get the time
now = datetime.datetime.utcnow()
iso8601Format = '%Y-%m-%dT%H:%M:%S'
nowStr = now.strftime(iso8601Format)
#Generate the challenge response: the hash of the challenge || time
h = hashlib.sha512()
h.update(challenge.encode('utf8', 'ignore'))
h.update(nowStr.encode('utf8', 'ignore'))
challengeResponseBytes = binascii.hexlify(h.digest())
challengeResponse = challengeResponseBytes.decode('utf8')
#Get entropy from an AES-256-CTR-DRBG
entropy = rbg.entropy()
entropyValue = binascii.hexlify(entropy).decode('utf8')
return flask.jsonify({
'apiVersion': '1', #Also update in roll.sh
'challengeResponse': challengeResponse,
'entropy': entropyValue,
'time': nowStr,
})
def main(argv=None):
"""
The main function of this script.
:param argv: List[str] Arguments to parse (default sys.argv)
:return: int
"""
parser = argparse.ArgumentParser(description='D20 Entropy Microservice')
parser.add_argument('-H', '--host', action='store', default='0.0.0.0',
help='TCP host to run on (default 0.0.0.0).')
parser.add_argument('-p', '--port', action='store', type=int, default=DEFAULT_PORT,
help='TCP port to run on.')
parser.add_argument('-e', '--entropy-size', action='store', type=int, default=DEFAULT_ENTROPY_SIZE,
help='Size of entropy to return in bytes (default 16 512-bit blocks).')
parser.add_argument('-r', '--reseed-interval', action='store', type=int, default=DEFAULT_RESEED_INTERVAL,
help='Reseed the internal DRBG at this frequency (default 2^20).')
parser.add_argument('-s', '--seed-urandom', action='store_true',
help='Seed the entropy pool with hashed data from requests.')
arguments = parser.parse_args(sys.argv[1:])
#Instantiate the rnadom bit generator
global rbg
rbg = RandomBitGenerator(arguments)
#TODO: productionize
print('Starting up D20...')
app.register_blueprint(api, url_prefix='/api')
app.run(host=arguments.host, port=arguments.port)
print('Shutting down D20...')
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.999999
|
@@ -2410,17 +2410,16 @@
._cipher
-2
.encrypt
|
e6300cc22369811f02ef59d2a71da8df31fe8221
|
Add zlib.dll to Windows wheels
|
ci/bundle_hdf5_whl.py
|
ci/bundle_hdf5_whl.py
|
"""Bundle HDF5 DLLs into an h5py wheel on Windows
This is meant to do something like auditwheel on Linux & delocate on Mac,
but h5py-specific.
"""
from base64 import urlsafe_b64encode
from contextlib import contextmanager
from glob import glob
import hashlib
import os
import os.path as osp
import shutil
import sys
import tempfile
from zipfile import ZipFile, ZIP_DEFLATED
def find_dlls():
hdf5_path = os.environ.get("HDF5_DIR")
print("HDF5_DIR", hdf5_path)
return glob(os.path.join(hdf5_path, 'lib', 'hdf*.dll'))
def file_sha256(path):
h = hashlib.sha256()
with open(path, 'rb') as src:
while True:
buf = src.read(1024 * 8)
if not buf:
break
h.update(buf)
return urlsafe_b64encode(h.digest()).decode('ascii').rstrip('=')
def find_wheels():
wheelhouse_dir = sys.argv[1]
return glob(osp.join(wheelhouse_dir, '*.whl'))
@contextmanager
def modify_zip(zip_file):
with tempfile.TemporaryDirectory() as td:
with ZipFile(zip_file, 'r') as zf:
zf.extractall(path=td)
yield td
with ZipFile(zip_file, 'w', compression=ZIP_DEFLATED) as zf:
for root, dirs, files in os.walk(td):
for f in sorted(files):
path = osp.join(root, f)
zf.write(path, arcname=osp.relpath(path, td))
dirs.sort()
def bundle(whl_file):
print("Adding DLLs to", whl_file)
with modify_zip(whl_file) as td:
# Find & read RECORD file
records = glob(osp.join(td, '*.dist-info', 'RECORD'))
assert len(records) == 1, records
record_f = records[0]
with open(record_f, encoding='utf-8') as f:
record = f.read().strip() + '\n'
# Copy DLLs & add them to RECORD
for dll in find_dlls():
size = os.stat(dll).st_size
sha = file_sha256(dll)
dest = 'h5py/' + os.path.basename(dll)
print(f"{dest} ({size} bytes)")
shutil.copy2(dll, osp.join(td, dest))
record += f'{dest},sha256={sha},{size}\n'
print("Writing modified", record_f)
with open(record_f, 'w', encoding='utf-8') as f:
f.write(record)
def main():
if not sys.platform.startswith('win'):
print("Non-windows platform, skipping bundle_hdf5_whl.py")
return
for whl_file in find_wheels():
bundle(whl_file)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -462,30 +462,34 @@
5_path)%0A
-return
+yield from
glob(os.pat
@@ -525,16 +525,181 @@
*.dll'))
+%0A zlib_root = os.environ.get(%22ZLIB_ROOT%22)%0A if zlib_root:%0A print(%22ZLIB_ROOT%22, zlib_root)%0A yield os.path.join(zlib_root, 'bin_release', 'zlib.dll')
%0A%0Adef fi
|
7f4ae6e2973ccdd305ebac91c761050a4f9c1727
|
add return_tuple parameter to get_one_row
|
database.py
|
database.py
|
"""
Database Manager.
"""
import MySQLdb
import MySQLdb.cursors
class DatabaseManager(object):
def __init__(self, host, user, passwd, database, charset='utf8', large_scale=False):
"""Be careful using large_scale=True, SSDictCursor seems not reliable."""
self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=database, charset=charset)
self.large_scale = large_scale
def close(self):
self.conn.close()
# put here for better understandability
cursor_types = {
True: {
True: MySQLdb.cursors.SSDictCursor,
False: MySQLdb.cursors.SSCursor,
},
False: {
True: MySQLdb.cursors.DictCursor,
False: MySQLdb.cursors.Cursor,
},
}
def __get_cursor_type(self, use_dict):
return self.cursor_types[self.large_scale][use_dict]
def __query(self, sql, values=(), use_dict=True):
"""Execute any SQL.
You can use %s placeholder in sql and fill with values.
Note: it's the call's responsibility to call .close() on the returned cursor
@return cursor"""
cursor = self.conn.cursor(self.__get_cursor_type(use_dict))
cursor.execute(sql, values)
return cursor
def query(self, sql, values=()):
"""Execute any SQL and return affected rows."""
cursor = self.__query(sql, values)
rowcount = cursor.rowcount
cursor.close()
return rowcount
def begin(self):
self.query('BEGIN')
def commit(self):
self.query('BEGIN')
def insert(self, sql, values=()):
"""Insert a row and return insert id."""
cursor = self.__query(sql, values)
lastrowid = cursor.lastrowid
cursor.close()
return lastrowid
def batch_insert(self, sql, values):
"""Insert many rows at a time."""
cursor = self.conn.cursor()
cursor.executemany(sql, values)
cursor.close()
def get_one_row(self, sql, values=()):
"""Get one row of SELECT query."""
cursor = self.__query(sql, values)
row = cursor.fetchone()
cursor.close()
return row
def get_rows(self, sql, values=()):
"""[Generator]Get rows of SELECT query."""
cursor = self.__query(sql, values)
for i in xrange(cursor.rowcount):
yield cursor.fetchone()
cursor.close()
def get_value(self, sql, values=(), idx=0):
"""Get value of the first row.
Does not check for empty row, so ensure the result is not empty.
This is handy if you want to retrive COUNT(*)."""
cursor = self.__query(sql, values, use_dict=False)
row = cursor.fetchone()
cursor.close()
return row[idx]
|
Python
| 0.000002
|
@@ -2002,32 +2002,52 @@
, sql, values=()
+, return_tuple=False
):%0A %22%22%22Ge
@@ -2064,32 +2064,123 @@
of SELECT query.
+%0A Tip: setting return_tuple=True returns tuple so you can unpack with ease.%0A
%22%22%22%0A curs
@@ -2200,32 +2200,59 @@
uery(sql, values
+, use_dict=not return_tuple
)%0A row =
|
177e2eeb665899a0f116d20876c8c77b4ef27b98
|
use java_common.merge instead of manual _collect util functions (#838)
|
scala/scala_import.bzl
|
scala/scala_import.bzl
|
load("@io_bazel_rules_scala//scala:jars_to_labels.bzl", "JarsToLabelsInfo")
#intellij part is tested manually, tread lightly when changing there
#if you change make sure to manually re-import an intellij project and see imports
#are resolved (not red) and clickable
def _scala_import_impl(ctx):
target_data = _code_jars_and_intellij_metadata_from(
ctx.attr.jars,
ctx.file.srcjar,
)
(
current_target_compile_jars,
intellij_metadata,
) = (target_data.code_jars, target_data.intellij_metadata)
current_jars = depset(current_target_compile_jars)
exports = _collect(ctx.attr.exports)
transitive_runtime_jars = _collect_runtime(ctx.attr.runtime_deps)
jars = _collect(ctx.attr.deps)
jars2labels = {}
_collect_labels(ctx.attr.deps, jars2labels)
_collect_labels(ctx.attr.exports, jars2labels) #untested
_add_labels_of_current_code_jars(
depset(transitive = [current_jars, exports.compile_jars]),
ctx.label,
jars2labels,
) #last to override the label of the export compile jars to the current target
if current_target_compile_jars:
current_target_providers = [_new_java_info(ctx, jar) for jar in current_target_compile_jars]
else:
# TODO(#8867): Migrate away from the placeholder jar hack when #8867 is fixed.
current_target_providers = [_new_java_info(ctx, ctx.file._placeholder_jar)]
return struct(
scala = struct(
outputs = struct(jars = intellij_metadata),
),
providers = [
java_common.merge(current_target_providers),
DefaultInfo(
files = current_jars,
),
JarsToLabelsInfo(jars_to_labels = jars2labels),
],
)
def _new_java_info(ctx, jar):
return JavaInfo(
output_jar = jar,
compile_jar = jar,
exports = [target[JavaInfo] for target in ctx.attr.exports],
deps = [target[JavaInfo] for target in ctx.attr.deps],
runtime_deps = [target[JavaInfo] for target in ctx.attr.runtime_deps],
source_jar = ctx.file.srcjar,
neverlink = ctx.attr.neverlink,
)
def _add_labels_of_current_code_jars(code_jars, label, jars2labels):
for jar in code_jars.to_list():
jars2labels[jar.path] = label
def _code_jars_and_intellij_metadata_from(jars, srcjar):
code_jars = []
intellij_metadata = []
for jar in jars:
current_jar_code_jars = _filter_out_non_code_jars(jar.files)
current_jar_source_jars = _source_jars(jar, srcjar)
code_jars += current_jar_code_jars
for current_class_jar in current_jar_code_jars: #intellij, untested
intellij_metadata.append(
struct(
ijar = None,
class_jar = current_class_jar,
source_jars = current_jar_source_jars,
),
)
return struct(code_jars = code_jars, intellij_metadata = intellij_metadata)
def _source_jars(jar, srcjar):
if srcjar:
return [srcjar]
else:
jar_source_jars = [
file
for file in jar.files.to_list()
if _is_source_jar(file)
]
return jar_source_jars
def _filter_out_non_code_jars(files):
return [file for file in files.to_list() if not _is_source_jar(file)]
def _is_source_jar(file):
return file.basename.endswith("-sources.jar")
# TODO: it seems this could be reworked to use java_common.merge
def _collect(deps):
transitive_compile_jars = []
runtime_jars = []
compile_jars = []
for dep_target in deps:
java_provider = dep_target[JavaInfo]
compile_jars.append(java_provider.compile_jars)
transitive_compile_jars.append(java_provider.transitive_compile_time_jars)
runtime_jars.append(java_provider.transitive_runtime_jars)
return struct(
transitive_runtime_jars = depset(transitive = runtime_jars),
transitive_compile_jars = depset(transitive = transitive_compile_jars),
compile_jars = depset(transitive = compile_jars),
)
def _collect_labels(deps, jars2labels):
for dep_target in deps:
if JarsToLabelsInfo in dep_target:
jars2labels.update(dep_target[JarsToLabelsInfo].jars_to_labels)
#scala_library doesn't add labels to the direct dependency itself
java_provider = dep_target[JavaInfo]
for jar in java_provider.compile_jars.to_list():
jars2labels[jar.path] = dep_target.label
def _collect_runtime(runtime_deps):
jar_deps = []
for dep_target in runtime_deps:
java_provider = dep_target[JavaInfo]
jar_deps.append(java_provider.transitive_runtime_jars)
return depset(transitive = jar_deps)
scala_import = rule(
implementation = _scala_import_impl,
attrs = {
"jars": attr.label_list(
allow_files = True,
), #current hidden assumption is that these point to full, not ijar'd jars
"deps": attr.label_list(),
"runtime_deps": attr.label_list(),
"exports": attr.label_list(),
"neverlink": attr.bool(),
"srcjar": attr.label(allow_single_file = True),
"_placeholder_jar": attr.label(
allow_single_file = True,
default = Label("@io_bazel_rules_scala//scala:libPlaceHolderClassToCreateEmptyJarForScalaImport.jar"),
),
},
)
|
Python
| 0
|
@@ -598,33 +598,74 @@
exports =
-_collect(
+java_common.merge(%5Bexport%5BJavaInfo%5D for export in
ctx.attr.exp
@@ -668,16 +668,17 @@
.exports
+%5D
)%0A tr
@@ -701,33 +701,70 @@
_jars =
-_collect_runtime(
+%5C%0A java_common.merge(%5Bdep%5BJavaInfo%5D for dep in
ctx.attr
@@ -780,44 +780,49 @@
deps
-)
+%5D) %5C
%0A
-jars = _collect(ctx.attr.deps)
+ .transitive_runtime_jars
%0A
@@ -3520,684 +3520,8 @@
%22)%0A%0A
-# TODO: it seems this could be reworked to use java_common.merge%0Adef _collect(deps):%0A transitive_compile_jars = %5B%5D%0A runtime_jars = %5B%5D%0A compile_jars = %5B%5D%0A%0A for dep_target in deps:%0A java_provider = dep_target%5BJavaInfo%5D%0A compile_jars.append(java_provider.compile_jars)%0A transitive_compile_jars.append(java_provider.transitive_compile_time_jars)%0A runtime_jars.append(java_provider.transitive_runtime_jars)%0A%0A return struct(%0A transitive_runtime_jars = depset(transitive = runtime_jars),%0A transitive_compile_jars = depset(transitive = transitive_compile_jars),%0A compile_jars = depset(transitive = compile_jars),%0A )%0A%0A
def
@@ -3938,249 +3938,8 @@
el%0A%0A
-def _collect_runtime(runtime_deps):%0A jar_deps = %5B%5D%0A for dep_target in runtime_deps:%0A java_provider = dep_target%5BJavaInfo%5D%0A jar_deps.append(java_provider.transitive_runtime_jars)%0A%0A return depset(transitive = jar_deps)%0A%0A
scal
|
76e1f2db2fe3763e1b8638c9044afa341e4d39bf
|
Fix fileno method in ReactorTransport.
|
sheared/reactor/transport.py
|
sheared/reactor/transport.py
|
# vim:nowrap:textwidth=0
import random, os, types
class StringTransport:
def __init__(self):
self.input = ''
self.output = ''
self.closed = 0
def read(self, cnt=4096):
cnt = min(cnt, 1 + int(random.random() * (len(self.input) - 1)))
data = self.input[:cnt]
self.input = self.input[cnt:]
return data
def write(self, data):
if self.closed:
raise IOError, 'cannot write to a closed Transport'
self.output = self.output + data
return len(data)
def sendfile(self, file):
d = file.read()
while not d == '':
self.output = self.output + d
d = file.read()
def close(self):
if self.closed:
raise IOError, 'already closed'
self.closed = 1
def appendInput(self, data):
self.input = self.input + data
def getOutput(self):
return self.output
class FileTransport:
def __init__(self, reactor, file, other):
self.file = file
if isinstance(file, types.IntType):
self.fileno = file
else:
self.fileno = file.fileno()
def read(self, max=4096):
return os.read(self.fileno, max)
def write(self, data):
while data:
cnt = os.write(self.fileno, data)
data = data[cnt:]
def close(self):
os.close(self.fileno)
class ReactorTransport:
def __init__(self, reactor, file, other):
self.reactor = reactor
#self.file = self.reactor.prepareFile(file)
self.file = file
self.other = other
self.closed = 0
def read(self, max=4096):
return self.reactor.read(self.file, max)
def write(self, data):
self.reactor.write(self.file, data)
def sendfile(self, file):
self.reactor.sendfile(file, self.file)
def fileno(self):
return self.reactor.getfd(self.file)
def close(self):
self.reactor.close(self.file)
self.closed = 1
|
Python
| 0.006964
|
@@ -1893,34 +1893,107 @@
-return self.reactor.getfd(
+if type(self.file) is types.IntType:%0A return self.file%0A else:%0A return
self
@@ -1989,32 +1989,40 @@
return self.file
+.fileno(
)%0A%0A def close
|
20131f8ea26b989d74652416e5c23c85e979e36b
|
Use whoami properly
|
roushagent/plugins/input/task_input.py
|
roushagent/plugins/input/task_input.py
|
#!/usr/bin/env python
import json
import os
import threading
import time
from requests import ConnectionError
from roushclient.client import RoushEndpoint
name = 'taskerator'
task_getter = None
class TaskThread(threading.Thread):
def __init__(self, endpoint, name):
# python, I hate you.
super(TaskThread, self).__init__()
self.endpoint = None
self.endpoint_uri = endpoint
self.name = name
self.producer_lock = threading.Lock()
self.producer_condition = threading.Condition(self.producer_lock)
self.pending_tasks = []
self.running_tasks = {}
self.host_id = None
self._maybe_init()
def _maybe_init(self):
if self.endpoint:
return True
else:
LOG.info('Connecting to endpoint')
try:
self.endpoint = RoushEndpoint(self.endpoint_uri)
except ConnectionError:
return False
except KeyboardInterrupt:
raise
if not self.host_id:
# try to find our host ID from the endpoint
LOG.info('Initial connection: fetching host ID')
for node in self.endpoint.nodes.filter("name='%s'" % (
self.name)):
self.host_id = node.id
if not self.host_id:
# make a new node entry for this host
LOG.info('Creating new host entry')
node = self.endpoint.nodes.new(name=self.name)
node.save()
self.host_id = node.id
LOG.info('New host ID: %d' % self.host_id)
# update the module list
self.producer_lock.acquire()
task = {'action': 'modules.list',
'payload': {},
'id': -1}
self.pending_tasks.append(task)
self.producer_condition.notify()
LOG.debug('added module_list task to work queue')
self.producer_lock.release()
self.producer_lock.acquire()
task = {'action': 'modules.actions',
'payload': {},
'id': -1}
self.pending_tasks.append(task)
self.producer_condition.notify()
LOG.debug('added module_list task to work queue')
self.producer_lock.release()
return True
def stop(self):
self.endpoint = None
self.running = False
def run(self):
self.running = True
while self.running:
task = None
if not self._maybe_init():
time.sleep(15)
continue
try:
task = self.endpoint.nodes[self.host_id].task_blocking
except ConnectionError:
time.sleep(15)
except KeyboardInterrupt:
raise
if task:
self.producer_lock.acquire()
if task.id not in [x['id'] for x in self.pending_tasks]:
LOG.debug('Found new pending task with id %s' % task.id)
# this should be done on the server side while
# locked to avoid races
task.state = 'running'
task.save()
self.pending_tasks.append(task.to_hash())
self.producer_condition.notify()
LOG.debug('added task to work queue' % task.to_hash())
self.producer_lock.release()
self.running = False
def fetch(self, blocking=True):
# we'll assume any task we've marked as running
# is under way, and we'll only return new pending tasks.
retval = {}
LOG.debug("fetching new work item")
self.producer_lock.acquire()
while(blocking and len(self.pending_tasks) == 0):
self.producer_condition.wait()
if len(self.pending_tasks) > 0:
LOG.debug('Found %d queued tasks' % len(self.pending_tasks))
task = self.pending_tasks.pop()
LOG.debug('Preparing to process task: %s' % task)
retval = {'id': task['id'],
'action': task['action'],
'payload': task['payload']}
LOG.debug('Marking task %s as running' % task['id'])
# throw it into the running list -- I don't know that we really
# need this list, but meh.
self.running_tasks[retval['id']] = retval
self.producer_lock.release()
return retval
def result(self, txid, result):
self.producer_lock.acquire()
if txid in self.running_tasks.keys():
try:
if txid > 0:
# update the db
task = self.endpoint.tasks[txid]
task.state = 'done'
task.result = result
task.save()
del self.running_tasks[txid]
elif txid == -1:
# module list?
if result['result_code'] == 0:
newattr = self.endpoint.attrs.new(
node_id=self.host_id,
key=result['result_data']['name'],
value=result['result_data']['value'])
newattr.save()
except ConnectionError:
# FIXME(rp):
# we should enqueue the task into a "retry update" list
# so we can update it once we get back to a connected
# state
pass
self.producer_lock.release()
class TaskGetter:
def __init__(self, endpoint, name):
self.endpoint = endpoint
self.name = name
self.running = False
self.server_thread = None
def run(self):
if self.running:
raise RuntimeError
self.server_thread = TaskThread(self.endpoint, self.name)
self.server_thread.setDaemon(True)
self.server_thread.start()
self.running = True
def stop(self):
self.running = False
self.server_thread.stop()
self.server_thread.join(5)
self.server_thread.terminate()
def fetch(self):
return self.server_thread.fetch()
def result(self, txid, result):
return self.server_thread.result(txid, result)
def setup(config={}):
global task_getter
name = config.get('hostname', os.popen('hostname -f').read().strip())
endpoint = config.get('endpoint', 'http://localhost:8080')
task_getter = TaskGetter(endpoint, name)
task_getter.run()
def teardown():
global task_getter
task_getter.stop()
def fetch():
global task_getter
return task_getter.fetch()
def result(input_data, output_data):
global task_getter
txid = input_data['id']
result_hash = output_data
return task_getter.result(txid, result_hash)
|
Python
| 0
|
@@ -811,17 +811,16 @@
point')%0A
-%0A
@@ -1165,17 +1165,16 @@
st ID')%0A
-%0A
@@ -1181,274 +1181,108 @@
-for node in self.endpoint.nodes.filter(%22name='%25s'%22 %25 (%0A self.name)):%0A self.host_id = node.id%0A%0A if not self.host_id:%0A # make a new node entry for this host%0A LOG.info('Creating new host entry')
+#TODO: Fix up client to support whoami in a more%0A #reasonable manner and fix this code up
%0A
@@ -1290,24 +1290,20 @@
- node
+root
= self.
@@ -1321,154 +1321,157 @@
des.
-new(name=self.name)%0A node.save()%0A self.host_id = node.id%0A%0A LOG.info('New host ID: %25d' %25 self.host_id)
+filter(%0A 'facts.parent_id = None and name = %22workspace%22').first()%0A self.host_id = root.whoami(self.name).json%5B'node'%5D%5B'id'%5D
%0A%0A
|
08f07c880a69fc8e078ebcd871e4c7506ac31cf4
|
fix too typo
|
bluebottle/projects/tests/test_unit.py
|
bluebottle/projects/tests/test_unit.py
|
from datetime import timedelta
from decimal import Decimal
from bluebottle.utils.utils import StatusDefinition
from django.test import TestCase
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.utils.model_dispatcher import get_project_model
from bluebottle.test.factory_models.projects import ProjectFactory
from bluebottle.donations.models import Donation
from bluebottle.orders.models import Order
from bluebottle.test.utils import BluebottleTestCase
from bluebottle.bb_projects.models import ProjectPhase
from django.utils import timezone
PROJECT_MODEL = get_project_model()
class TestProjectStatusUpdate(BluebottleTestCase):
"""
save() automatically updates some fields, specifically
the status field. Make sure it picks the right one
"""
def setUp(self):
super(TestProjectStatusUpdate, self).setUp()
self.init_projects()
self.incomplete = ProjectPhase.objects.get(slug="done-incomplete")
self.complete = ProjectPhase.objects.get(slug="done-complete")
self.campaign = ProjectPhase.objects.get(slug="campaign")
self.expired_project = ProjectFactory.create(amount_asked=5000,
status=self.campaign)
self.expired_project.deadline = timezone.now() - timedelta(days=1)
def test_expired_too_little(self):
""" Not enough donated - status done incomplete """
self.expired_project.amount_donated = 4999
self.expired_project.save()
self.failUnless(self.expired_project.status == self.incomplete)
def test_expired_too_exact(self):
""" Exactly the amount requested - status done complete """
self.expired_project.amount_donated = 5000
self.expired_project.save()
self.failUnless(self.expired_project.status == self.complete)
def test_expired_too_more_than_enough(self):
""" More donated than requested - status done complete """
self.expired_project.amount_donated = 5001
self.expired_project.save()
self.failUnless(self.expired_project.status == self.complete)
class CalculateProjectMoneyDonatedTest(BluebottleTestCase):
def setUp(self):
super(CalculateProjectMoneyDonatedTest, self).setUp()
# Required by Project model save method
self.init_projects()
self.some_project = ProjectFactory.create(amount_asked=5000)
self.another_project = ProjectFactory.create(amount_asked=5000)
self.some_user = BlueBottleUserFactory.create()
self.another_user = BlueBottleUserFactory.create()
# def test_donated_amount(self):
# # Some project have amount_asked of 5000000 (cents that is)
# self.assertEqual(self.some_project.amount_asked, 5000)
#
# # A project without donations should have amount_donated of 0
# self.assertEqual(self.some_project.amount_donated, 0)
#
# # Create a new donation of 15 in status 'new'. project money donated should be 0
# first_donation = self._create_donation(user=self.some_user, project=self.some_project, amount=1500,
# status=DonationStatuses.new)
# self.assertEqual(self.some_project.amount_donated, 0)
#
#
# # Create a new donation of 25 in status 'in_progress'. project money donated should be 0.
# second_donation = self._create_donation(user=self.some_user, project=self.some_project, amount=2500,
# status=DonationStatuses.in_progress)
# self.assertEqual(self.some_project.amount_donated, 0)
#
# # Setting the first donation to status 'paid' money donated should be 1500
# first_donation.order.status = StatusDefinition.PENDING
# first_donation.order.save()
# self.assertEqual(self.some_project.amount_donated, 15)
#
# # Setting the second donation to status 'pending' money donated should be 40
# second_donation.order.status = StatusDefinition.PENDING
# second_donation.order.save()
# self.assertEqual(self.some_project.amount_donated, 40)
def _create_donation(self, user=None, amount=None, project=None, status=StatusDefinition.NEW):
""" Helper method for creating donations."""
if not project:
project = ProjectFactory.create()
project.save()
if not user:
user = BlueBottleUserFactory.create()
if not amount:
amount = Decimal('10.00')
order = Order.objects.create(status=status)
donation = Donation.objects.create(user=user, amount=amount, project=project, order=order)
return donation
|
Python
| 0.998754
|
@@ -1625,20 +1625,16 @@
expired_
-too_
exact(se
@@ -1885,20 +1885,16 @@
expired_
-too_
more_tha
|
c64d7ba394e1f185b2ebb14cdcc58f5b329e130d
|
Translate "Images" word.
|
shop/products/admin/forms.py
|
shop/products/admin/forms.py
|
from django.apps import apps
from django import forms
from suit.sortables import SortableTabularInline
from multiupload.fields import MultiFileField
class ProductForm(forms.ModelForm):
images = MultiFileField(max_num=100, min_num=1, required=False)
def save(self, commit=True):
product = super(ProductForm, self).save(commit)
if 'category' in self.changed_data:
product.attribute_values.all().delete()
return product
class Meta:
model = apps.get_model('products', 'Product')
fields = '__all__'
class ProductImageInline(SortableTabularInline):
fields = ('preview', )
readonly_fields = ['preview']
model = apps.get_model('products', 'ProductImage')
extra = 0
max_num = 0
|
Python
| 0.999996
|
@@ -23,17 +23,16 @@
rt apps%0A
-%0A
from dja
@@ -47,16 +47,72 @@
rt forms
+%0Afrom django.utils.translation import ugettext_lazy as _
%0A%0Afrom s
@@ -267,16 +267,44 @@
leField(
+%0A label=_('Images'),
max_num=
|
e6f9cd49a57dfc5fd0acbcf757fd6e80c3749b4f
|
Support complex comparison (Issue #121)
|
numba/specialize/comparisons.py
|
numba/specialize/comparisons.py
|
import ast
from functools import reduce
import numba
from numba import *
from numba import error
from numba import visitors, nodes
from numba import function_util
from numba.symtab import Variable
from numba import pyconsts
logger = logging.getLogger(__name__)
opmap = {
ast.Eq : pyconsts.Py_EQ,
ast.NotEq : pyconsts.Py_NE,
ast.Lt : pyconsts.Py_LT,
ast.LtE : pyconsts.Py_LE,
ast.Gt : pyconsts.Py_GT,
ast.GtE : pyconsts.Py_GE,
}
def build_boolop(right, left):
node = ast.BoolOp(ast.And(), [left, right])
return nodes.typednode(node, bool_)
class SpecializeComparisons(visitors.NumbaTransformer):
"""
Rewrite cascaded ast.Compare nodes to a sequence of boolean operations
ANDed together:
a < b < c
becomes
a < b and b < c
"""
def single_compare(self, node):
if node.left.type.is_object:
return self.single_compare_objects(node)
if node.left.type.is_pointer and node.comparators[0].type.is_pointer:
# Coerce pointers to integer values before comparing
node.left = nodes.CoercionNode(node.left, Py_uintptr_t)
node.comparators = [nodes.CoercionNode(node.comparators[0],
Py_uintptr_t)]
return node
def single_compare_objects(self, node):
op = type(node.ops[0])
if op not in opmap:
raise error.NumbaError(
node, "%s comparisons not yet implemented" % (op,))
# Build arguments for PyObject_RichCompareBool
operator = nodes.const(opmap[op], int_)
args = [node.left, node.comparators[0], operator]
# Call PyObject_RichCompareBool
compare = function_util.external_call(self.context,
self.llvm_module,
'PyObject_RichCompareBool',
args=args)
# Coerce int result to bool
return nodes.CoercionNode(compare, bool_)
def visit_Compare(self, node):
"Reduce cascaded comparisons into single comparisons"
# Process children
self.generic_visit(node)
compare_nodes = []
comparators = [nodes.CloneableNode(c) for c in node.comparators]
# Build comparison nodes
left = node.left
for op, right in zip(node.ops, comparators):
node = ast.Compare(left=left, ops=[op], comparators=[right])
node = nodes.typednode(node, bool_)
# Handle comparisons specially based on their types
node = self.single_compare(node)
compare_nodes.append(node)
left = right.clone
# AND the comparisons together
node = reduce(build_boolop, reversed(compare_nodes))
return node
|
Python
| 0
|
@@ -606,16 +606,362 @@
bool_)%0A%0A
+def extract(complex_node):%0A complex_node = nodes.CloneableNode(complex_node)%0A%0A real = nodes.ComplexAttributeNode(complex_node, 'real')%0A imag = nodes.ComplexAttributeNode(complex_node.clone, 'imag')%0A%0A return real, imag%0A%0Adef compare(lhs, rhs):%0A result = ast.Compare(lhs, %5Bast.Eq()%5D, %5Brhs%5D)%0A return nodes.typednode(result, bool_)%0A%0A
class Sp
@@ -1205,32 +1205,67 @@
re(self, node):%0A
+ rhs = node.comparators%5B0%5D%0A%0A
if node.
@@ -1297,22 +1297,22 @@
-return
+node =
self.si
@@ -1339,32 +1339,34 @@
(node)%0A%0A
+el
if node.left.typ
@@ -1382,35 +1382,19 @@
ter and
-node.comparators%5B0%5D
+rhs
.type.is
@@ -1591,94 +1591,363 @@
ode(
-node.comparators%5B0%5D,%0A Py_uintptr_t)%5D
+rhs, Py_uintptr_t)%5D%0A%0A elif node.left.type.is_complex and rhs.type.is_complex:%0A real1, imag1 = extract(node.left)%0A real2, imag2 = extract(rhs)%0A lhs = compare(real1, real2)%0A rhs = compare(imag1, imag2)%0A result = ast.BoolOp(ast.And(), %5Blhs, rhs%5D)%0A node = nodes.typednode(result, bool_)
%0A%0A
|
faf5facbefc02d5c5144d036ff53fcaab40667f4
|
fix pylint
|
anyway/parsers/news_flash_db_adapter.py
|
anyway/parsers/news_flash_db_adapter.py
|
import datetime
import logging
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
from anyway.parsers import infographics_data_cache_updater
from anyway.parsers import timezones
from anyway.models import NewsFlash
# fmt: off
def init_db() -> "DBAdapter":
from anyway.app_and_db import db
return DBAdapter(db)
class DBAdapter:
def __init__(self, db: SQLAlchemy):
self.db = db
def execute(self, *args, **kwargs):
return self.db.session.execute(*args, **kwargs)
def commit(self, *args, **kwargs):
return self.db.session.commit(*args, **kwargs)
def recreate_table_for_location_extraction(self):
with self.db.session.begin():
self.db.session.execute("""TRUNCATE cbs_locations""")
self.db.session.execute("""INSERT INTO cbs_locations
(SELECT ROW_NUMBER() OVER (ORDER BY road1) as id, LOCATIONS.*
FROM
(SELECT DISTINCT road1,
road2,
non_urban_intersection_hebrew,
yishuv_name,
street1_hebrew,
street2_hebrew,
district_hebrew,
region_hebrew,
road_segment_name,
longitude,
latitude
FROM markers_hebrew
WHERE (provider_code=1
OR provider_code=3)
AND (longitude is not null
AND latitude is not null)) LOCATIONS)"""
)
def get_markers_for_location_extraction(self):
query_res = self.execute(
"""SELECT * FROM cbs_locations"""
)
df = pd.DataFrame(query_res.fetchall())
df.columns = query_res.keys()
return df
def remove_duplicate_rows(self):
"""
remove duplicate rows by link
"""
self.execute(
"""
DELETE FROM news_flash T1
USING news_flash T2
WHERE T1.ctid < T2.ctid -- delete the older versions
AND T1.link = T2.link; -- add more columns if needed
"""
)
self.commit()
def insert_new_newsflash(self, newsflash: NewsFlash) -> None:
logging.info("Adding newsflash, is accident: {}, date: {}"
.format(newsflash.accident, newsflash.date))
self.db.session.add(newsflash)
self.db.session.commit()
infographics_data_cache_updater.add_news_flash_to_cache(newsflash)
def get_newsflash_by_id(self, id):
return self.db.session.query(NewsFlash).filter(NewsFlash.id == id)
def select_newsflash_where_source(self, source):
return self.db.session.query(NewsFlash).filter(NewsFlash.source == source)
def get_all_newsflash(self):
return self.db.session.query(NewsFlash)
def get_latest_date_of_source(self, source):
"""
:return: latest date of news flash
"""
latest_date = self.execute(
"SELECT max(date) FROM news_flash WHERE source=:source",
{"source": source},
).fetchone()[0] or datetime.datetime(1900, 1, 1, 0, 0, 0)
res = timezones.from_db(latest_date)
logging.info('Latest time fetched for source {} is {}'
.format(source, res))
return res
def get_latest_tweet_id(self):
"""
:return: latest tweet id
"""
latest_id = self.execute(
"SELECT tweet_id FROM news_flash where source='twitter' ORDER BY date DESC LIMIT 1"
).fetchone()
if latest_id:
return latest_id[0]
return None
|
Python
| 0.000003
|
@@ -817,17 +817,16 @@
ocations
-
%0A
|
a21c31eb05c275d35e5bcdcaff923f6cc7b15bc4
|
Fix upgrade handler call in install_node
|
octane/commands/install_node.py
|
octane/commands/install_node.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from octane.commands.upgrade_db import get_controllers
from octane.commands.upgrade_node import ControllerUpgrade
from octane.commands.upgrade_node import wait_for_node
from octane.helpers.node_attributes import copy_disks
from octane.helpers.node_attributes import copy_ifaces
from octane import magic_consts
from cliff import command as cmd
from fuelclient.objects import environment as environment_obj
from fuelclient.objects import node as node_obj
LOG = logging.getLogger(__name__)
def update_node_settings(node, disks_fixture, ifaces_fixture):
if not magic_consts.DEFAULT_DISKS:
LOG.info("Updating node %s disk settings with fixture: %s",
str(node.id), disks_fixture)
disks = node.get_attribute('disks')
LOG.info("Original node %s disk settings: %s",
str(node.id), disks)
new_disks = list(copy_disks(disks_fixture, disks, 'by_name'))
LOG.info("New disk info generated: %s", new_disks)
node.upload_node_attribute('disks', new_disks)
else:
LOG.warn("Using default volumes for node %s", node)
LOG.warn("To keep custom volumes layout, change DEFAULT_DISKS const "
"in magic_consts.py module")
LOG.info("Updating node %s network settings with fixture: %s",
str(node.id), ifaces_fixture)
ifaces = node.get_attribute('interfaces')
LOG.info("Original node %s network settings: %s",
str(node.id), ifaces)
new_ifaces = list(copy_ifaces(ifaces_fixture, ifaces))
LOG.info("New interfaces info generated: %s", new_ifaces)
node.upload_node_attribute('interfaces', new_ifaces)
def install_node(orig_id, seed_id, node_ids, isolated=False):
env = environment_obj.Environment
nodes = [node_obj.Node(node_id) for node_id in node_ids]
if orig_id == seed_id:
raise Exception("Original and seed environments have the same ID: %s",
orig_id)
orig_env = env(orig_id)
orig_node = next(get_controllers(orig_env))
seed_env = env(seed_id)
seed_env.assign(nodes, orig_node.data['roles'])
for node in nodes:
disk_info_fixture = orig_node.get_attribute('disks')
nic_info_fixture = orig_node.get_attribute('interfaces')
update_node_settings(node, disk_info_fixture, nic_info_fixture)
seed_env.install_selected_nodes('provision', nodes)
for node in nodes:
wait_for_node(node, "provisioned")
for node in nodes:
ControllerUpgrade.predeploy(node, seed_env,
isolated=isolated)
seed_env.install_selected_nodes('deploy', nodes)
for node in nodes:
wait_for_node(node, "ready")
class InstallNodeCommand(cmd.Command):
"""Install nodes to environment based on settings of orig environment"""
def get_parser(self, prog_name):
parser = super(InstallNodeCommand, self).get_parser(prog_name)
parser.add_argument(
'--isolated', action='store_true',
help="Isolate node's network from original cluster")
parser.add_argument(
'orig_id', type=int, metavar='ORIG_ID',
help="ID of original environment")
parser.add_argument(
'seed_id', type=int, metavar='SEED_ID',
help="ID of upgrade seed environment")
parser.add_argument(
'node_ids', type=int, metavar='NODE_ID', nargs='+',
help="IDs of nodes to be moved")
return parser
def take_action(self, parsed_args):
install_node(parsed_args.orig_id, parsed_args.seed_id,
parsed_args.node_ids, isolated=parsed_args.isolated)
|
Python
| 0.000001
|
@@ -3019,32 +3019,95 @@
node in nodes:%0A
+ # FIXME: properly call all handlers all over the place%0A
Controll
@@ -3119,18 +3119,8 @@
rade
-.predeploy
(nod
@@ -3135,44 +3135,8 @@
env,
-%0A
iso
@@ -3150,16 +3150,28 @@
solated)
+.predeploy()
%0A see
|
3bb928fa19e9e94c426746addff09f68cac1fec0
|
Remove leftover (NC-1326)
|
nodeconductor/cost_tracking/serializers.py
|
nodeconductor/cost_tracking/serializers.py
|
from __future__ import unicode_literals
from django.contrib.contenttypes.models import ContentType
from django.utils import six
from rest_framework import serializers
from nodeconductor.core.serializers import GenericRelatedField, AugmentedSerializerMixin, JSONField
from nodeconductor.core.signals import pre_serializer_fields, post_validate_attrs
from nodeconductor.core.utils import get_subclasses
from nodeconductor.cost_tracking import models
from nodeconductor.structure import SupportedServices, models as structure_models
from nodeconductor.structure.filters import ScopeTypeFilterBackend
from nodeconductor.structure.serializers import ProjectSerializer, BaseResourceSerializer
class PriceEstimateSerializer(AugmentedSerializerMixin, serializers.HyperlinkedModelSerializer):
scope = GenericRelatedField(related_models=models.PriceEstimate.get_editable_estimated_models())
scope_name = serializers.SerializerMethodField()
scope_type = serializers.SerializerMethodField()
resource_type = serializers.SerializerMethodField()
class Meta(object):
model = models.PriceEstimate
fields = ('url', 'uuid', 'scope', 'total', 'consumed', 'month', 'year',
'is_manually_input', 'scope_name', 'scope_type', 'resource_type', 'threshold')
read_only_fields = ('is_manually_input', 'threshold')
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
protected_fields = ('scope', 'year', 'month')
def validate(self, data):
if self.instance is None and models.PriceEstimate.objects.filter(
scope=data['scope'], year=data['year'], month=data['month'], is_manually_input=True).exists():
raise serializers.ValidationError(
'Estimate for given month already exists. Use PATCH request to update it.')
return data
def create(self, validated_data):
validated_data['is_manually_input'] = True
price_estimate = super(PriceEstimateSerializer, self).create(validated_data)
return price_estimate
def get_scope_name(self, obj):
return six.text_type(obj.scope or obj.details.get('scope_name')) # respect to unicode
def get_scope_type(self, obj):
return ScopeTypeFilterBackend.get_scope_type(obj) or obj.details.get('scope_type')
def get_resource_type(self, obj):
if not obj.is_leaf:
return None
return SupportedServices.get_name_for_model(obj.content_type.model_class())
class YearMonthField(serializers.CharField):
""" Field that support year-month representation in format YYYY.MM """
def to_internal_value(self, value):
try:
year, month = [int(el) for el in value.split('.')]
except ValueError:
raise serializers.ValidationError('Value "{}" should be valid be in format YYYY.MM'.format(value))
if not 0 < month < 13:
raise serializers.ValidationError('Month has to be from 1 to 12')
return year, month
class PriceEstimateDateFilterSerializer(serializers.Serializer):
date_list = serializers.ListField(
child=YearMonthField(),
required=False
)
class PriceEstimateDateRangeFilterSerializer(serializers.Serializer):
start = YearMonthField(required=False)
end = YearMonthField(required=False)
def validate(self, data):
if 'start' in data and 'end' in data and data['start'] >= data['end']:
raise serializers.ValidationError('Start has to be earlier than end.')
return data
class PriceListItemSerializer(serializers.HyperlinkedModelSerializer):
service = GenericRelatedField(related_models=structure_models.Service.get_all_models())
class Meta:
model = models.PriceListItem
fields = ('url', 'uuid', 'key', 'item_type', 'value', 'units', 'service')
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
def create(self, validated_data):
# XXX: This behavior is wrong for services with several resources, find a better approach
resource_class = SupportedServices.get_related_models(validated_data['service'])['resources'][0]
validated_data['resource_content_type'] = ContentType.objects.get_for_model(resource_class)
return super(PriceListItemSerializer, self).create(validated_data)
class DefaultPriceListItemSerializer(serializers.HyperlinkedModelSerializer):
resource_type = serializers.SerializerMethodField()
value = serializers.FloatField()
metadata = JSONField()
class Meta:
model = models.DefaultPriceListItem
fields = ('url', 'uuid', 'key', 'item_type', 'value', 'resource_type', 'metadata')
extra_kwargs = {
'url': {'lookup_field': 'uuid'},
}
def get_resource_type(self, obj):
return SupportedServices.get_name_for_model(obj.resource_content_type.model_class())
class PriceEstimateThresholdSerializer(serializers.Serializer):
threshold = serializers.FloatField(min_value=0, required=True)
limit = serializers.FloatField(required=True)
scope = GenericRelatedField(related_models=models.PriceEstimate.get_estimated_models(), required=True)
class PriceEstimateLimitSerializer(serializers.Serializer):
limit = serializers.FloatField(required=True)
scope = GenericRelatedField(related_models=models.PriceEstimate.get_estimated_models(), required=True)
class NestedPriceEstimateSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = models.PriceEstimate
fields = ('threshold', 'total', 'limit')
def get_price_estimate_for_project(serializer, project):
try:
estimate = models.PriceEstimate.objects.get_current(project)
except models.PriceEstimate.DoesNotExist:
return {
'threshold': 0.0,
'total': 0.0,
'limit': -1.0
}
else:
serializer = NestedPriceEstimateSerializer(instance=estimate, context=serializer.context)
return serializer.data
def add_price_estimate_for_project(sender, fields, **kwargs):
fields['price_estimate'] = serializers.SerializerMethodField()
setattr(sender, 'get_price_estimate', get_price_estimate_for_project)
pre_serializer_fields.connect(add_price_estimate_for_project, sender=ProjectSerializer)
def check_project_price_estimate(sender, instance, attrs, **kwargs):
serializer = instance
if serializer.instance:
# Skip validation if instance is updated
return
project = attrs['service_project_link'].project
try:
estimate = models.PriceEstimate.objects.get_current(project)
except models.PriceEstimate.DoesNotExist:
return
else:
if estimate.limit != -1 and estimate.total > estimate.limit > 0:
raise serializers.ValidationError({
'detail': 'Resource provisioning is disabled because estimated project price is over limit.'
})
for serializer in get_subclasses(BaseResourceSerializer):
post_validate_attrs.connect(check_project_price_estimate, sender=serializer)
|
Python
| 0
|
@@ -5041,58 +5041,8 @@
ue)%0A
- limit = serializers.FloatField(required=True)%0A
|
67ff8c30d07f54c89d8072f480ded59e144d8463
|
Update range test after reporting change
|
numba/tests/builtins/test_builtin_range.py
|
numba/tests/builtins/test_builtin_range.py
|
"""
>>> range_ret1()
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> range_ret2()
[1, 2, 3, 4]
>>> range_ret3()
[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, -1, -2, -3, -4]
>>> forward1()
0 1 2 3 4 5 6 7 8 9 done
>>> forward2()
1 2 3 4 done
>>> forward3()
5 8 11 14 done
>>> backward1()
10 7 4 done
>>> backward2()
done
>>> backward3()
-5 -8 -11 -14 done
>>> empty_assign()
14
>>> last_value()
Warning 92:10: local variable 'i' might be referenced before assignment
9
"""
from numba import *
@autojit
def range_ret1():
return range(10)
@autojit
def range_ret2():
return range(1, 5)
@autojit
def range_ret3():
return range(10, -5, -1)
@autojit
def forward1():
for i in range(10):
print i,
print "done"
@autojit
def forward2():
for i in range(1, 5):
print i,
print "done"
@autojit
def forward3():
for i in range(5, 15, 3):
print i,
print "done"
@autojit
def backward1():
for i in range(10, 2, -3):
print i,
print "done"
@autojit
def backward2():
for i in range(1, 5, -1):
print i,
print "done"
@autojit
def backward3():
for i in range(-5, -15, -3):
print i,
print "done"
@autojit
def empty_assign():
i = 14
for i in range(10, 4):
pass
print i
@autojit
def last_value():
for i in range(10):
pass
print i
if __name__ == '__main__':
backward3()
# import doctest
# doctest.testmod()
|
Python
| 0
|
@@ -367,16 +367,109 @@
value()%0A
+--------------------- Numba Encountered Errors or Warnings ---------------------%0A%3CBLANKLINE%3E%0A
Warning
@@ -469,17 +469,17 @@
arning 9
-2
+6
:10: loc
@@ -532,16 +532,109 @@
ignment%0A
+%3CBLANKLINE%3E%0A--------------------------------------------------------------------------------%0A
9%0A%22%22%22%0A%0Af
@@ -1548,16 +1548,18 @@
__':%0A
+ #
backwar
@@ -1566,18 +1566,16 @@
d3()%0A
- #
import
@@ -1585,18 +1585,16 @@
test%0A
- #
doctest
|
ca326b3bfaf642b3fde430241350c62578899a81
|
Add add_method delegate on extension types type
|
numba/typesystem/exttypes/extensiontype.py
|
numba/typesystem/exttypes/extensiontype.py
|
# -*- coding: utf-8 -*-
"""
Extension type types.
"""
from numba.traits import traits, Delegate
from numba.typesystem import *
@traits
class ExtensionType(NumbaType, minitypes.ObjectType):
"""
Extension type Numba type.
Available to users through MyExtensionType.exttype (or
numba.typeof(MyExtensionType).
"""
is_extension = True
is_final = False
methoddict = Delegate('vtab_type')
methodnames = Delegate('vtab_type')
attributedict = Delegate('attribute_table')
attributes = Delegate('attribute_table')
def __init__(self, py_class, **kwds):
super(ExtensionType, self).__init__(**kwds)
assert isinstance(py_class, type), ("Must be a new-style class "
"(inherit from 'object')")
self.name = py_class.__name__
self.py_class = py_class
self.symtab = {} # attr_name -> attr_type
self.compute_offsets(py_class)
self.attribute_table = None
self.vtab_type = None
self.parent_attr_struct = None
self.parent_vtab_type = None
self.parent_type = getattr(py_class, "__numba_ext_type", None)
def compute_offsets(self, py_class):
from numba.exttypes import extension_types
self.vtab_offset = extension_types.compute_vtab_offset(py_class)
self.attr_offset = extension_types.compute_attrs_offset(py_class)
def set_attributes(self, attribute_list):
"""
Create the symbol table and attribute struct from a list of
(varname, attribute_type)
"""
import numba.symtab
self.attribute_table = numba.struct(attribute_list)
self.symtab.update([(name, numba.symtab.Variable(type))
for name, type in attribute_list])
# ______________________________________________________________________
# @jit
class JitExtensionType(ExtensionType):
"Type for @jit extension types"
is_jit_extension = True
def __repr__(self):
return "<JitExtension %s>" % self.name
def __str__(self):
if self.attribute_table:
return "<JitExtension %s(%s)>" % (
self.name, self.attribute_table.attributedict)
return repr(self)
# ______________________________________________________________________
# @autojit
class AutojitExtensionType(ExtensionType):
"Type for @autojit extension types"
is_autojit_extension = True
def __repr__(self):
return "<AutojitExtension %s>" % self.name
def __str__(self):
if self.attribute_table:
return "<AutojitExtension %s(%s)>" % (
self.name, self.attribute_table.attributedict)
return repr(self)
|
Python
| 0
|
@@ -49,16 +49,54 @@
s.%0A%22%22%22%0A%0A
+from numba.minivect import minitypes%0A%0A
from num
@@ -158,17 +158,25 @@
import
-*
+NumbaType
%0A%0A@trait
@@ -498,16 +498,55 @@
b_type')
+%0A add_method = Delegate('vtab_type')
%0A%0A at
|
aa5d9c85dc8267a5e3172c8e7792c2a059e1815c
|
sql db query: improved query timer logging
|
cantools/db/sql/query.py
|
cantools/db/sql/query.py
|
from sqlalchemy.sql import func
from cantools.util import start_timer, end_timer
from properties import *
from getters import *
from setters import *
from session import session, testSession, metadata, Session
_passthru = ["count", "all"]
_qmod = ["filter", "limit", "offset", "join"]
class Query(object):
def __init__(self, mod, *args, **kwargs):
self.mod = mod
self.schema = get_schema(mod)
self.session = kwargs.pop("session", session)
self.query = self.session.query(mod)
for fname in _passthru:
setattr(self, fname, self._qpass(fname))
for fname in _qmod:
setattr(self, fname, self._qmlam(fname))
self.get = self._qpass("first")
self._order = self._qmlam("order_by")
self.filter(*args, **kwargs)
def order(self, prop):
if isinstance(prop, basestring) and "." in prop: # it's a foreignkey reference from another table
from lookup import refcount_subq
asc = True
if prop.startswith("-"):
asc = False
prop = prop[1:]
sub = refcount_subq(prop, self.session)
order = sub.c.count
if not asc:
order = -sub.c.count
return self.join(sub, self.mod.key == sub.c.target).order(order)
return self._order(prop)
def _qpass(self, fname):
def qp(*args, **kwargs):
qkey = "%s: %s %s (%s)"%(fname, args, kwargs, self.query)
start_timer(qkey)
res = getattr(self.query, fname)(*args, **kwargs)
end_timer(qkey)
return res
return qp
def _qmlam(self, fname):
return lambda *a, **k : self._qmod(fname, *a, **k)
def _qmod(self, modname, *args, **kwargs):
self.query = getattr(self.query, modname)(*args, **kwargs)
return self
def fetch(self, limit, offset=0, keys_only=False):
self.limit(limit)
if offset:
self.offset(offset)
res = self.all()
if keys_only: # best way?
return [r.key for r in res]
return res
|
Python
| 0.999852
|
@@ -1433,16 +1433,22 @@
qkey = %22
+Query.
%25s: %25s %25
|
6f3560c680980e2954c937282f55a8f6da167590
|
fix 299 and add make search case insensitive
|
capa/ida/plugin/proxy.py
|
capa/ida/plugin/proxy.py
|
# Copyright (C) 2020 FireEye, Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: [package root]/LICENSE.txt
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and limitations under the License.
from PyQt5 import QtCore
from PyQt5.QtCore import Qt
from capa.ida.plugin.model import CapaExplorerDataModel
class CapaExplorerRangeProxyModel(QtCore.QSortFilterProxyModel):
"""filter results based on virtual address range as seen by IDA
implements filtering for "limit results by current function" checkbox in plugin UI
minimum and maximum virtual addresses are used to filter results to a specific address range. this allows
basic blocks to be included when limiting results to a specific function
"""
def __init__(self, parent=None):
"""initialize proxy filter"""
super(CapaExplorerRangeProxyModel, self).__init__(parent)
self.min_ea = None
self.max_ea = None
def lessThan(self, left, right):
"""return True if left item is less than right item, else False
@param left: QModelIndex of left
@param right: QModelIndex of right
"""
ldata = left.internalPointer().data(left.column())
rdata = right.internalPointer().data(right.column())
if (
ldata
and rdata
and left.column() == CapaExplorerDataModel.COLUMN_INDEX_VIRTUAL_ADDRESS
and left.column() == right.column()
):
# convert virtual address before compare
return int(ldata, 16) < int(rdata, 16)
else:
# compare as lowercase
return ldata.lower() < rdata.lower()
def filterAcceptsRow(self, row, parent):
"""return true if the item in the row indicated by the given row and parent should be included in the model;
otherwise return false
@param row: row number
@param parent: QModelIndex of parent
"""
if self.filter_accepts_row_self(row, parent):
return True
alpha = parent
while alpha.isValid():
if self.filter_accepts_row_self(alpha.row(), alpha.parent()):
return True
alpha = alpha.parent()
if self.index_has_accepted_children(row, parent):
return True
return False
def index_has_accepted_children(self, row, parent):
"""return True if parent has one or more children that match filter, else False
@param row: row number
@param parent: QModelIndex of parent
"""
model_index = self.sourceModel().index(row, 0, parent)
if model_index.isValid():
for idx in range(self.sourceModel().rowCount(model_index)):
if self.filter_accepts_row_self(idx, model_index):
return True
if self.index_has_accepted_children(idx, model_index):
return True
return False
def filter_accepts_row_self(self, row, parent):
"""return True if filter accepts row, else False
@param row: row number
@param parent: QModelIndex of parent
"""
# filter not set
if self.min_ea is None and self.max_ea is None:
return True
index = self.sourceModel().index(row, 0, parent)
data = index.internalPointer().data(CapaExplorerDataModel.COLUMN_INDEX_VIRTUAL_ADDRESS)
# virtual address may be empty
if not data:
return False
# convert virtual address str to int
ea = int(data, 16)
if self.min_ea <= ea and ea < self.max_ea:
return True
return False
def add_address_range_filter(self, min_ea, max_ea):
"""add new address range filter
called when user checks "limit results by current function" in plugin UI
@param min_ea: minimum virtual address as seen by IDA
@param max_ea: maximum virtual address as seen by IDA
"""
self.min_ea = min_ea
self.max_ea = max_ea
self.setFilterKeyColumn(CapaExplorerDataModel.COLUMN_INDEX_VIRTUAL_ADDRESS)
self.invalidateFilter()
def reset_address_range_filter(self):
"""remove address range filter (accept all results)
called when user un-checks "limit results by current function" in plugin UI
"""
self.min_ea = None
self.max_ea = None
self.invalidateFilter()
class CapaExplorerSearchProxyModel(QtCore.QSortFilterProxyModel):
"""A SortFilterProxyModel that accepts rows with a substring match for a configurable query.
Looks for matches in the text of all rows.
Displays the entire tree row if any of the tree branches,
that is, you can filter by rule name, or also
filter by "characteristic(nzxor)" to filter matches with some feature.
"""
def __init__(self, parent=None):
""" """
super(CapaExplorerSearchProxyModel, self).__init__(parent)
self.query = ""
self.setFilterKeyColumn(-1) # all columns
def filterAcceptsRow(self, row, parent):
"""true if the item in the row indicated by the given row and parent
should be included in the model; otherwise returns false
@param row: int
@param parent: QModelIndex*
@retval True/False
"""
# this row matches, accept it
if self.filter_accepts_row_self(row, parent):
return True
# the parent of this row matches, accept it
alpha = parent
while alpha.isValid():
if self.filter_accepts_row_self(alpha.row(), alpha.parent()):
return True
alpha = alpha.parent()
# this row is a parent, and a child matches, accept it
if self.index_has_accepted_children(row, parent):
return True
return False
def index_has_accepted_children(self, row, parent):
"""returns True if the given row or its children should be accepted"""
source_model = self.sourceModel()
model_index = source_model.index(row, 0, parent)
if model_index.isValid():
for idx in range(source_model.rowCount(model_index)):
if self.filter_accepts_row_self(idx, model_index):
return True
if self.index_has_accepted_children(idx, model_index):
return True
return False
def filter_accepts_row_self(self, row, parent):
"""returns True if the given row should be accepted"""
if self.query == "":
return True
source_model = self.sourceModel()
for column in (
CapaExplorerDataModel.COLUMN_INDEX_RULE_INFORMATION,
CapaExplorerDataModel.COLUMN_INDEX_VIRTUAL_ADDRESS,
CapaExplorerDataModel.COLUMN_INDEX_DETAILS,
):
index = source_model.index(row, column, parent)
data = source_model.data(index, Qt.DisplayRole)
if not data:
continue
if not isinstance(data, str):
# sanity check: should already be a string, but double check
continue
if self.query in data:
return True
return False
def set_query(self, query):
self.query = query
self.invalidateFilter()
def reset_query(self):
self.set_query("")
|
Python
| 0
|
@@ -566,16 +566,26 @@
icense.%0A
+import six
%0Afrom Py
@@ -7431,18 +7431,31 @@
(data, s
-tr
+ix.string_types
):%0A
@@ -7581,16 +7581,32 @@
uery
- in data
+.lower() in data.lower()
:%0A
|
5826f791e824b7aa0b38b76570212b7de6e5d1e2
|
Index descriptor_data as Text field in ES
|
resolwe/flow/elastic_indexes/collection.py
|
resolwe/flow/elastic_indexes/collection.py
|
"""Elastic Search indexes for Collection model."""
import elasticsearch_dsl as dsl
from resolwe.elastic.fields import Name
from resolwe.elastic.indices import BaseIndex
from resolwe.flow.utils import dict_dot, iterate_schema
from ..models import Collection
from .base import BaseDocument, BaseIndexMixin
class CollectionDocument(BaseDocument):
"""Document for collection search."""
# Data values extracted from the descriptor.
descriptor_data = Name(multi=True)
tags = dsl.Keyword(multi=True)
class Meta:
"""Meta class for collection search document."""
index = 'collection'
class CollectionIndexMixin:
"""Mixin for indices for collection objects used in ``CollectionDocument``."""
def extract_descriptor(self, obj):
"""Extract data from the descriptor."""
if not obj.descriptor_schema:
return []
descriptor = []
for _, _, path in iterate_schema(obj.descriptor, obj.descriptor_schema.schema):
try:
value = dict_dot(obj.descriptor, path)
except KeyError:
continue
if not isinstance(value, list):
value = [value]
for item in value:
if not isinstance(item, (int, bool, float, str)):
continue
descriptor.append('{}'.format(item))
return descriptor
def get_descriptor_data_value(self, obj):
"""Extract data from the descriptors."""
return self.extract_descriptor(obj)
class CollectionIndex(BaseIndexMixin, CollectionIndexMixin, BaseIndex):
"""Index for collection objects used in ``CollectionDocument``."""
queryset = Collection.objects.all().prefetch_related(
'descriptor_schema',
'contributor'
)
object_type = Collection
document_class = CollectionDocument
|
Python
| 0
|
@@ -81,48 +81,8 @@
sl%0A%0A
-from resolwe.elastic.fields import Name%0A
from
@@ -419,12 +419,16 @@
a =
-Name
+dsl.Text
(mul
|
c7b79f1bcd3616b7d780b8a99ae2fbea7d61886e
|
Check for malformed output path
|
cclib/scripts/ccframe.py
|
cclib/scripts/ccframe.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2019, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Script for writing data tables from computational chemistry files."""
import argparse
import os.path
import sys
from cclib.io import ccopen
from cclib.io import ccframe
from cclib.parser.utils import find_package as _find_package
_has_pandas = _find_package("pandas")
if _has_pandas:
import pandas as pd
def process_logfiles(filenames, output, identifier):
df = ccframe([ccopen(path) for path in filenames])
if output is not None:
outputtype = os.path.splitext(os.path.basename(output))[1][1:]
if outputtype in {'csv'}:
df.to_csv(output)
elif outputtype in {'h5', 'hdf', 'hdf5'}:
df.to_hdf(output, key=identifier)
elif outputtype in {'json'}:
df.to_json(output)
elif outputtype in {'pickle', 'pkl'}:
df.to_pickle(output)
elif outputtype in {'xlsx'}:
writer = pd.ExcelWriter(output)
# This overwrites previous sheets
# (see https://stackoverflow.com/a/42375263/4039050)
df.to_excel(writer, sheet_name=identifier)
writer.save()
else:
print(df)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-O', '--output',
help=('the output document to write, including an '
'extension supported by pandas '
'(csv, h5/hdf/hdf5, json, pickle/pkl, xlsx)'))
parser.add_argument('compchemlogfiles', metavar='compchemlogfile',
nargs='+',
help=('one or more computational chemistry output '
'files to parse and convert'))
parser.add_argument('--identifier',
default='logfiles',
help=('name of sheet which will contain DataFrame, if '
'writing to an Excel file, or identifier for '
'the group in HDFStore, if writing a HDF file'))
args = parser.parse_args()
process_logfiles(args.compchemlogfiles, args.output, args.identifier)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -733,16 +733,219 @@
)%5B1%5D%5B1:%5D
+%0A if not outputtype:%0A raise RuntimeWarning(%0A %22The output type could not be determined from the given path, %22%0A %22not writing DataFrame to disk%22%0A )
%0A%0A
|
ec6f54e52050cecb6bf88b3cfcaa6957f9ea0c60
|
Fix unicode handling in selenium var templates
|
bzt/resources/selenium_taurus_extras.py
|
bzt/resources/selenium_taurus_extras.py
|
"""
Copyright 2018 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from string import Template as StrTemplate
from selenium.common.exceptions import NoSuchWindowException
class Apply(StrTemplate):
def __init__(self, template):
super(Apply, self).__init__(template)
self.variables = {}
def __repr__(self):
return repr(self.safe_substitute(self.variables))
def __str__(self):
return self.safe_substitute(self.variables)
class Template:
def __init__(self, variables):
self.variables = variables
self.tmpl = Apply("")
def apply(self, template):
self.tmpl.template = template
self.tmpl.variables = self.variables
return str(self.tmpl)
class WindowManager:
def __init__(self, driver):
self.driver = driver
self.windows = {}
def switch(self, window_name=None):
try:
if not window_name: # Switch to last window created
self.driver.switch_to.window(self.driver.window_handles[-1])
else:
if window_name.isdigit(): # Switch to window handler index
self._switch_by_idx(int(window_name))
else:
if window_name.startswith("win_ser_"): # Switch using window sequential mode
self._switch_by_win_ser(window_name)
else: # Switch using window name
self.driver.switch_to.window(window_name)
except NoSuchWindowException:
raise NoSuchWindowException("Invalid Window ID: %s" % window_name)
def _switch_by_idx(self, win_index):
wnd_handlers = self.driver.window_handles
if len(wnd_handlers) <= win_index and win_index >= 0:
self.driver.switch_to.window(wnd_handlers[win_index])
else:
raise NoSuchWindowException("Invalid Window ID: %s" % str(win_index))
def _switch_by_win_ser(self, window_name):
if window_name == "win_ser_local":
wnd_handlers = self.driver.window_handles
if len(wnd_handlers) > 0:
self.driver.switch_to.window(wnd_handlers[0])
else:
raise NoSuchWindowException("Invalid Window ID: %s" % window_name)
else:
if window_name not in self.windows:
self.windows[window_name] = self.driver.window_handles[-1]
self.driver.switch_to.window(self.windows[window_name])
def close(self, window_name=None):
if window_name:
self.switch(window_name)
self.driver.close()
|
Python
| 0.000013
|
@@ -556,17 +556,16 @@
se.%0A%22%22%22%0A
-%0A
from str
@@ -660,16 +660,46 @@
ception%0A
+from bzt.six import text_type%0A
%0A%0Aclass
@@ -1235,19 +1235,25 @@
return
-str
+text_type
(self.tm
|
8d26b016e754f0927e1cb09dec226a7ee18bdc4b
|
fix .pretty_zip_size()
|
calaccess_processed/models/tracking.py
|
calaccess_processed/models/tracking.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Models for tracking processing of CAL-ACCESS snapshots over time.
"""
from __future__ import unicode_literals
from django.db import models
from hurry.filesize import size as sizeformat
from django.utils.encoding import python_2_unicode_compatible
from calaccess_processed import archive_directory_path
@python_2_unicode_compatible
class ProcessedDataVersion(models.Model):
"""
A version of CAL-ACCESS processed data.
"""
raw_version = models.OneToOneField(
'calaccess_raw.RawDataVersion',
related_name='processed_version',
verbose_name='raw data version',
help_text='Foreign key referencing the raw data version processed'
)
process_start_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing started',
help_text='Date and time when the processing of the CAL-ACCESS version'
' started',
)
process_finish_datetime = models.DateTimeField(
null=True,
verbose_name='date and time update finished',
help_text='Date and time when the processing of the CAL-ACCESS version'
' finished',
)
zip_archive = models.FileField(
blank=True,
max_length=255,
upload_to=archive_directory_path,
verbose_name='cleaned files zip archive',
help_text='An archive zip of processed files'
)
zip_size = models.BigIntegerField(
null=True,
verbose_name='zip of size (in bytes)',
help_text='The expected size (in bytes) of the zip of processed files'
)
class Meta:
"""
Meta model options.
"""
app_label = 'calaccess_processed'
verbose_name = 'TRACKING: CAL-ACCESS processed data version'
ordering = ('-process_start_datetime',)
get_latest_by = 'process_start_datetime'
def __str__(self):
return str(self.raw_version.release_datetime)
@property
def update_completed(self):
"""
Check if the database update to the version completed.
Return True or False.
"""
if self.process_finish_datetime:
is_completed = True
else:
is_completed = False
return is_completed
@property
def update_stalled(self):
"""
Check if the database update to the version started but did not complete.
Return True or False.
"""
if self.process_start_datetime and not self.update_finish_datetime:
is_stalled = True
else:
is_stalled = False
return is_stalled
def pretty_zip_size(self):
"""
Returns a prettified version (e.g., "725M") of the zip's size.
"""
if not self.zip_size:
return None
return sizeformat(self.clean_zip_size)
pretty_zip_size.short_description = 'processed zip size'
pretty_zip_size.admin_order_field = 'processed zip size'
@python_2_unicode_compatible
class ProcessedDataFile(models.Model):
"""
A data file included in a processed version of CAL-ACCESS.
"""
version = models.ForeignKey(
'ProcessedDataVersion',
on_delete=models.CASCADE,
related_name='files',
verbose_name='processed data version',
help_text='Foreign key referencing the processed version of CAL-ACCESS'
)
file_name = models.CharField(
max_length=100,
verbose_name='processed data file name',
help_text='Name of the processed data file without extension',
)
process_start_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing started',
help_text='Date and time when the processing of the file started',
)
process_finish_datetime = models.DateTimeField(
null=True,
verbose_name='date and time processing finished',
help_text='Date and time when the processing of the file finished',
)
records_count = models.IntegerField(
null=False,
default=0,
verbose_name='clean records count',
help_text='Count of records in the processed file'
)
file_archive = models.FileField(
blank=True,
max_length=255,
upload_to=archive_directory_path,
verbose_name='archive of processed file',
help_text='An archive of the processed file'
)
file_size = models.BigIntegerField(
null=False,
default=0,
verbose_name='size of processed data file (in bytes)',
help_text='Size of the processed file (in bytes)'
)
class Meta:
"""
Meta model options.
"""
app_label = 'calaccess_processed'
unique_together = (('version', 'file_name'),)
verbose_name = 'TRACKING: processed CAL-ACCESS data file'
ordering = ('-version_id', 'file_name',)
def __str__(self):
return self.file_name
def pretty_file_size(self):
"""
Returns a prettified version (e.g., "725M") of the processed file's size.
"""
return sizeformat(self.file_size)
pretty_file_size.short_description = 'processed file size'
pretty_file_size.admin_order_field = 'processed file size'
|
Python
| 0.000002
|
@@ -2874,14 +2874,8 @@
elf.
-clean_
zip_
|
7381f177f392b699eed3d93f2e36b7fa39d33ad0
|
remove unused import
|
build_and_push_images.py
|
build_and_push_images.py
|
#!/usr/bin/env python
import argparse
IMAGES = [
dict(image_name='geometalab/osmaxx-mediator', dockerfile='Dockerfile.mediator'),
dict(image_name='geometalab/osmaxx-worker', dockerfile='Dockerfile.worker'),
dict(image_name='geometalab/osmaxx-frontend', dockerfile='Dockerfile.frontend'),
]
def docker_build(dockerfile, image_name, release, location='.'):
subprocess.check_call(['docker', 'build', '-f', dockerfile, '-t', '{}:{}'.format(image_name, release), location])
def docker_push(release, image_name, *args, **kwargs):
subprocess.check_call(['docker', 'push', '{}:{}'.format(image_name, release)])
if __name__ == '__main__':
import subprocess
release = subprocess.check_output(["git", "describe"]).strip().decode()
for image in IMAGES:
docker_build(release=release, **image)
for image in IMAGES:
docker_push(release=release, **image)
print(release, ' has been pushed, you can now use that in your deployment!')
|
Python
| 0.000001
|
@@ -19,25 +19,8 @@
hon%0A
-import argparse%0A%0A
IMAG
|
674bb111c11bfcfcbd00f4527e35f1c87312d738
|
Append common VS directory to the path to fix build on x86
|
cerbero/utils/msbuild.py
|
cerbero/utils/msbuild.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import _winreg as winreg
from cerbero.config import Architecture
from cerbero.utils import fix_winpath, shell
class MSBuild(object):
def __init__(self, solution, arch=Architecture.X86, config='Release',
sdk='Windows7.1SDK', **properties):
self.properties = {}
if arch == Architecture.X86:
self.properties['Platform'] = 'Win32'
elif arch == Architecture.X86_64:
self.properties['Platform'] = 'x64'
self.properties['Config'] = config
self.properties['PlatformToolset'] = sdk
self.properties.update(properties)
self.solution = solution
def build(self):
self._call('build')
@staticmethod
def get_msbuild_tools_path():
reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
key = winreg.OpenKey(reg,
r"SOFTWARE\Microsoft\MSBuild\ToolsVersions\4.0")
path = winreg.QueryValueEx(key, 'MSBuildToolsPath')[0]
return fix_winpath(path)
def _call(self, command):
properties = self._format_properties()
msbuildpath = self.get_msbuild_tools_path()
shell.call('msbuild.exe %s %s /target:%s' %
(self.solution, properties, command), msbuildpath)
def _format_properties(self):
props = ['/property:%s=%s' % (k, v) for k, v in
self.properties.iteritems()]
return ' '.join(props)
|
Python
| 0
|
@@ -1922,136 +1922,709 @@
-def _call(self, command):%0D%0A properties = self._format_properties()%0D%0A msbuildpath = self.get_msbuild_tools_path()%0D%0A
+@staticmethod%0D%0A def get_vs_path():%0D%0A reg = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)%0D%0A key = winreg.OpenKey(reg,%0D%0A r%22SOFTWARE%5CMicrosoft%5CVisualStudio%5CSxS%5CVS7%22)%0D%0A path = winreg.QueryValueEx(key, '10.0')%5B0%5D%0D%0A path = path.rsplit('%5C%5C', 1)%5B0%5D + '%5C%5CCommon7%5C%5CIDE'%0D%0A return fix_winpath(path)%0D%0A%0D%0A def _call(self, command):%0D%0A properties = self._format_properties()%0D%0A msbuildpath = self.get_msbuild_tools_path()%0D%0A vs_path = self.get_vs_path()%0D%0A old_path = os.environ%5B'PATH'%5D%0D%0A if self.properties%5B'Platform'%5D == 'Win32':%0D%0A os.environ%5B'PATH'%5D = os.environ%5B'PATH'%5D + ':' + vs_path%0D%0A try:%0D%0A
@@ -2672,16 +2672,20 @@
:%25s' %25%0D%0A
+
@@ -2742,24 +2742,85 @@
sbuildpath)%0D
+%0A finally:%0D%0A os.environ%5B'PATH'%5D = old_path%0D
%0A%0D%0A def _
|
7ca3308ced87a51ac073e50229d15b0784f5aed7
|
Update chainerx/_docs/device.py
|
chainerx/_docs/device.py
|
chainerx/_docs/device.py
|
import chainerx
from chainerx import _docs
def _set_docs_device():
Device = chainerx.Device
_docs.set_doc(
Device,
"""Represents a physical computing unit.
""")
_docs.set_doc(
Device.synchronize,
"""Synchronizes the device.
""")
_docs.set_doc(
Device.name,
"""Device name.
Returns:
str: Device name.
""")
_docs.set_doc(
Device.backend,
"""Backend to which this device belongs.
Returns:
~chainerx.Backend: Backend object.
""")
_docs.set_doc(
Device.context,
"""Context to which this device belongs.
Returns:
~chainerx.Context: Context object.
""")
_docs.set_doc(
Device.index,
"""Index of this device.
Returns:
int: Index of this device.
""")
def set_docs():
_set_docs_device()
_docs.set_doc(
chainerx.get_device,
"""get_device(*device)
Returns a device specified by the argument.
If the argument is a :class:`~chainerx.Device` instance, it's simply returned.
Otherwise, there are three ways to specify a device:
.. testcode::
# Specify backend name and device index separately.
chainerx.get_device('native', 0)
# Specify backend name and device index in a single string.
chainerx.get_device('native:0')
# Specify only backend name. In this case device index 0 is chosen.
chainerx.get_device('native')
Returns:
~chainerx.Device: Device object.
""")
_docs.set_doc(
chainerx.get_default_device,
"""get_default_device()
Returns the default device associated with the current thread.
Returns:
~chainerx.Device: The default device.
.. seealso::
* :func:`chainerx.set_default_device`
* :func:`chainerx.device_scope`
""")
_docs.set_doc(
chainerx.set_default_device,
"""set_default_device(device)
Sets a given device as the default device of the current thread.
Args:
device (~chainerx.Device or str): Device object or device name to set as
the default device.
.. seealso::
* :func:`chainerx.get_default_device`
* :func:`chainerx.device_scope`
""")
_docs.set_doc(
chainerx.device_scope,
"""device_scope(device)
Creates a context manager to temporarily set the default device.
Args:
device (~chainerx.Device or str): Device object or device name to set as
the default device during the context.
.. seealso::
* :func:`chainerx.get_default_device`
* :func:`chainerx.set_default_device`
""")
|
Python
| 0
|
@@ -950,16 +950,17 @@
argument
+s
.%0A%0AIf th
|
81787c22af5fa5da2a679898b118bce6235e83f4
|
Fix authorize.net for python3
|
cartridge/shop/payment/authorizenet.py
|
cartridge/shop/payment/authorizenet.py
|
from __future__ import unicode_literals
from future.builtins import str
try:
from urllib.request import Request, urlopen
from urllib.error import URLError
except ImportError:
from urllib2 import Request, urlopen, URLError
from django.core.exceptions import ImproperlyConfigured
from django.utils.http import urlencode
from mezzanine.conf import settings
from cartridge.shop.checkout import CheckoutError
AUTH_NET_LIVE = 'https://secure.authorize.net/gateway/transact.dll'
AUTH_NET_TEST = 'https://test.authorize.net/gateway/transact.dll'
try:
AUTH_NET_LOGIN = settings.AUTH_NET_LOGIN
AUTH_NET_TRANS_KEY = settings.AUTH_NET_TRANS_KEY
except AttributeError:
raise ImproperlyConfigured("You need to define AUTH_NET_LOGIN and "
"AUTH_NET_TRANS_KEY in your settings module "
"to use the authorizenet payment processor.")
def process(request, order_form, order):
"""
Raise cartridge.shop.checkout.CheckoutError("error message") if
payment is unsuccessful.
"""
trans = {}
amount = order.total
trans['amount'] = amount
if settings.DEBUG:
trans['connection'] = AUTH_NET_TEST
else:
trans['connection'] = AUTH_NET_LIVE
trans['authorize_only'] = False
trans['configuration'] = {
'x_login': AUTH_NET_LOGIN,
'x_tran_key': AUTH_NET_TRANS_KEY,
'x_version': '3.1',
'x_relay_response': 'FALSE',
'x_test_request': 'FALSE',
'x_delim_data': 'TRUE',
'x_delim_char': '|',
# could be set to AUTH_ONLY to only authorize but not capture payment
'x_type': 'AUTH_CAPTURE',
'x_method': 'CC',
}
data = order_form.cleaned_data
trans['custBillData'] = {
'x_first_name': data['billing_detail_first_name'],
'x_last_name': data['billing_detail_last_name'],
'x_address': data['billing_detail_street'],
'x_city': data['billing_detail_city'],
'x_state': data['billing_detail_state'],
'x_zip': data['billing_detail_postcode'],
'x_country': data['billing_detail_country'],
'x_phone': data['billing_detail_phone'],
'x_email': data['billing_detail_email'],
}
trans['custShipData'] = {
'x_ship_to_first_name': data['shipping_detail_first_name'],
'x_ship_to_last_name': data['shipping_detail_last_name'],
'x_ship_to_address': data['shipping_detail_street'],
'x_ship_to_city': data['shipping_detail_city'],
'x_ship_to_state': data['shipping_detail_state'],
'x_ship_to_zip': data['shipping_detail_postcode'],
'x_ship_to_country': data['shipping_detail_country'],
}
trans['transactionData'] = {
'x_amount': amount,
'x_card_num': data['card_number'],
'x_exp_date': '{month}/{year}'.format(month=data['card_expiry_month'],
year=data['card_expiry_year']),
'x_card_code': data['card_ccv'],
'x_invoice_num': str(order.id)
}
part1 = urlencode(trans['configuration']) + "&"
part2 = "&" + urlencode(trans['custBillData'])
part3 = "&" + urlencode(trans['custShipData'])
trans['postString'] = (part1 + urlencode(trans['transactionData']) +
part2 + part3)
request_args = {"url": trans['connection'], "data": trans['postString']}
try:
all_results = urlopen(Request(**request_args)).read()
except URLError:
raise CheckoutError("Could not talk to authorize.net payment gateway")
parsed_results = all_results.split(trans['configuration']['x_delim_char'])
# response and response_reason_codes with their meaning here:
# http://www.authorize.net/support/merchant/Transaction_Response/
# Response_Reason_Codes_and_Response_Reason_Text.htm
# not exactly sure what the reason code is
response_code = parsed_results[0]
# reason_code = parsed_results[1]
# response_reason_code = parsed_results[2]
# response_text = parsed_results[3]
# transaction_id = parsed_results[6]
success = response_code == '1'
if not success:
raise CheckoutError("Transaction declined: " + parsed_results[2])
return parsed_results[6]
|
Python
| 0.000002
|
@@ -3393,16 +3393,32 @@
String'%5D
+.encode('utf-8')
%7D%0A tr
@@ -3616,16 +3616,32 @@
results.
+decode('utf-8').
split(tr
|
9a9d968f979dd12ee2bf4b1922aa1e0d70d27974
|
switch to cmake
|
packages/dependencies/libogg.py
|
packages/dependencies/libogg.py
|
{
'repo_type' : 'git',
'url' : 'https://github.com/xiph/ogg.git',
# 'folder_name' : 'ogg-1.3.2',
'configure_options' : '{autoconf_prefix_options}',
'_info' : { 'version' : None, 'fancy_name' : 'ogg' },
}
|
Python
| 0.000001
|
@@ -66,36 +66,62 @@
',%0A%09
-# 'folder_name' : 'ogg-1.3.2
+'conf_system' : 'cmake',%0A%09'source_subfolder' : '_build
',%0A%09
@@ -147,17 +147,17 @@
: '
-%7Bautoconf
+.. %7Bcmake
_pre
@@ -168,16 +168,104 @@
options%7D
+ -DCMAKE_INSTALL_PREFIX=%7Btarget_prefix%7D -DBUILD_SHARED_LIBS=0 -DCMAKE_BUILD_TYPE=Release
',%0A%09'_in
|
58c442c965e66934a39ca69f15fd6d8ae3280f93
|
Add HTTPError handling when Reimbursement URL doesn't exist
|
rosie/rosie/chamber_of_deputies/adapter.py
|
rosie/rosie/chamber_of_deputies/adapter.py
|
import logging
import os
from datetime import date
from pathlib import Path
from re import match
import numpy as np
import pandas as pd
from serenata_toolbox.chamber_of_deputies.reimbursements import Reimbursements
from serenata_toolbox.datasets import fetch
class Adapter:
STARTING_YEAR = 2009
COMPANIES_DATASET = '2016-09-03-companies.xz'
REIMBURSEMENTS_PATTERN = r'reimbursements-\d{4}\.csv'
RENAME_COLUMNS = {
'subquota_description': 'category',
'total_net_value': 'net_value',
'cnpj_cpf': 'recipient_id',
'supplier': 'recipient'
}
DTYPE = {
'applicant_id': np.str,
'cnpj_cpf': np.str,
'congressperson_id': np.str,
'subquota_number': np.str
}
def __init__(self, path):
self.path = path
self.log = logging.getLogger(__name__)
@property
def dataset(self):
self.update_datasets()
df = self.reimbursements.merge(
self.companies,
how='left',
left_on='cnpj_cpf',
right_on='cnpj'
)
self.prepare_dataset(df)
self.log.info('Dataset ready! Rosie starts her analysis now :)')
return df
@property
def companies(self):
self.log.info('Loading companies')
path = Path(self.path) / self.COMPANIES_DATASET
df = pd.read_csv(path, dtype={'cnpj': np.str}, low_memory=False)
df['cnpj'] = df['cnpj'].str.replace(r'\D', '')
return df
@property
def reimbursements(self):
df = pd.DataFrame()
paths = (
str(path) for path in Path(self.path).glob('*.csv')
if match(self.REIMBURSEMENTS_PATTERN, path.name)
)
for path in paths:
self.log.info(f'Loading reimbursements from {path}')
year_df = pd.read_csv(path, dtype=self.DTYPE, low_memory=False)
df = df.append(year_df)
return df
def update_datasets(self):
self.update_companies()
self.update_reimbursements()
def update_companies(self):
self.log.info('Updating companies')
os.makedirs(self.path, exist_ok=True)
fetch(self.COMPANIES_DATASET, self.path)
def update_reimbursements(self, years=None):
if not years:
next_year = date.today().year + 1
years = range(self.STARTING_YEAR, next_year)
for year in years:
self.log.info(f'Updating reimbursements from {year}')
Reimbursements(year, self.path)()
def prepare_dataset(self, df):
self.rename_categories(df)
self.coerce_dates(df)
self.rename_columns(df)
def rename_columns(self, df):
self.log.info('Renaming columns to Serenata de Amor standard')
df.rename(columns=self.RENAME_COLUMNS, inplace=True)
def rename_categories(self, df):
self.log.info('Categorizing reimbursements')
# There's no documented type for `3`, `4` and `5`, thus we assume it's
# an input error until we hear back from Chamber of Deputies
types = ('bill_of_sale', 'simple_receipt', 'expense_made_abroad')
converters = {number: None for number in range(3, 6)}
df['document_type'].replace(converters, inplace=True)
df['document_type'] = df['document_type'].astype('category')
df['document_type'].cat.rename_categories(types, inplace=True)
# Some classifiers expect a more broad category name for meals
rename = {'Congressperson meal': 'Meal'}
df['subquota_description'] = df['subquota_description'].replace(rename)
df['is_party_expense'] = df['congressperson_id'].isnull()
def coerce_dates(self, df):
for field, fmt in (('issue_date', '%Y-%m-%d'), ('situation_date', '%d/%m/%Y')):
self.log.info(f'Coercing {field} column to date data type')
df[field] = pd.to_datetime(df[field], format=fmt, errors='coerce')
|
Python
| 0
|
@@ -129,16 +129,51 @@
as as pd
+%0Afrom urllib.error import HTTPError
%0A%0Afrom s
@@ -2503,24 +2503,45 @@
om %7Byear%7D')%0A
+ try:%0A
@@ -2573,16 +2573,154 @@
.path)()
+%0A except HTTPError as e:%0A self.log.error(f'Could not update Reimbursement from year %7Byear%7D: %7Be%7D - %7Be.filename%7D')
%0A%0A de
|
5d136b1fc8d2d4945352e7ee9e6d25ebd2190e56
|
rename tags to subjects to better match schema
|
scrapi/base/schemas.py
|
scrapi/base/schemas.py
|
from __future__ import unicode_literals
from dateutil.parser import parse
from .helpers import (
default_name_parser,
oai_extract_url,
# oai_extract_doi,
oai_process_contributors,
compose,
single_result,
format_tags,
language_code
)
CONSTANT = lambda x: lambda *_, **__: x
BASEXMLSCHEMA = {
"description": ('//dc:description/node()', compose(lambda x: x.strip(), single_result)),
"contributors": ('//dc:creator/node()', compose(default_name_parser, lambda x: x.split(';'), single_result)),
"title": ('//dc:title/node()', compose(lambda x: x.strip(), single_result)),
"providerUpdatedDateTime": ('//dc:dateEntry/node()', compose(lambda x: x.strip(), single_result)),
"uris": {
"canonicalUri": ('//dcq:identifier-citation/node()', compose(lambda x: x.strip(), single_result)),
}
}
OAISCHEMA = {
"contributors": ('//dc:creator/node()', '//dc:contributor/node()', oai_process_contributors),
"uris": {
"canonicalUri": ('//dc:identifier/node()', oai_extract_url)
},
'providerUpdatedDateTime': ('//ns0:header/ns0:datestamp/node()', lambda x: parse(x[0]).replace(tzinfo=None).isoformat()),
'title': ('//dc:title/node()', single_result),
'description': ('//dc:description/node()', single_result),
'tags': ('//dc:subject/node()', format_tags),
'publisher': {
'name': ('//dc:publisher/node()', single_result)
},
'languages': ('//dc:language', compose(lambda x: [x], language_code, single_result))
}
|
Python
| 0
|
@@ -1288,19 +1288,23 @@
),%0A '
-tag
+subject
s': ('//
|
076d3831aa04941e6ae8d36dc95f03269a05436f
|
Fix python formatting issues.
|
backend/mcapi/machine.py
|
backend/mcapi/machine.py
|
from mcapp import app
from decorators import crossdomain, apikey, jsonp
from flask import request, g
import error
import rethinkdb as r
import dmutil
import json
import args
import access
@app.route('/machines', methods=['GET'])
@jsonp
def get_all_machines():
rr = r.table('machines').order_by(r.desc('birthtime'))
selection = list(rr.run(g.conn, time_format='raw'))
return args.json_as_format_arg(selection)
@app.route('/machines/<machine_id>', methods=['GET'])
@jsonp
def get_machine(machine_id):
return dmutil.get_single_from_table('machines', machine_id)
@app.route('/machines/new', methods=['POST'])
@apikey
@crossdomain(origin='*')
def create_machine():
j = request.get_json()
machine = dict()
machine['additional'] = dmutil.get_required('additional', j)
machine['name'] = dmutil.get_required('Name', j)
machine['notes'] = dmutil.get_required('Notes', j)
machine['birthtime'] = r.now()
return dmutil.insert_entry('machines', machine)
@app.route('/materials', methods=['GET'])
@apikey(shared=True)
@jsonp
def get_all_materials():
rr = r.table('materials').order_by(r.desc('birthtime'))
selection = list(rr.run(g.conn, time_format='raw'))
return args.json_as_format_arg(selection)
@app.route('/materials/new', methods=['POST'])
@apikey
@crossdomain(origin='*')
def create_material():
j = request.get_json()
material = dict()
user = access.get_user()
material['name'] = dmutil.get_required('name', j)
material['alloy'] = dmutil.get_required('alloy', j)
material['notes'] = dmutil.get_required('notes', j)
material['model'] = dmutil.get_required('model', j)
material['birthtime'] = r.now()
material['created_by'] = user
material['treatments_order'] = dmutil.get_optional('treatments_order',j)
material['treatments'] = dmutil.get_optional('treatments', j)
return dmutil.insert_entry('materials', material)
@app.route('/materials/<material_id>', methods=['GET'])
@jsonp
def get_material(material_id):
return dmutil.get_single_from_table('materials', material_id)
|
Python
| 0.000003
|
@@ -98,21 +98,8 @@
, g%0A
-import error%0A
impo
@@ -134,20 +134,8 @@
til%0A
-import json%0A
impo
@@ -392,16 +392,17 @@
ction)%0A%0A
+%0A
@app.rou
@@ -1766,16 +1766,17 @@
_order',
+
j)%0A m
|
520d7f4d00f58ad1f6367ea627d9de074aacf8d5
|
Fix redirect to checkout
|
satchless/contrib/checkout/common/views.py
|
satchless/contrib/checkout/common/views.py
|
# -*- coding:utf-8 -*-
try:
from functools import wraps
except ImportError:
from django.utils.functional import wraps # Python 2.4 fallback.
from django.shortcuts import redirect
from django.template.response import TemplateResponse
from django.utils.decorators import available_attrs
from django.views.decorators.http import require_POST
from ....cart.models import Cart
from ....order import handler
from ....order import models
from ....order import signals
from ....payment import PaymentFailure, ConfirmationFormNeeded
def require_order(status=None):
def decorator(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
order = None
if 'order_token' in kwargs:
try:
order = models.Order.objects.get(token=kwargs['order_token'],
status=status)
except models.Order.DoesNotExist:
pass
if not order:
return redirect('satchless-cart-view')
elif status is not None and status != order.status:
if order.status == 'checkout':
return redirect('checkout')
elif order.status == 'payment-pending':
return redirect(confirmation)
else:
return redirect('satchless-order-view',
order_token=order.token)
request.order = order
return view_func(request, *args, **kwargs)
return _wrapped_view
return decorator
@require_POST
def prepare_order(request, typ):
cart = Cart.objects.get_or_create_from_request(request, typ)
order_pk = request.session.get('satchless_order')
previous_orders = models.Order.objects.filter(pk=order_pk, cart=cart,
status='checkout')
try:
order = previous_orders.get()
except models.Order.DoesNotExist:
try:
order = models.Order.objects.get_from_cart(cart)
except models.EmptyCart:
return redirect('satchless-cart-view', typ=typ)
request.session['satchless_order'] = order.pk
return redirect('satchless-checkout', order_token=order.token)
@require_POST
@require_order(status='payment-failed')
def reactivate_order(request, order_token):
order = request.order
order.set_status('checkout')
return redirect('satchless-checkout', order_token=order.token)
@require_order(status='payment-pending')
def confirmation(request, order_token):
"""
Checkout confirmation
The final summary, where user is asked to review and confirm the order.
Confirmation will redirect to the payment gateway.
"""
order = request.order
if not request.order:
return redirect('satchless-checkout', order_token=order.token)
signals.order_pre_confirm.send(sender=models.Order, instance=order,
request=request)
try:
handler.confirm(order)
except ConfirmationFormNeeded, e:
return TemplateResponse(request, 'satchless/checkout/confirmation.html', {
'formdata': e,
'order': order,
})
except PaymentFailure:
order.set_status('payment-failed')
else:
order.set_status('payment-complete')
return redirect('satchless-order-view', order_token=order.token)
|
Python
| 0
|
@@ -1247,25 +1247,95 @@
direct('
-checkout'
+satchless-checkout',%0A order_toke=order.token
)%0A
|
854c9e4843ebb5e2e4e95359cab2bb1f58f90b04
|
Fix RepositoryEnv logic
|
decouple.py
|
decouple.py
|
# coding: utf-8
import os
import sys
# Useful for very coarse version differentiation.
PY3 = sys.version_info[0] == 3
if PY3:
from configparser import ConfigParser
else:
from ConfigParser import SafeConfigParser as ConfigParser
class UndefinedValueError(Exception):
pass
class Undefined(object):
"""
Class to represent undefined type.
"""
pass
# Reference instance to represent undefined values
undefined = Undefined()
class Config(object):
"""
Handle .env file format used by Foreman.
"""
_BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def __init__(self, repository):
self.repository = repository
def _cast_boolean(self, value):
"""
Helper to convert config values to boolean as ConfigParser do.
"""
if value.lower() not in self._BOOLEANS:
raise ValueError('Not a boolean: %s' % value)
return self._BOOLEANS[value.lower()]
def get(self, option, default=undefined, cast=undefined):
"""
Return the value for option or default if defined.
"""
if self.repository.has_key(option):
value = self.repository.get(option)
else:
value = default
if isinstance(value, Undefined):
raise UndefinedValueError('%s option not found and default value was not defined.' % option)
if isinstance(cast, Undefined):
cast = lambda v: v # nop
elif cast is bool:
cast = self._cast_boolean
return cast(value)
def __call__(self, *args, **kwargs):
"""
Convenient shortcut to get.
"""
return self.get(*args, **kwargs)
class RepositoryBase(object):
def __init__(self, source):
raise NotImplemented
def has_key(self, key):
raise NotImplemented
def get(self, key):
raise NotImplemented
class RepositoryIni(RepositoryBase):
"""
Retrieves option keys from .ini files.
"""
SECTION = 'settings'
def __init__(self, source):
self.parser = ConfigParser()
self.parser.readfp(open(source))
def has_key(self, key):
return self.parser.has_option(self.SECTION, key)
def get(self, key):
return self.parser.get(self.SECTION, key)
class RepositoryEnv(RepositoryBase):
"""
Retrieves option keys from .env files with fall back to os.env.
"""
def __init__(self, source):
self.data = {}
for line in open(source):
line = line.strip()
if not line or line.startswith('#') or '=' not in line:
continue
k, v = line.split('=', 1)
v = v.strip("'").strip('"')
self.data[k] = v
def has_key(self, key):
return key in self.data or key in os.environ
def get(self, key):
return self.data[key] or os.environ[key]
class RepositoryShell(RepositoryBase):
"""
Retrieves option keys from os.env.
"""
def __init__(self, source=None):
pass
def has_key(self, key):
return key in os.env
def get(self, key):
return os.env[key]
class AutoConfig(object):
"""
Autodetects the config file and type.
"""
SUPPORTED = {
'settings.ini': ConfigIni,
'.env': ConfigEnv,
}
def __init__(self):
self.config = None
def _find_file(self, path):
# look for all files in the current path
for filename in self.SUPPORTED:
file = os.path.join(path, filename)
if os.path.exists(file):
return file
# search the parent
parent = os.path.dirname(path)
if parent and parent != os.path.sep:
return self._find_file(parent)
# reached root without finding any files.
return ''
def _load(self, path):
# Avoid unintended permission errors
try:
file = self._find_file(path)
except:
file = ''
klass = self.SUPPORTED.get(os.path.basename(file))
if not klass:
klass = ConfigShell
self.config = klass(file)
def _caller_path(self):
# MAGIC! Get the caller's module path.
frame = sys._getframe()
path = os.path.dirname(frame.f_back.f_back.f_code.co_filename)
return path
def __call__(self, *args, **kwargs):
if not self.config:
self._load(self._caller_path())
return self.config(*args, **kwargs)
# A pré-instantiated AutoConfig to improve decouple's usability
# now just import config and start using with no configuration.
config = AutoConfig()
|
Python
| 0.000001
|
@@ -2949,21 +2949,25 @@
elf.data
-%5Bkey%5D
+.get(key)
or os.e
|
d19bcad058ba617b9fec91583be0a0f019a7d3dc
|
Replace spaces with underscores in ansible inventories
|
src/mist/io/inventory.py
|
src/mist/io/inventory.py
|
import mist.io.methods
class MistInventory(object):
def __init__(self, user, machines=None):
self.user = user
self.hosts = {}
self.keys = {}
self._cache = {}
self.load(machines)
def load(self, machines=None):
self.hosts = {}
self.keys = {}
if not machines:
machines = [(bid, m['id'])
for bid in self.user.backends
for m in self._list_machines(bid)]
for bid, mid in machines:
try:
name, ip_addr = self.find_machine_details(bid, mid)
key_id, ssh_user, port = self.find_ssh_settings(bid, mid)
except Exception as exc:
print exc
continue
if key_id not in self.keys:
self.keys[key_id] = self.user.keypairs[key_id].private
if name in self.hosts:
num = 2
while ('%s-%d' % (name, num)) in self.hosts:
num += 1
name = '%s-%d' % (name, num)
self.hosts[name] = {
'ansible_ssh_host': ip_addr,
'ansible_ssh_port': port,
'ansible_ssh_user': ssh_user,
'ansible_ssh_private_key_file': 'id_rsa/%s' % key_id,
}
def export(self, include_localhost=True):
ans_inv = ''
if include_localhost:
ans_inv += 'localhost\tansible_connection=local\n\n'
for name, host in self.hosts.items():
vars_part = ' '.join(["%s=%s" % item for item in host.items()])
ans_inv += '%s\t%s\n' % (name, vars_part)
ans_inv += ('\n[all:vars]\n'
'ansible_python_interpreter="/usr/bin/env python2"\n')
ans_cfg = '[defaults]\nhostfile=./inventory\nhost_key_checking=False\n'
files = {'ansible.cfg': ans_cfg, 'inventory': ans_inv}
files.update({'id_rsa/%s' % key_id: private_key
for key_id, private_key in self.keys.items()})
return files
def _list_machines(self, backend_id):
if backend_id not in self._cache:
print 'Actually doing list_machines for %s' % backend_id
machines = mist.io.methods.list_machines(self.user, backend_id)
self._cache[backend_id] = machines
return self._cache[backend_id]
def find_machine_details(self, backend_id, machine_id):
machines = self._list_machines(backend_id)
for machine in machines:
if machine['id'] == machine_id:
name = machine['name']
ips = [ip for ip in machine['public_ips'] if ':' not in ip]
if not name:
name = machine_id
if not ips:
raise Exception('Machine ip not found in list machines')
ip_addr = ips[0] if ips else ''
return name, ip_addr
raise Exception('Machine not found in list_machines')
def find_ssh_settings(self, backend_id, machine_id):
assocs = []
for key_id, keypair in self.user.keypairs.items():
for assoc in keypair.machines:
if [backend_id, machine_id] == assoc[:2]:
assocs.append({
'key_id': key_id,
'last': assoc[2] if len(assoc) > 2 else 0,
'user': assoc[3] if len(assoc) > 3 else '',
'port': assoc[5] if len(assoc) > 5 else 22,
})
if not assocs:
raise Exception("Machine doesn't have SSH association")
assoc = sorted(assocs, key=lambda a: a['last'])[-1]
return assoc['key_id'], assoc['user'] or 'root', assoc['port']
|
Python
| 0.999317
|
@@ -2599,16 +2599,34 @@
%5B'name'%5D
+.replace(' ', '_')
%0A
|
1ebc38cd78a1453ab404714acb73fbe16840d0ed
|
Use `yaml.full_load_all` instead of `yaml.load_all` (#13577)
|
chart/tests/helm_template_generator.py
|
chart/tests/helm_template_generator.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import subprocess
import sys
from functools import lru_cache
from tempfile import NamedTemporaryFile
from typing import Any, Dict, Tuple
import jmespath
import jsonschema
import requests
import yaml
from kubernetes.client.api_client import ApiClient
api_client = ApiClient()
BASE_URL_SPEC = "https://raw.githubusercontent.com/instrumenta/kubernetes-json-schema/master/v1.14.0"
@lru_cache(maxsize=None)
def create_validator(api_version, kind):
api_version = api_version.lower()
kind = kind.lower()
if '/' in api_version:
ext, _, api_version = api_version.partition("/")
ext = ext.split(".")[0]
url = f'{BASE_URL_SPEC}/{kind}-{ext}-{api_version}.json'
else:
url = f'{BASE_URL_SPEC}/{kind}-{api_version}.json'
request = requests.get(url)
request.raise_for_status()
schema = request.json()
jsonschema.Draft7Validator.check_schema(schema)
validator = jsonschema.Draft7Validator(schema)
return validator
def validate_k8s_object(instance):
# Skip PostgresSQL chart
chart = jmespath.search("metadata.labels.chart", instance)
if chart and 'postgresql' in chart:
return
validate = create_validator(instance.get("apiVersion"), instance.get("kind"))
validate.validate(instance)
def render_chart(name="RELEASE-NAME", values=None, show_only=None, validate_schema=True):
"""
Function that renders a helm chart into dictionaries. For helm chart testing only
"""
values = values or {}
with NamedTemporaryFile() as tmp_file:
content = yaml.dump(values)
tmp_file.write(content.encode())
tmp_file.flush()
command = ["helm", "template", name, sys.path[0], '--values', tmp_file.name]
if show_only:
for i in show_only:
command.extend(["--show-only", i])
templates = subprocess.check_output(command)
k8s_objects = yaml.load_all(templates)
k8s_objects = [k8s_object for k8s_object in k8s_objects if k8s_object] # type: ignore
if validate_schema:
for k8s_object in k8s_objects:
validate_k8s_object(k8s_object)
return k8s_objects
def prepare_k8s_lookup_dict(k8s_objects) -> Dict[Tuple[str, str], Dict[str, Any]]:
"""
Helper to create a lookup dict from k8s_objects.
The keys of the dict are the k8s object's kind and name
"""
k8s_obj_by_key = {
(k8s_object["kind"], k8s_object["metadata"]["name"]): k8s_object for k8s_object in k8s_objects
}
return k8s_obj_by_key
def render_k8s_object(obj, type_to_render):
"""
Function that renders dictionaries into k8s objects. For helm chart testing only.
"""
return api_client._ApiClient__deserialize_model(obj, type_to_render) # pylint: disable=W0212
|
Python
| 0.000001
|
@@ -2689,16 +2689,21 @@
= yaml.
+full_
load_all
|
f8f63d4b15ce68797d6e16943bd85efb19a77752
|
Fix recording failure for system pollster
|
ceilometer/hardware/pollsters/system.py
|
ceilometer/hardware/pollsters/system.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'system'
class _SystemBase(_Base):
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=self.TYPE,
unit=self.UNIT,
volume=value,
res_metadata=metadata,
extra=extra)
class SystemCpuIdlePollster(_SystemBase):
IDENTIFIER = 'system_stats.cpu.idle'
TYPE = sample.TYPE_GAUGE
UNIT = '%'
class SystemIORawSentPollster(_SystemBase):
IDENTIFIER = 'system_stats.io.outgoing.blocks'
TYPE = sample.TYPE_CUMULATIVE,
UNIT = 'blocks'
class SystemIORawReceivedPollster(_SystemBase):
IDENTIFIER = 'system_stats.io.incoming.blocks'
TYPE = sample.TYPE_CUMULATIVE,
UNIT = 'blocks'
|
Python
| 0.999017
|
@@ -1500,33 +1500,32 @@
.TYPE_CUMULATIVE
-,
%0A UNIT = 'blo
@@ -1663,17 +1663,16 @@
MULATIVE
-,
%0A UNI
|
bf614837f9b05645bdcfb0f524d1d04b69ac49df
|
Add missing explicit cfg option import
|
ceilometer/tests/event/test_endpoint.py
|
ceilometer/tests/event/test_endpoint.py
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2012 New Dream Network, LLC (DreamHost)
#
# Author: Doug Hellmann <doug.hellmann@dreamhost.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for Ceilometer notify daemon."""
import mock
import oslo.messaging
from stevedore import extension
from ceilometer.event import endpoint as event_endpoint
from ceilometer import messaging
from ceilometer.openstack.common.fixture import config
from ceilometer.storage import models
from ceilometer.tests import base as tests_base
TEST_NOTICE_CTXT = {
u'auth_token': u'3d8b13de1b7d499587dfc69b77dc09c2',
u'is_admin': True,
u'project_id': u'7c150a59fe714e6f9263774af9688f0e',
u'quota_class': None,
u'read_deleted': u'no',
u'remote_address': u'10.0.2.15',
u'request_id': u'req-d68b36e0-9233-467f-9afb-d81435d64d66',
u'roles': [u'admin'],
u'timestamp': u'2012-05-08T20:23:41.425105',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
}
TEST_NOTICE_METADATA = {
u'message_id': u'dae6f69c-00e0-41c0-b371-41ec3b7f4451',
u'timestamp': u'2012-05-08 20:23:48.028195',
}
TEST_NOTICE_PAYLOAD = {
u'created_at': u'2012-05-08 20:23:41',
u'deleted_at': u'',
u'disk_gb': 0,
u'display_name': u'testme',
u'fixed_ips': [{u'address': u'10.0.0.2',
u'floating_ips': [],
u'meta': {},
u'type': u'fixed',
u'version': 4}],
u'image_ref_url': u'http://10.0.2.15:9292/images/UUID',
u'instance_id': u'9f9d01b9-4a58-4271-9e27-398b21ab20d1',
u'instance_type': u'm1.tiny',
u'instance_type_id': 2,
u'launched_at': u'2012-05-08 20:23:47.985999',
u'memory_mb': 512,
u'state': u'active',
u'state_description': u'',
u'tenant_id': u'7c150a59fe714e6f9263774af9688f0e',
u'user_id': u'1e3ce043029547f1a61c1996d1a531a2',
u'reservation_id': u'1e3ce043029547f1a61c1996d1a531a3',
u'vcpus': 1,
u'root_gb': 0,
u'ephemeral_gb': 0,
u'host': u'compute-host-name',
u'availability_zone': u'1e3ce043029547f1a61c1996d1a531a4',
u'os_type': u'linux?',
u'architecture': u'x86',
u'image_ref': u'UUID',
u'kernel_id': u'1e3ce043029547f1a61c1996d1a531a5',
u'ramdisk_id': u'1e3ce043029547f1a61c1996d1a531a6',
}
class TestEventEndpoint(tests_base.BaseTestCase):
def setUp(self):
super(TestEventEndpoint, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
self.CONF([])
messaging.setup('fake://')
self.addCleanup(messaging.cleanup)
self.CONF.set_override("connection", "log://", group='database')
self.CONF.set_override("store_events", True, group="notification")
self.mock_dispatcher = mock.MagicMock()
self.endpoint = event_endpoint.EventsNotificationEndpoint()
self.endpoint.dispatcher_manager = \
extension.ExtensionManager.make_test_instance([
extension.Extension('test', None, None, self.mock_dispatcher)
])
self.endpoint.event_converter = mock.MagicMock()
self.endpoint.event_converter.to_event.return_value = mock.MagicMock(
event_type='test.test')
def test_message_to_event(self):
self.endpoint.info(TEST_NOTICE_CTXT, 'compute.vagrant-precise',
'compute.instance.create.end',
TEST_NOTICE_PAYLOAD, TEST_NOTICE_METADATA)
def test_message_to_event_duplicate(self):
self.mock_dispatcher.record_events.return_value = [
(models.Event.DUPLICATE, object())]
message = {'event_type': "foo", 'message_id': "abc"}
self.endpoint.process_notification(message) # Should return silently.
def test_message_to_event_bad_event(self):
self.CONF.set_override("ack_on_event_error", False,
group="notification")
self.mock_dispatcher.record_events.return_value = [
(models.Event.UNKNOWN_PROBLEM, object())]
message = {'event_type': "foo", 'message_id': "abc"}
ret = self.endpoint.process_notification(message)
self.assertEqual(oslo.messaging.NotificationResult.REQUEUE, ret)
|
Python
| 0.00025
|
@@ -734,16 +734,44 @@
t mock%0A%0A
+from oslo.config import cfg%0A
import o
@@ -2801,16 +2801,123 @@
6',%0A%7D%0A%0A%0A
+cfg.CONF.import_opt('store_events', 'ceilometer.notification',%0A group='notification')%0A%0A%0A
class Te
|
cd28805878328e87a4c2f16d5d912a31805de332
|
Add description to IPNetworkField
|
helpers/models.py
|
helpers/models.py
|
from django.db import models
from . import validators
# Create your models here.
class IPNetworkField(models.CharField):
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 18
self.default_validators = [validators.validate_ipv4_network]
super().__init__(*args, **kwargs)
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs['max_length']
return name, path, args, kwargs
|
Python
| 0
|
@@ -22,16 +22,72 @@
models%0A
+from django.utils.translation import ugettext_lazy as _%0A
%0A%0Afrom .
@@ -175,16 +175,62 @@
Field):%0A
+ description = _(%22IP address or network%22)%0A%0A
def
|
18618a56ce674c479a0737dcabd4a47913ae2dde
|
Add functionality to copy any missing files to the other folder
|
scripts/compare_dir.py
|
scripts/compare_dir.py
|
import os
dropboxFiles = []
localFiles = []
for dirpath, dirnames, filenames in os.walk(
'/media/itto/TOSHIBA EXT/Photos/Dropbox/ITTO/Southeast Asia 2017'):
dropboxFiles += filenames
for dirpath, dirnames, filenames in os.walk(
'/media/itto/TOSHIBA EXT/Photos/Southeast Asia'):
if ('Process' not in dirpath):
localFiles += filenames
localNotInDropbox = []
for file in localFiles:
if file not in dropboxFiles:
localNotInDropbox.append(file)
print('************')
for file in dropboxFiles:
if file not in localFiles:
print(file)
print(len(localNotInDropbox))
|
Python
| 0
|
@@ -7,37 +7,172 @@
os%0A
-%0AdropboxF
+from shutil import copyfile%0A%0AFOLDER_A = '/media/itto/TOSHIBA EXT/Photos/Southeast Asia'%0AFOLDER_B = '/media/itto/disk/PRIVATE/AVCHD/BDMV/STREAM'%0Af
iles
+_a
= %5B%5D%0A
-localF
+f
iles
+_b
= %5B
@@ -221,101 +221,30 @@
alk(
-%0A '/media/itto/TOSHIBA EXT/Photos/Dropbox/ITTO/Southeast Asia 2017'):%0A dropboxF
+FOLDER_A):%0A f
iles
+_a
+=
@@ -302,157 +302,151 @@
alk(
-%0A '/media/itto/TOSHIBA EXT/Photos/Southeast Asia'):%0A if ('Process' not in dirpath):%0A localFiles += filenames%0A%0AlocalNotInDropbox = %5B%5D
+FOLDER_B):%0A files_b += filenames%0A%0AinA_notB = %5B%5D%0AinB_notA = %5B%5D%0Afor file in files_b:%0A if file not in files_a:%0A inB_notA.append(file)
%0Afor
@@ -450,34 +450,31 @@
for file in
-localF
+f
iles
+_a
:%0A if fil
@@ -478,36 +478,31 @@
file not in
-dropboxF
+f
iles
+_b
:%0A lo
@@ -499,33 +499,24 @@
-localNotInDropbox
+inA_notB
.append(
@@ -525,133 +525,694 @@
le)%0A
+%0A
print('
-************')%0Afor file in dropboxFiles:%0A if file not in localFiles:%0A prin
+%7B%7D in Folder A. %7B%7D in Folder B.'.format(len(files_a), len(files_b)))%0Aprint('In A but not B: %7B%7D'.format(len(inA_notB)))%0Aprint('In B but not A: %7B%7D'.format(len(inB_notA)))%0A%0A%0Adef EnsureFolder(path):%0A if os.path.isdir(path):%0A pass%0A else:%0A # Make folder%0A os.mkdir(path)%0A%0A%0Adef CopyLeftoverFromBToA():%0A for file in inB_notA:%0A EnsureFolder(os.path.join(FOLDER_A, 'transfer'))%0A src = os.path.join(FOLDER_B, file)%0A dst = os.path.join(FOLDER_A, 'transfer', file)%0A if not os.path.exists(dst):%0A print('Copying %7B%7D'.forma
t(file)
-%0A%0Aprint(len(localNotInDropbox
+)%0A copyfile(src, dst)%0A else:%0A print('%7B%7D previously copied'.format(file
))
|
b20a571c83d8c7727e7a8864e12b9ccde80af7cf
|
Update settings.py to use basedir more intelligently.
|
clintools/settings.py
|
clintools/settings.py
|
"""
Django settings for clintools project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
with open('secrets/secret_key.txt') as f:
SECRET_KEY = f.read().strip()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pttrack',
'bootstrap3',
'bootstrap3_datetime',
'djangular',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'clintools.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'clintools.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Chicago'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'static/'
|
Python
| 0
|
@@ -670,16 +670,39 @@
th open(
+os.path.join(BASE_DIR,
'secrets
@@ -718,16 +718,17 @@
ey.txt')
+)
as f:%0A
|
94c0dc5ac2a73b373ca99ed7edca36523cdac230
|
fix assert failure
|
scripts/parse-props.py
|
scripts/parse-props.py
|
#!/usr/bin/env python
# Parse property file.
#
# Usage:
#
# lglaf.py -c '!INFO GPRO \x08\x0b\0\0' > props.bin
# scripts/parse-props.py props.bin
import argparse, sys, struct
def stringify(resp):
if not isinstance(resp, str):
try: resp = resp.decode('ascii')
except: pass
return resp
def get_str(data, shadow, offset):
resp = b''
#while True:
while offset < len(data):
b = data[offset:offset+1]
shadow[offset] = 's'
if b == b'\0':
break
resp += b
offset += 1
return stringify(resp)
def get_chr(data, shadow, offset):
b = data[offset:offset+1]
shadow[offset] = 'c'
return stringify(b)
def get_int(data, shadow, offset):
d = struct.unpack_from('<I', data, offset)[0]
for off in range(offset, offset+4):
shadow[off] = 'd'
return d
# Description of the contents
keys = [
(0x3f9, get_str, "download cable"),
(0x42b, get_int, "battery level"),
(0x010, get_chr, "download type"),
(0x021, get_int, "download speed"),
(0x403, get_str, "usb version"),
(0x417, get_str, "hardware revision"),
(0x029, get_str, "download sw version"),
(0x14f, get_str, "device sw version"),
(0x42f, get_chr, "secure device"),
(0x4e8, get_str, "laf sw version"),
(0x24f, get_str, "device factory version"),
(0x528, get_str, "device factory out version"),
(0x3db, get_str, "pid"),
(0x3c7, get_str, "imei"),
(0x131, get_str, "model name"),
(0x430, get_str, "device build type"),
(0x43a, get_str, "chipset platform"),
(0x44e, get_str, "target_operator"),
(0x462, get_str, "target_country"),
(0x4fc, get_int, "ap_factory_reset_status"),
(0x500, get_int, "cp_factory_reset_status"),
(0x504, get_int, "isDownloadNotFinish"),
(0x508, get_int, "qem"),
(0x628, get_str, "cupss swfv"),
(0x728, get_int, "is one binary dual plan"),
(0x72c, get_int, "memory size"),
(0x730, get_str, "memory_id"),
(0x39f, get_str, "bootloader_ver"),
]
def debug_other(data, shadow):
for offset, shadow_type in enumerate(shadow):
data_byte = data[offset:offset+1]
if not shadow_type and data_byte != b'\0':
print("Unprocessed byte at 0x%03x: %r" % (offset, data_byte))
shadow[offset] = '*'
def print_shadow(shadow):
for offset in range(0, len(shadow), 32):
line1 = ''.join(c or '.' for c in shadow[offset:offset+16])
line2 = ''.join(c or '.' for c in shadow[offset+16:offset+32])
print("%03x: %-16s %-16s" % (offset, line1, line2))
def parse_data(data):
version = struct.unpack_from('<I', data)
expected_length = 0x00000b08
assert version == expected_length, 'Unknown version: 0x%08x' % version
assert len(data) == expected_length
# Set to non-None when processed
shadow = [None] * len(data)
for offset, getter, description in keys:
resp = getter(data, shadow, offset)
print("%-26s = %r" % (description, resp))
return data, shadow
def open_local_readable(path):
if path == '-':
try: return sys.stdin.buffer
except: return sys.stdin
else:
return open(path, "rb")
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action='store_true', help="Enable debug messages")
parser.add_argument("file",
help="2824 byte properties dump file (or '-' for stdin)")
def main():
args = parser.parse_args()
data = open_local_readable(args.file).read()
data, shadow = parse_data(data)
if args.debug:
debug_other(data, shadow)
print_shadow(shadow)
if __name__ == '__main__':
main()
|
Python
| 0.000002
|
@@ -2646,24 +2646,27 @@
('%3CI', data)
+%5B0%5D
%0A expecte
|
8f24cd709aa5e8e4e5c1273c028344d7e934aeba
|
Use bson
|
scripts/serial_read.py
|
scripts/serial_read.py
|
#!/usr/bin/python3
import time
import datetime
import json
import zmq
import pymongo
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
from panoptes.utils import config, logger, serial, error, database
# @logger.set_log_level(level='debug')
@logger.has_logger
@config.has_config
class ArduinoSerialMonitor(object):
"""
Monitors the serial lines and tries to parse any data recevied
as JSON. This script first checks the first five ttyACM nodes and
tries to connect. Also connects to our mongo instance to update values
"""
def __init__(self):
# Store each serial reader
self.serial_readers = dict()
# Try to connect to a range of ports
for i in range(5):
port = '/dev/ttyACM{}'.format(i)
self.logger.info('Attempting to connect to serial port: {}'.format(port))
serial_reader = serial.SerialData(port=port, threaded=True)
try:
serial_reader.connect()
self.serial_readers[port] = serial_reader
except:
self.logger.debug('Could not connect to port: {}'.format(port))
# Create the messaging socket
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PUSH)
self.socket.connect("tcp://localhost:9242")
# Connect to sensors db
self.sensors = database.PanMongo().sensors
self._sleep_interval = 1
def run(self):
"""Run by the thread, reads continuously from serial line
"""
try:
while True:
sensor_data = self.get_reading()
self.logger.debug("{}".format(sensor_data))
message = {
"date": datetime.datetime.utcnow(),
"type": "environment",
"data": sensor_data
}
# Send out message on ZMQ
self.socket.send_multipart([
'message',
'',
json.dumps(message)
])
# Mongo insert
self.sensors.insert(message)
# Update the 'current' reading
self.sensors.update(
{"status": "current"},
{"$set": message},
True
)
time.sleep(self._sleep_interval)
except KeyboardInterrupt:
pass
def get_reading(self):
"""
Convenience method to get the sensor data.
Returns:
sensor_data (dict): Dictionary of sensors keyed by port. port->values
"""
# take the current serial sensor information
return self._prepare_sensor_data()
def _prepare_sensor_data(self):
"""
Helper function to return serial sensor info.
Reads each of the connected sensors. If a value is received, attempts
to parse the value as json.
Returns:
sensor_data (dict): Dictionary of sensors keyed by port. port->values
"""
sensor_data = dict()
# Read from all the readers
for port, reader in self.serial_readers.items():
# Get the values
sensor_value = reader.next()
if len(sensor_value) > 0:
try:
data = json.loads(sensor_value.replace('nan', 'null'))
sensor_data[port] = data
except ValueError:
print("Bad JSON: {0}".format(sensor_value))
return sensor_data
if __name__ == "__main__":
widget = ArduinoSerialMonitor()
widget.run()
|
Python
| 0.000002
|
@@ -53,16 +53,51 @@
rt json%0A
+import bson.json_util as json_util%0A
import z
@@ -2110,16 +2110,21 @@
json
+_util
.dumps(m
|
b40d9aaf107fadc477d0a8463c25945c2e83153c
|
Make it easier to override has_sudo_privileges check in middleware
|
django_sudo/middleware.py
|
django_sudo/middleware.py
|
"""
django_sudo.middleware
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2014 by Matt Robenolt.
:license: BSD, see LICENSE for more details.
"""
from django_sudo import COOKIE_NAME
from django_sudo.utils import has_sudo_privileges
class SudoMiddleware(object):
def process_request(self, request):
assert hasattr(request, 'session'), 'django_sudo depends on SessionMiddleware!'
request.is_sudo = lambda: has_sudo_privileges(request)
def process_response(self, request, response):
is_sudo = getattr(request, '_sudo', None)
if is_sudo is None:
return response
# We have explicitly had sudo revoked, so clean up cookie
if is_sudo is False and COOKIE_NAME in request.COOKIES:
response.delete_cookie(COOKIE_NAME)
return response
# Sudo mode has been granted, and we have a token to send back to the user agent
if is_sudo is True and hasattr(request, '_sudo_token'):
token = request._sudo_token
max_age = request._sudo_max_age
response.set_cookie(
COOKIE_NAME, token,
max_age=max_age, # If max_age is None, it's a session cookie
secure=request.is_secure(),
httponly=True, # Not accessible by JavaScript
)
return response
|
Python
| 0.00001
|
@@ -246,24 +246,154 @@
re(object):%0A
+ def has_sudo_privileges(self, request):%0A ## Override me to alter behavior%0A return has_sudo_privileges(request)%0A%0A
def proc
@@ -543,16 +543,21 @@
lambda:
+self.
has_sudo
|
31231afea71b3fd9213b39cf1bb32e10b2a9e843
|
Add Bitpay config model to Django Admin panel
|
djangovirtualpos/admin.py
|
djangovirtualpos/admin.py
|
# coding=utf-8
from django.contrib import admin
from djangovirtualpos.models import VirtualPointOfSale, VPOSRefundOperation, VPOSCeca, VPOSRedsys, VPOSSantanderElavon, VPOSPaypal
admin.site.register(VirtualPointOfSale)
admin.site.register(VPOSRefundOperation)
admin.site.register(VPOSCeca)
admin.site.register(VPOSRedsys)
admin.site.register(VPOSPaypal)
admin.site.register(VPOSSantanderElavon)
|
Python
| 0
|
@@ -172,16 +172,28 @@
OSPaypal
+, VPOSBitpay
%0A%0Aadmin.
@@ -402,10 +402,42 @@
Elavon)%0A
+admin.site.register(VPOSBitpay)%0A
%0A%0A
|
21837bb03e9a41eb8e932ece47fce0081fccba43
|
Update settings_example.py
|
djing/settings_example.py
|
djing/settings_example.py
|
# -*- coding: utf-8 -*
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
from django.core.urlresolvers import reverse_lazy
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!!!!!!!!!!!!!!!!!!!!!!!!YOUR SECRET KEY!!!!!!!!!!!!!!!!!!!!!!!!'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts_app',
'photo_app',
'abonapp',
'tariff_app',
'searchapp',
'devapp',
'mapapp',
'statistics',
'taskapp',
'clientsideapp',
'chatbot',
'django_messages',
'dialing_app'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'djing.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'taskapp.context_proc.get_active_tasks_count',
'global_context_processors.context_processor_additional_profile'
],
},
},
]
WSGI_APPLICATION = 'djing.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
#'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
'ENGINE': 'django.db.backends.mysql',
'NAME': 'djingdb',
'USER': 'USER', # You can change the user name
'PASSWORD': 'PASSWORD', # You can change the password
'HOST': 'localhost'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
SESSION_ENGINE = 'django.contrib.sessions.backends.file'
SESSION_COOKIE_HTTPONLY = True
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Simferopol'
USE_I18N = True
USE_L10N = False
USE_TZ = False
DEFAULT_FROM_EMAIL = 'nerosketch@gmail.com'
# Максимальный загружаемый файл 3.90625M (кратно размеру блока диска 4kb, 4000 блоков)
FILE_UPLOAD_MAX_MEMORY_SIZE = 4096000
# Время жизни сессии, 1 сутки
SESSION_COOKIE_AGE = 60 * 60 * 24
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
if DEBUG:
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static"),)
# Пример вывода: 16 сентября 2012
DATE_FORMAT = 'd E Y'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
DEFAULT_PICTURE = '/static/images/default-avatar.png'
AUTH_USER_MODEL = 'accounts_app.UserProfile'
LOGIN_URL = reverse_lazy('acc_app:login')
LOGIN_REDIRECT_URL = reverse_lazy('acc_app:profile')
LOGOUT_URL = reverse_lazy('acc_app:logout_link')
PAGINATION_ITEMS_PER_PAGE=10
pay_SERV_ID = '<service id>'
pay_SECRET = '<secret>'
DIALING_MEDIA = 'path/to/asterisk_records'
DHCP_TIMEOUT = 14400
DEFAULT_SNMP_PASSWORD = 'public'
TELEGRAM_BOT_TOKEN = 'bot token'
TELEPHONE_REGEXP = r'^\+[7,8,9,3]\d{10,11}$'
|
Python
| 0.178665
|
@@ -2412,18 +2412,16 @@
%0A
- #
'ENGINE
@@ -2461,17 +2461,16 @@
-#
'NAME':
@@ -2507,32 +2507,33 @@
ite3'),%0A
+#
'ENGINE': 'djang
@@ -2554,32 +2554,33 @@
mysql',%0A
+#
'NAME': 'djingdb
@@ -2590,16 +2590,17 @@
+#
'USER':
@@ -2647,16 +2647,17 @@
+#
'PASSWOR
@@ -2711,16 +2711,17 @@
+#
'HOST':
|
3811543581d8c0ce31f7db332444f31802e68b46
|
Bump version to 0.1a15
|
chirptext/__version__.py
|
chirptext/__version__.py
|
# -*- coding: utf-8 -*-
# chirptext's package version information
__author__ = "Le Tuan Anh"
__email__ = "tuananh.ke@gmail.com"
__copyright__ = "Copyright (c) 2012, Le Tuan Anh"
__credits__ = []
__license__ = "MIT License"
__description__ = "ChirpText is a collection of text processing tools for Python."
__url__ = "https://github.com/letuananh/chirptext"
__maintainer__ = "Le Tuan Anh"
__version_major__ = "0.1"
__version__ = "{}a14".format(__version_major__)
__version_long__ = "{} - Alpha".format(__version_major__)
__status__ = "Prototype"
|
Python
| 0
|
@@ -431,9 +431,9 @@
%7B%7Da1
-4
+5
%22.fo
|
ef15a8ba699e10b9f2d059669b63af6f4c768d39
|
Change to console command prompt
|
casspy/admin_commands.py
|
casspy/admin_commands.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Cassoundra: admin-commands
~~~~~~~~~~
Module to handle special commands to control the bot once it is already running.
Created by Joshua Prince, 2017
"""
import discord
from casspy import cassoundra
async def process_input(loop):
while True:
command = await loop.run_in_executor(None, input, "> ")
if str(command).split(" ")[0].lower() == "shutdown":
return
print(await handle(command))
async def handle(cmd: str) -> str:
tok = cmd.split(' ')
try:
if tok[0].lower() == 'shutdown':
return await cmd_shutdown()
elif tok[0].lower() == 'say':
return await cmd_say(tok[1], ' '.join(tok[2:]))
else:
return "Unknown command " + tok[0] + "."
except IndexError:
pass
async def cmd_shutdown() -> str:
raise KeyboardInterrupt
async def cmd_say(channel: str, content: str) -> str:
ch = cassoundra.client.get_channel(channel)
if ch is None:
return '<#{}>: I couldn\'t find that channel!'.format(channel)
if ch.type == discord.ChannelType.voice:
return '<#{}>: Is a voice channel.'.format(channel)
await cassoundra.client.send_message(ch, content)
return '<#{}>: "{}"'.format(channel, content)
|
Python
| 0
|
@@ -357,14 +357,8 @@
nput
-, %22%3E %22
)%0A
|
2d8570f35dd507236d9c3bdc9209da248f572ef2
|
update version
|
cloudaux/__about__.py
|
cloudaux/__about__.py
|
__all__ = [
'__title__',
'__summary__',
'__uri__',
'__version__',
'__author__',
'__email__',
'__license__',
'__copyright__'
]
__title__ = 'cloudaux'
__summary__ = 'Cloud Auxiliary is a python wrapper and orchestration module for interacting with cloud providers'
__uri__ = 'https://github.com/Netflix-Skunkworks/cloudaux'
__version__ = '1.4.19'
__author__ = 'The Cloudaux Developers'
__email__ = 'oss@netflix.com'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2018 %s' % __author__
|
Python
| 0
|
@@ -371,12 +371,11 @@
'1.
-4
+5
.1
-9
'%0A%0A_
|
7931243e5b6b14ea25f3a17bb86a6bb496df082d
|
Fix game over check
|
netscramble/gui.py
|
netscramble/gui.py
|
#!/usr/bin/env python2.7
from gi.repository import Gtk, GLib #pylint: disable=E0611
import time
from netscramble import res
from netscramble import game, scene, grid_view
from netscramble.score_dialog import ScoreDialog
class Timer(object):
"""Context manager for simple benchmarking."""
def __init__(self, print_result):
self.print_result = print_result
self.start_time = None
def __enter__(self):
self.start_time = time.time()
def __exit__(self, _type, _value, _traceback):
millis_elapsed = 1000 * (time.time() - self.start_time)
if self.print_result:
print "{} ms elapsed".format(round(millis_elapsed, 2))
class MainWindow(scene.Scene):
"""Wrapper for the GtkWindow."""
def __init__(self):
builder = Gtk.Builder()
builder.add_from_file(res("glade/window1.glade"))
builder.connect_signals(self)
self.window = builder.get_object("window1")
self.drawing_area = builder.get_object("drawingarea1")
super(MainWindow, self).__init__(self.drawing_area, 60)
self.clicks = 0
self.start_time = None # when the current game started
self.submitted_score = False
self.render_matrix = None
self.render_matrix_inverted = None
self.game_grid = None
self.grid_view = None
self.on_new_game_action_activate(None)
new_game_f = lambda: self.on_new_game_action_activate(None)
self.score_dialog = ScoreDialog(self.window, new_game_f)
self.window.show()
def on_window1_destroy(self, widget, data=None):
"""End process when window is closed."""
Gtk.main_quit()
def on_drawingarea1_button_release_event(self, widget, event, data=None):
"""Handle mouse button presses."""
# TODO: move this logic into GridView
cell_pos = self.grid_view.get_grid_coord_at((event.x, event.y))
if cell_pos and event.button == 1: # left
tile = self.game_grid.get(*cell_pos)
self.grid_view.rotate_cell(cell_pos,
callback=lambda: (self._check_game_over,
self.tick_unlock(tile)))
self.tick_lock(tile)
self.clicks += 1
elif cell_pos and event.button == 3: # right
self.grid_view.toggle_cell_lock(cell_pos)
self.tick_once()
def _check_game_over(self):
"""Add score if the game is over."""
if game.is_game_over(self.game_grid) and not self.submitted_score:
self.submitted_score = True
self.score_dialog.show_and_add_score(
GLib.get_real_name(), int(time.time()), self.clicks,
int(time.time() - self.start_time))
def on_drawingarea1_draw(self, widget, cr, _data=None):
"""Draw in the drawing area."""
width = widget.get_allocated_width()
height = widget.get_allocated_height()
with Timer(False): # TODO
self.grid_view.update(1.0 / 60) # TODO: assumes 60 fps
self.grid_view.draw(cr, (width, height))
def on_new_game_action_activate(self, action, data=None):
"""Start a new game."""
self.clicks = 0
self.start_time = time.time()
self.submitted_score = False
self.game_grid = game.new_game_grid()
self.grid_view = grid_view.GridView(self.game_grid)
self.tick_once()
def on_view_scores_action_activate(self, action, data=None):
"""Show the scores dialog."""
self.score_dialog.show()
def main():
"""Start the game."""
MainWindow()
Gtk.main()
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -2123,16 +2123,18 @@
ame_over
+()
,%0A
|
cfd65e98e694b2ad40e97d06ffdd9096a3dea909
|
Fix flaky import test (#26953)
|
pandas/tests/test_downstream.py
|
pandas/tests/test_downstream.py
|
"""
Testing that we work in the downstream packages
"""
import builtins
import importlib
import subprocess
import sys
import numpy as np # noqa
import pytest
from pandas.compat import PY36
from pandas import DataFrame
from pandas.util import testing as tm
def import_module(name):
# we *only* want to skip if the module is truly not available
# and NOT just an actual import error because of pandas changes
if PY36:
try:
return importlib.import_module(name)
except ModuleNotFoundError: # noqa
pytest.skip("skipping as {} not available".format(name))
else:
try:
return importlib.import_module(name)
except ImportError as e:
if "No module named" in str(e) and name in str(e):
pytest.skip("skipping as {} not available".format(name))
raise
@pytest.fixture
def df():
return DataFrame({'A': [1, 2, 3]})
def test_dask(df):
toolz = import_module('toolz') # noqa
dask = import_module('dask') # noqa
import dask.dataframe as dd
ddf = dd.from_pandas(df, npartitions=3)
assert ddf.A is not None
assert ddf.compute() is not None
def test_xarray(df):
xarray = import_module('xarray') # noqa
assert df.to_xarray() is not None
def test_oo_optimizable():
# GH 21071
subprocess.check_call([sys.executable, "-OO", "-c", "import pandas"])
@tm.network
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_statsmodels():
statsmodels = import_module('statsmodels') # noqa
import statsmodels.api as sm
import statsmodels.formula.api as smf
df = sm.datasets.get_rdataset("Guerry", "HistData").data
smf.ols('Lottery ~ Literacy + np.log(Pop1831)', data=df).fit()
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't:ImportWarning")
def test_scikit_learn(df):
sklearn = import_module('sklearn') # noqa
from sklearn import svm, datasets
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100.)
clf.fit(digits.data[:-1], digits.target[:-1])
clf.predict(digits.data[-1:])
# Cython import warning and traitlets
@tm.network
@pytest.mark.filterwarnings("ignore")
def test_seaborn():
seaborn = import_module('seaborn')
tips = seaborn.load_dataset("tips")
seaborn.stripplot(x="day", y="total_bill", data=tips)
def test_pandas_gbq(df):
pandas_gbq = import_module('pandas_gbq') # noqa
@pytest.mark.xfail(reason="0.7.0 pending")
@tm.network
def test_pandas_datareader():
pandas_datareader = import_module('pandas_datareader') # noqa
pandas_datareader.DataReader(
'F', 'quandl', '2017-01-01', '2017-02-01')
# importing from pandas, Cython import warning
@pytest.mark.filterwarnings("ignore:The 'warn':DeprecationWarning")
@pytest.mark.filterwarnings("ignore:pandas.util:DeprecationWarning")
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
@pytest.mark.skip(reason="gh-25778: geopandas stack issue")
def test_geopandas():
geopandas = import_module('geopandas') # noqa
fp = geopandas.datasets.get_path('naturalearth_lowres')
assert geopandas.read_file(fp) is not None
# Cython import warning
@pytest.mark.filterwarnings("ignore:can't resolve:ImportWarning")
def test_pyarrow(df):
pyarrow = import_module('pyarrow') # noqa
table = pyarrow.Table.from_pandas(df)
result = table.to_pandas()
tm.assert_frame_equal(result, df)
def test_missing_required_dependency(monkeypatch):
# GH 23868
original_import = __import__
def mock_import_fail(name, *args, **kwargs):
if name == "numpy":
raise ImportError("cannot import name numpy")
elif name == "pytz":
raise ImportError("cannot import name some_dependency")
elif name == "dateutil":
raise ImportError("cannot import name some_other_dependency")
else:
return original_import(name, *args, **kwargs)
expected_msg = (
"Unable to import required dependencies:"
"\nnumpy: cannot import name numpy"
"\npytz: cannot import name some_dependency"
"\ndateutil: cannot import name some_other_dependency"
)
import pandas as pd
with monkeypatch.context() as m:
m.setattr(builtins, "__import__", mock_import_fail)
with pytest.raises(ImportError, match=expected_msg):
importlib.reload(pd)
|
Python
| 0.000072
|
@@ -53,24 +53,8 @@
%22%22%22%0A
-import builtins%0A
impo
@@ -3500,27 +3500,16 @@
endency(
-monkeypatch
):%0A #
@@ -3526,900 +3526,332 @@
-original_import = __import__%0A%0A def mock_import_fail(name, *args, **kwargs):%0A if name == %22numpy%22:%0A raise ImportError(%22cannot import name numpy%22)%0A elif name == %22pytz%22:%0A raise ImportError(%22cannot import name some_dependency%22)%0A elif name == %22dateutil%22:%0A raise ImportError(%22cannot import name some_other_dependency%22)%0A else:%0A return original_import(name, *args, **kwargs)%0A%0A expected_msg = (%0A %22Unable to import required dependencies:%22%0A %22%5Cnnumpy: cannot import name numpy%22%0A %22%5Cnpytz: cannot import name some_dependency%22%0A %22%5Cndateutil: cannot import name some_other_dependency%22%0A )%0A%0A import pandas as pd%0A%0A with monkeypatch.context() as m:%0A m.setattr(builtins, %22__import__%22, mock_import_fail)%0A with pytest.raises(ImportError, match=expected_msg):%0A importlib.reload(pd
+# use the -S flag to disable site-packages%0A call = %5B'python', '-S', '-c', 'import pandas'%5D%0A%0A with pytest.raises(subprocess.CalledProcessError) as exc:%0A subprocess.check_output(call, stderr=subprocess.STDOUT)%0A%0A output = exc.value.stdout.decode()%0A assert all(x in output for x in %5B'numpy', 'pytz', 'dateutil'%5D
)%0A
|
0a49c20aa1ea78fbb6f159e8b29167f8d1663408
|
Raise exceptions if answer from server is not as expected
|
netsecus/helper.py
|
netsecus/helper.py
|
from __future__ import unicode_literals, print_function
import base64
import imaplib
import logging
import os
import re
import smtplib
import sys
import tornado.web
from passlib.hash import pbkdf2_sha256
def processVariable(variables, text):
return re.sub(r'\$([a-zA-Z0-9_]+)',
lambda m: processVariable(variables, str(variables[m.group(0)])),
text)
def imapCommand(imapmail, command, *args):
logging.debug("\t%s %s" % (command, " ".join(args)))
args = [a.encode('utf-8') for a in args]
typ, dat = imapmail._simple_command(command, *args)
if command == "UID":
command = args[0]
code, response = imapmail._untagged_response(typ, dat, command)
response = response[0] # the response ships within a list with one element; we need to unpack that.
if "OK" in code:
if response:
return response
return
else:
logging.error("Server responded with Code '%s' for '%s %s'." % (code, command, args))
raise Exception("Server responded with Code '%s' for '%s %s'." % (code, command, args))
return
def smtpMail(config, to, what):
try:
username = config('mail.username')
except KeyError:
username = config('mail.address')
smtpmail = smtplib.SMTP(config("mail.smtp_server"))
smtpmail.ehlo()
smtpmail.starttls()
smtpmail.login(username, config("mail.password"))
smtpmail.sendmail(config("mail.address"), to, what)
smtpmail.quit()
def patch_imaplib():
# imaplib is missing some essential commands.
# Since we just need these passed through to the server, patch them in
imaplib.Commands["MOVE"] = ("SELECTED",)
imaplib.Commands["IDLE"] = ("AUTH", "SELECTED",)
imaplib.Commands["DONE"] = ("AUTH", "SELECTED",)
imaplib.Commands["ENABLE"] = ("AUTH",)
imaplib.Commands["CABABILITY"] = ("AUTH",)
if sys.version_info < (3, 0):
imaplib.IMAP4_SSL.send = imaplibSendPatch
def imaplibSendPatch(self, data):
data = data.encode("utf-8")
bytes = len(data)
while bytes > 0:
sent = self.sslobj.write(data)
if sent == bytes:
break # avoid copy
data = data[sent:]
bytes = bytes - sent
def escapePath(path):
if os.sep in path:
logging.error("Found '%s' in '%s', possible attack." % (os.sep, path))
path.replace(os.sep, "_")
for pathElement in path.split(os.sep):
if pathElement[0] == ".":
logging.error("Found dot at beginning of filename, possible attack.")
pathElement[0] = "_"
return path
def checkResult(imapmail, expected):
assert isinstance(expected, bytes)
line = imapmail.readline()
if expected not in line:
logging.error("'%s' expected, but read '%s'" % (expected, line))
class RequestHandlerWithAuth(tornado.web.RequestHandler):
def _execute(self, transforms, *args, **kwargs):
# executed before everything else.
receivedAuth = self.request.headers.get("Authorization")
if receivedAuth is not None:
authMode, auth_b64 = receivedAuth.split(" ")
if authMode != "Basic":
logging.error("Used other HTTP authmode than 'Basic', '%s'." % authMode)
else:
auth = base64.b64decode(auth_b64.encode('ascii'))
username_b, _, password_b = auth.partition(b":")
username = username_b.decode('utf-8')
password = password_b.decode('utf-8')
users = self.application.users
if username not in users:
logging.debug("Received nonexistent user '%s'." % username)
elif not pbkdf2_sha256.verify(password, users[username]):
logging.error("Failed login for %s from %s." % (username, self.request.remote_ip))
else:
return super(RequestHandlerWithAuth, self)._execute(transforms, *args, **kwargs)
self.set_status(401)
realm = getattr(self.application, 'realm', '')
self.set_header("WWW-Authenticate", "Basic realm=\"%s\"" % realm)
self._transforms = []
self.write("401: Authentifizierung erforderlich.")
self.finish()
def create_imap_conn(server, ssl, debug):
if ssl:
res = imaplib.IMAP4_SSL(server)
else:
res = imaplib.IMAP4(server)
if debug:
send_func = res.send
read_func = res.read
readline_func = res.readline
def _debug_send(data):
print('> %s' % data.decode('utf-8'), end='')
return send_func(data)
def _debug_read(size):
res = read_func(size)
print('< %s' % res.decode('utf-8'), end='')
return res
def _debug_readline():
res = readline_func()
print('< %s' % res.decode('utf-8'), end='')
return res
res.send = _debug_send
res.read = _debug_read
res.readline = _debug_readline
return res
# An error in email handling, e.g. we got the wrong message back, connection interrupted etc.
class MailError(BaseException):
pass
|
Python
| 0.000001
|
@@ -844,33 +844,8 @@
de:%0A
- if response:%0A
@@ -864,31 +864,16 @@
esponse%0A
- return%0A
else
@@ -882,30 +882,22 @@
-logging.error(
+err =
%22Server
@@ -950,33 +950,32 @@
, command, args)
-)
%0A raise E
@@ -977,104 +977,22 @@
ise
-Exception(%22Server responded with Code '%25s' for '%25s %25s'.%22 %25 (code, command, args))%0A return
+MailError(err)
%0A%0A%0Ad
@@ -2625,31 +2625,51 @@
-logging.error(%22
+raise MailError(%22Invalid response:
'%25s' exp
@@ -2679,20 +2679,19 @@
ed, but
-read
+got
'%25s'%22 %25
|
11aa00c96a456c3e3bb9699fc96c61ebbe7574d4
|
fix removal of temp file
|
cerbero/bootstrap/osx.py
|
cerbero/bootstrap/osx.py
|
# cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
import shutil
from cerbero.bootstrap import BootstrapperBase
from cerbero.bootstrap.bootstrapper import register_bootstrapper
from cerbero.config import Distro, DistroVersion
from cerbero.utils import shell
class OSXBootstrapper (BootstrapperBase):
GCC_BASE_URL = 'https://github.com/downloads/kennethreitz/'\
'osx-gcc-installer/'
GCC_TAR = {
DistroVersion.OS_X_MOUNTAIN_LION: 'GCC-10.7-v2.pkg',
DistroVersion.OS_X_LION: 'GCC-10.7-v2.pkg',
DistroVersion.OS_X_SNOW_LEOPARD: 'GCC-10.6.pkg'}
CPANM_URL = 'https://raw.github.com/miyagawa/cpanminus/master/cpanm'
def start(self):
# skip system package install if not needed
if not self.config.distro_packages_install:
return
self._install_perl_deps()
# FIXME: enable it when buildbots are properly configured
return
tar = self.GCC_TAR[self.config.distro_version]
url = os.path.join(self.GCC_BASE_URL, tar)
pkg = os.path.join(self.config.local_sources, tar)
shell.download(url, pkg, check_cert=False)
shell.call('sudo installer -pkg %s -target /' % pkg)
def _install_perl_deps(self):
# Install cpan-minus, a zero-conf CPAN wrapper
cpanm_installer = tempfile.NamedTemporaryFile().name
shell.download(self.CPANM_URL, cpanm_installer)
shell.call('chmod +x %s' % cpanm_installer)
# Install XML::Parser, required for intltool
shell.call("sudo %s XML::Parser" % cpanm_installer)
cpanm_installer.close()
def register_all():
register_bootstrapper(Distro.OS_X, OSXBootstrapper)
|
Python
| 0.000001
|
@@ -2185,13 +2185,8 @@
le()
-.name
%0A
@@ -2228,32 +2228,37 @@
cpanm_installer
+.name
)%0A shell.
@@ -2285,32 +2285,37 @@
cpanm_installer
+.name
)%0A # Inst
@@ -2411,16 +2411,21 @@
nstaller
+.name
)%0A
|
d5a9d238ebdb312d3bc69071227175d0d49756a9
|
Fix the security filtering to handle None as a severity.
|
security_check/main.py
|
security_check/main.py
|
"""Checks the specified image for security vulnerabilities."""
import argparse
import json
import logging
import sys
import subprocess
_GCLOUD_CMD = ['gcloud', 'beta', 'container', 'images', '--format=json']
# Severities
_LOW = 'LOW'
_MEDIUM = 'MEDIUM'
_HIGH = 'HIGH'
_SEV_MAP = {
_LOW: 0,
_MEDIUM: 1,
_HIGH: 2
}
def _run_gcloud(cmd):
full_cmd = _GCLOUD_CMD + cmd
output = subprocess.check_output(full_cmd)
return json.loads(output)
def _check_image(image, severity):
digest = _resolve_latest(image)
full_name = '%s@%s' % (image, digest)
parsed = _run_gcloud(['describe', full_name])
unpatched = 0
for vuln in parsed['vulz_analysis']['FixesAvailable']:
if _filter_severity(vuln['severity'], severity):
unpatched += 1
if unpatched:
logging.info('Found %s unpatched vulnerabilities in %s. Run '
'[gcloud beta container images describe %s] '
'to see the full list.',
unpatched, image, full_name)
return unpatched
def _resolve_latest(image):
parsed = _run_gcloud(['list-tags', image, '--no-show-occurrences'])
for digest in parsed:
if 'latest' in digest['tags']:
return digest['digest']
raise Exception("Unable to find digest of 'latest' tag for %s" % image)
def _filter_severity(sev1, sev2):
"""Returns whether sev1 is higher than sev2"""
return _SEV_MAP[sev1] > _SEV_MAP[sev2]
def _main():
parser = argparse.ArgumentParser()
parser.add_argument('image', help='The image to test')
parser.add_argument('--severity', choices=[_LOW, _MEDIUM, _HIGH],
default=_MEDIUM,
help='The minimum severity to filter on.')
args = parser.parse_args()
logging.basicConfig(level=logging.DEBUG)
return _check_image(args.image, args.severity)
if __name__ == '__main__':
sys.exit(_main())
|
Python
| 0
|
@@ -265,16 +265,39 @@
= 'HIGH'
+%0A_CRITICAL = 'CRITICAL'
%0A%0A_SEV_M
@@ -344,16 +344,35 @@
_HIGH: 2
+,%0A _CRITICAL: 3,
%0A%7D%0A%0A%0Adef
@@ -706,17 +706,21 @@
n parsed
-%5B
+.get(
'vulz_an
@@ -730,10 +730,18 @@
sis'
-%5D%5B
+, %7B%7D).get(
'Fix
@@ -752,17 +752,21 @@
ailable'
-%5D
+, %5B%5D)
:%0A
@@ -1505,16 +1505,27 @@
_MAP
-%5Bsev1%5D
+.get(sev1, _LOW)
%3E
+=
_SE
@@ -1533,14 +1533,24 @@
_MAP
-%5Bsev2%5D
+.get(sev2, _LOW)
%0A%0A%0Ad
@@ -1696,16 +1696,40 @@
verity',
+%0A
choices
@@ -1750,16 +1750,27 @@
M, _HIGH
+, _CRITICAL
%5D,%0A
|
661f048364a44e70f5c70202b392692c58850bee
|
Revert "Revert "Add after hana environment set up (#12878)" (#12900)" (#12901)
|
sap_hana/tests/conftest.py
|
sap_hana/tests/conftest.py
|
# (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from contextlib import closing
from copy import deepcopy
import pytest
from hdbcli.dbapi import Connection as HanaConnection
from datadog_checks.dev import WaitFor, docker_run
from datadog_checks.dev.conditions import CheckDockerLogs
from datadog_checks.sap_hana.queries import (
GlobalSystemBackupProgress,
GlobalSystemConnectionsStatus,
GlobalSystemDiskUsage,
GlobalSystemLicenses,
GlobalSystemRowStoreMemory,
GlobalSystemServiceComponentMemory,
GlobalSystemServiceMemory,
GlobalSystemServiceStatistics,
GlobalSystemVolumeIO,
MasterDatabase,
SystemDatabases,
)
from .common import ADMIN_CONFIG, COMPOSE_FILE, CONFIG, E2E_METADATA, TIMEOUT
class DbManager(object):
def __init__(self, connection_config, schema):
self.connection_args = {
'address': connection_config['server'],
'port': connection_config['port'],
'user': connection_config['username'],
'password': connection_config['password'],
}
self.schema = schema
self.conn = None
def initialize(self):
with closing(self.conn) as conn:
with closing(conn.cursor()) as cursor:
cursor.execute('CREATE RESTRICTED USER datadog PASSWORD "{}"'.format(CONFIG['password']))
cursor.execute('ALTER USER datadog ENABLE CLIENT CONNECT')
cursor.execute('ALTER USER datadog DISABLE PASSWORD LIFETIME')
# Create a role with the necessary monitoring privileges
cursor.execute('CREATE ROLE DD_MONITOR')
cursor.execute('GRANT CATALOG READ TO DD_MONITOR')
for cls in (MasterDatabase, SystemDatabases):
instance = cls()
cursor.execute('GRANT SELECT ON {}.{} TO DD_MONITOR'.format(instance.schema, instance.view))
for cls in (
GlobalSystemBackupProgress,
GlobalSystemLicenses,
GlobalSystemConnectionsStatus,
GlobalSystemDiskUsage,
GlobalSystemServiceMemory,
GlobalSystemServiceComponentMemory,
GlobalSystemRowStoreMemory,
GlobalSystemServiceStatistics,
GlobalSystemVolumeIO,
):
instance = cls(self.schema)
cursor.execute('GRANT SELECT ON {}.{} TO DD_MONITOR'.format(instance.schema, instance.view))
# For custom query test
cursor.execute('GRANT SELECT ON {}.M_DATA_VOLUMES TO DD_MONITOR'.format(self.schema))
# Assign the monitoring role to the user
cursor.execute('GRANT DD_MONITOR TO datadog')
# Trigger a backup
cursor.execute("BACKUP DATA USING FILE ('/tmp/backup')")
def connect(self):
self.conn = HanaConnection(**self.connection_args)
@pytest.fixture(scope='session')
def dd_environment(schema="SYS_DATABASES"):
db = DbManager(ADMIN_CONFIG, schema)
with docker_run(
COMPOSE_FILE,
conditions=[
CheckDockerLogs(COMPOSE_FILE, ['Startup finished!'], wait=5, attempts=120),
WaitFor(db.connect),
db.initialize,
],
env_vars={'PASSWORD': ADMIN_CONFIG['password']},
):
yield CONFIG, E2E_METADATA
@pytest.fixture
def instance():
return deepcopy(CONFIG)
@pytest.fixture
def instance_custom_queries():
instance = deepcopy(CONFIG)
instance['custom_queries'] = [
{
'tags': ['test:sap_hana'],
'query': 'SELECT DATABASE_NAME, COUNT(*) FROM SYS_DATABASES.M_DATA_VOLUMES GROUP BY DATABASE_NAME',
'columns': [{'name': 'db', 'type': 'tag'}, {'name': 'data_volume.total', 'type': 'gauge'}],
'timeout': TIMEOUT,
}
]
return instance
|
Python
| 0
|
@@ -3460,16 +3460,34 @@
ord'%5D%7D,%0A
+ sleep=10,%0A
):%0A
|
da9a50ad2d5a5a254c7a842407d046485e410057
|
Drop excessive import
|
satchless/util/__init__.py
|
satchless/util/__init__.py
|
from decimal import Decimal
import locale
def decimal_format(value, min_decimal_places=0):
decimal_tuple = value.as_tuple()
have_decimal_places = -decimal_tuple.exponent
digits = list(decimal_tuple.digits)
while have_decimal_places < min_decimal_places:
digits.append(0)
have_decimal_places += 1
while have_decimal_places > min_decimal_places and not digits[-1]:
digits = digits[:-1]
have_decimal_places -= 1
return Decimal((decimal_tuple.sign, digits, -have_decimal_places))
|
Python
| 0
|
@@ -24,22 +24,8 @@
imal
-%0Aimport locale
%0A%0Ade
|
b32eab807b54c9c378542474631b3bdbced94456
|
add filter
|
cogs/r9k.py
|
cogs/r9k.py
|
import asyncio
import logging
import re
from cogs.cog import Cog
from utils import unzalgo
logger = logging.getLogger('terminal')
class R9K(Cog):
def __init__(self, bot):
super().__init__(bot)
self._messages = []
self._update_task = asyncio.run_coroutine_threadsafe(self._update_loop(), loop=bot.loop)
self.emote_regex = re.compile(r'<:(\w+):\d+>')
def cog_unload(self):
self._update_task.cancel()
try:
self._update_task.result(timeout=20)
except (TimeoutError, asyncio.CancelledError):
pass
async def _update_loop(self):
while True:
if not self._messages:
await asyncio.sleep(10)
continue
messages = self._messages
self._messages = []
try:
sql = 'INSERT INTO r9k (message) VALUES ($1) ON CONFLICT DO NOTHING'
await self.bot.dbutil.execute(sql, messages, insertmany=True)
except:
logger.exception('Failed to insert r9k')
await asyncio.sleep(10)
@Cog.listener()
async def on_message(self, msg):
# Don't wanna log bot messages
if msg.author.bot:
return
# Gets the content like you see in the client
content = msg.clean_content
# Remove zalgo text
content = unzalgo.unzalgo(content)
content = self.emote_regex.sub(r'\1', content)
self._messages.append((content,))
if self._update_task.done():
self._update_task = asyncio.run_coroutine_threadsafe(self._update_loop(), loop=self.bot.loop)
def setup(bot):
bot.add_cog(R9K(bot))
|
Python
| 0.000002
|
@@ -1156,16 +1156,98 @@
, msg):%0A
+ if not msg.guild or msg.guild.id != 217677285442977792:%0A return
%0A%0A
|
75c0861608871de2a2b1a6b4f2ea89c800dd8c07
|
Make verbose loading messages optional
|
pava/implementation/__init__.py
|
pava/implementation/__init__.py
|
import sys
method_count = 0
def method(argcount, nlocals, stacksize, flags, codestring, constants, names,
varnames, filename, name, firstlineno, lnotab, modules, static):
global method_count
print 'define', name, method_count
method_count += 1
globals_dict = {}
for module_name in modules:
if not '[' in module_name and not '.' in module_name:
globals_dict[module_name] = __import__(module_name, {})
code = new.code(argcount, nlocals, stacksize, flags, codestring, constants, names,
varnames, filename, name, firstlineno, lnotab)
method = new.function(code, globals_dict, name)
return staticmethod(method) if static else method
nan = None
inf = sys.maxint
|
Python
| 0.000037
|
@@ -1,18 +1,44 @@
import
-sys
+new%0Aimport sys%0A%0ADEBUG = False
%0A%0Amethod
@@ -225,24 +225,42 @@
ethod_count%0A
+ if DEBUG:%0A
print 'd
|
c62d69249f7413e2142f7147ccce0872dbbae90a
|
Fix nipap CLI setup.py to include man file
|
nipap-cli/setup.py
|
nipap-cli/setup.py
|
#!/usr/bin/env python
from distutils.core import setup
import nipap_cli
# This is a bloody hack to circumvent a lack of feature with Python distutils.
# Files specified in the data_files list cannot be renamed upon installation
# and we don't want to keep two copies of the .nipaprc file in git
import shutil
shutil.copyfile('nipaprc', '.nipaprc')
setup(
name = 'nipap-cli',
version = nipap_cli.__version__,
description = "NIPAP shell command",
long_description = "A shell command to interact with NIPAP.",
author = nipap_cli.__author__,
author_email = nipap_cli.__author_email__,
license = nipap_cli.__license__,
url = nipap_cli.__url__,
packages = [ 'nipap_cli', ],
keywords = ['nipap_cli', ],
requires = ['pynipap', ],
data_files = [
('/etc/skel/', ['.nipaprc']),
('/usr/bin/', ['helper-nipap', 'nipap']),
('/usr/share/doc/nipap-cli/', ['bash_complete', 'nipaprc'])
],
classifiers = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Intended Audience :: Telecommunications Industry',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.6',
'Topic :: Internet'
]
)
# Remove the .nipaprc put the by the above hack.
import os
os.remove('.nipaprc')
|
Python
| 0
|
@@ -53,27 +53,117 @@
tup%0A
-%0Aimport nipap_cli%0A%0A
+import subprocess%0Aimport sys%0A%0Aimport nipap_cli%0A%0A# return all the extra data files%0Adef get_data_files():%0A%0A
# Th
@@ -237,16 +237,20 @@
tutils.%0A
+
# Files
@@ -318,16 +318,20 @@
llation%0A
+
# and we
@@ -389,16 +389,20 @@
in git%0A
+
import s
@@ -407,16 +407,20 @@
shutil%0A
+
shutil.c
@@ -451,16 +451,680 @@
aprc')%0A%0A
+ # generate man pages using rst2man%0A try:%0A subprocess.call(%5B%22rst2man%22, %22nipap.man.rst%22, %22nipap.1%22%5D)%0A subprocess.call(%5B%22gzip%22, %22-f%22, %22-9%22, %22nipap.1%22%5D)%0A except OSError as exc:%0A print %3E%3E sys.stderr, %22rst2man failed to run:%22, str(exc)%0A sys.exit(1)%0A%0A files = %5B%0A ('/etc/nipap/', %5B'local_auth.db', 'nipap.conf'%5D),%0A ('/usr/sbin/', %5B'nipapd', 'nipap-passwd'%5D),%0A ('/usr/share/nipap/sql/', %5B%0A 'sql/functions.plsql',%0A 'sql/ip_net.plsql',%0A 'sql/clean.plsql'%0A %5D),%0A ('/usr/share/man/man1/', 'nipap.1.gz')%0A %5D%0A%0A return files%0A%0A%0A
setup(%0A
|
d7f78c25448360f8a77417de8ca1b3ddbf99fbd9
|
Remove commented out lines.
|
chaco/cmap_image_plot.py
|
chaco/cmap_image_plot.py
|
#
# (C) Copyright 2013 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in
# LICENSE.txt
#
from numpy import zeros
# Enthought library imports.
from traits.api import Any, Bool, Float, Instance, Property, Tuple
# Local relative imports
from image_plot import ImagePlot
from abstract_colormap import AbstractColormap
from speedups import apply_selection_fade
class CMapImagePlot(ImagePlot):
""" Colormapped image plot. Takes a value data object whose elements are
scalars, and renders them as a colormapped image.
"""
# TODO: Modify ImageData to explicitly support scalar value arrays
#------------------------------------------------------------------------
# Data-related traits
#------------------------------------------------------------------------
# Maps from scalar data values in self.data.value to color tuples
value_mapper = Instance(AbstractColormap)
# Convenience property for value_mapper as color_mapper
color_mapper = Property
# Convenience property for accessing the data range of the mapper.
value_range = Property
# alpha value to use to fade out unselected data points when there is an
# active selection
fade_alpha = Float(0.3)
#fade_background = Tuple((255,255,255))
# RGB color to use to fade out unselected points.
fade_background = Tuple((0,0,0))
# whether to pre-compute the full colormapped RGB(A) image
cache_full_map = Bool(True)
#------------------------------------------------------------------------
# Private Traits
#------------------------------------------------------------------------
# Is the mapped image valid?
_mapped_image_cache_valid = Bool(False)
# Cache of the fully mapped RGB(A) image.
_cached_mapped_image = Any
#------------------------------------------------------------------------
# Public methods
#------------------------------------------------------------------------
def __init__(self, **kwargs):
super(CMapImagePlot, self).__init__(**kwargs)
if self.value_mapper:
self.value_mapper.on_trait_change(self._update_value_mapper,
"updated")
if self.value:
self.value.on_trait_change(self._update_selections,
"metadata_changed")
def set_value_selection(self, val):
""" Sets a range of values in the value data source as selected.
"""
if val is not None:
low, high = val
data = self.value.get_data()
new_mask = (data>=low) & (data<=high)
self.value.metadata["selection_masks"] = [new_mask]
else:
del self.value.metadata["selection_masks"]
self._update_selections()
#------------------------------------------------------------------------
# Base2DPlot interface
#------------------------------------------------------------------------
def _render(self, gc):
""" Ensures that the cached image is valid.
Called before _render() is called. Implements the Base2DPlot interface.
"""
if not self._mapped_image_cache_valid:
if 'selection_masks' in self.value.metadata:
self._compute_cached_image(self.value.metadata['selection_masks'])
else:
self._compute_cached_image()
ImagePlot._render(self, gc)
#------------------------------------------------------------------------
# Private methods
#------------------------------------------------------------------------
def _cmap_values(self, data, selection_masks=None):
""" Maps the data to RGB(A) with optional selection masks overlayed
"""
# get the RGBA values from the color map as uint8
mapped_image = self.value_mapper.map_uint8(data)
if selection_masks is not None:
# construct a composite mask
if len(selection_masks) > 0:
mask = zeros(mapped_image.shape[:2], dtype=bool)
for m in selection_masks:
mask = mask | m
else:
mask = zeros(self._cached_mapped_image.shape[:2], dtype=bool)
# Apply the selection fade, from speedups.py
apply_selection_fade(mapped_image, mask,
self.fade_alpha, self.fade_background)
return mapped_image
def _compute_cached_image(self, selection_masks=None):
""" Updates the cached image.
"""
if self.cache_full_map:
if not self._mapped_image_cache_valid:
self._cached_mapped_image = self._cmap_values(self.value.data,
selection_masks)
self._mapped_image_cache_valid = True
mapped_value = self._cached_mapped_image
ImagePlot._compute_cached_image(self, mapped_value)
else:
self._mapped_image_cache_valid = True
ImagePlot._compute_cached_image(self, self.value.data, mapper=lambda data:
self._cmap_values(data))
def _update_value_mapper(self):
self._mapped_image_cache_valid = False
self._image_cache_valid = False
self.invalidate_draw()
#self.request_redraw()
def _update_selections(self):
self._mapped_image_cache_valid = False
self._image_cache_valid = False
self.invalidate_draw()
#self.request_redraw()
#------------------------------------------------------------------------
# Properties
#------------------------------------------------------------------------
def _get_value_range(self):
return self.value_mapper.range
def _set_value_range(self, val):
self.value_mapper.range = val
def _get_color_mapper(self):
return self.value_mapper
def _set_color_mapper(self, val):
self.value_mapper = val
#------------------------------------------------------------------------
# Event handlers
#------------------------------------------------------------------------
def _value_mapper_changed(self, old, new):
if old is not None:
old.on_trait_change(self._update_value_mapper,
"updated", remove=True)
if new is not None:
new.on_trait_change(self._update_value_mapper, "updated")
if old and new:
if new.range is None and old.range is not None:
new.range = old.range
self._update_value_mapper()
def _value_data_changed_fired(self):
super(CMapImagePlot, self)._value_data_changed_fired()
self._mapped_image_cache_valid = False
return
def _index_data_changed_fired(self):
super(CMapImagePlot, self)._index_data_changed_fired()
self._mapped_image_cache_valid = False
return
def _cache_full_map_changed(self):
self._mapped_image_cache_valid = False
|
Python
| 0
|
@@ -5380,39 +5380,8 @@
aw()
-%0A #self.request_redraw()
%0A%0A
@@ -5533,39 +5533,8 @@
aw()
-%0A #self.request_redraw()
%0A%0A
|
6ece66793b88fb8d03f6caffe1a3f5cc8d945ab6
|
remove commented out print statements
|
citeseerx_citation_network/citations.py
|
citeseerx_citation_network/citations.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Module that defines citations search results from an article"""
import re
import math
import random
import time
import requests
from bs4 import BeautifulSoup
from progressbar import ProgressBar
import permalink
class Citations(permalink.DigitalObjectIdentifier):
"""Citations results from an Article
"""
def __init__(self, article=None, doi=None, url=None,
base_url='http://citeseerx.ist.psu.edu/showciting?doi='):
if article is not None:
assert isinstance(article, object),\
"article param needs to be an object, currently {}".\
format(str(type(article)))
self.article = article
self.doi = article.doi
self.url = url
self.base_url = base_url
else:
super(Citations, self).__init__(doi=doi, url=url,
base_url=base_url)
def _get_result_info_soup(self, find_index=0):
return(self.soup.find_all('div', id='result_info')[find_index])
def _get_results_info_soup_clean(self, result_info_soup):
stripped = result_info_soup.getText().strip()
single_white_space = re.sub('\s+', ' ', stripped)
match = re.match('^Results\s\d+\s-\s\d+\sof\s\d+', single_white_space)
assert match is not None
return(single_white_space)
def _get_page_soup_text(self, url):
if url is None:
r = requests.get(self.url)
else:
r = requests.get(url)
data = r.text
return(BeautifulSoup(data))
def get_all_result_soup(self, save_to=None, list_append=True,
base_result_page_url=None,
base_search_result_url='&sort=cite&start=',
max_pause=120):
"""Iteratively get search result pages
Iteratevely gets the search result pages, by default it will just
print the values to screen and be stored in a variable.
If a CSV or Database handler is passed, it will append to the
file or database at the end of each iteration. This could potentially
be used to save work as the loop progresses
:param save_to: where to save results to, default is None, which will
only save values to a python list.
:type save_to: str
:param list_append: Whether the values should be stored in a list.
:type list_append: bool
:param base_result_page_url: base url of page results. Defaults to
appending the appropriate suffix to self.url (which contains the doi)
:type base_result_page_url: str
:param base_search_ressult_url: appended to the base_result_page_url
:type base_search_result_url: str
:param max_pause: maximum number of seconds to pause between results.
Defaults to 120 seconds (2 minutes)
:type max_pause: int
"""
if base_result_page_url is None:
base_result_page_url = self.url + base_search_result_url
else:
base_result_page_url = base_result_page_url + \
base_search_result_url
assert self.num_page_results.is_integer(),\
'self.num_page_results is not a whole number'
if list_append is True:
list_of_result_soup = []
pbar = ProgressBar()
for page in pbar(range(int(self.num_page_results) + 1)[:3]):
start_citation = page * 10
page_url = base_result_page_url + str(start_citation)
# print(page_url)
list_of_result_soup.\
append(self.get_page_soup(url=page_url, return_method='str'))
# print(list_of_result_soup)
pause_time = random.randrange(max_pause) + random.random()
# print('Pausing for {} seconds.'.format(str(pause_time)))
time.sleep(pause_time)
if list_append is True:
self.list_all_result_page_soup = list_of_result_soup
return(self)
def get_num_results(self, result_info=None,
split1=' of ', idx1=1,
split2=' ', idx2=0):
if result_info is None:
result_info = self.result_info
num_results = result_info.\
split(split1)[idx1].\
split(split2)[idx2]
self.num_results = int(num_results)
print(num_results)
return(self)
def get_num_page_results(self, num_results=None, num_results_per_page=10,
offset=1):
"""Get the number of search page results from a given number of results
:param num_results: number of results from a search, defaults to None
because self.num_results typically needs to be caculated first
:type num_results: int
:param num_results_per_page: number of results per page, default to 10
:type num_results_per_page: int
:param offset: offset value for number of page result calculation.
Default is 1.
:type offset: int
:returns: self
The offset value exists because citeseerx does not count by page
results, but rather it shows the result number to start from.
if the num_results_per_page is 10, then the second page will show
10. This offset is so that we can multiply the num_page_results
value by 10, and know which result number will be on the last page.
"""
if num_results is None:
num_results = self.num_results
self.num_page_results = \
math.ceil(num_results / float(num_results_per_page)) - offset
return(self)
def get_result_info(self, find_index=0):
"""Get the number of results
"""
assert self.soup is not None, "self.soup has a value of None"
result_info_soup = self._get_result_info_soup(find_index)
result_info_soup_clean = self.\
_get_results_info_soup_clean(result_info_soup)
self.result_info = result_info_soup_clean
return(self)
def get_page_soup(self, url=None, return_method='self'):
"""Get the HTML soup of the article
Depending on return_method, the soup would either be returned
as a string, or set to self.soup and return self
If no url is passed, then the relevent class variable will be used
:param url: url to get soup from, defaults to None
:type url: str
:param return_method: how to set the soup - 'self' or 'str'
:type return_method: str
:returns: self
"""
if return_method == 'self':
self.soup = self._get_page_soup_text(url)
return(self)
if return_method == 'str':
return(self._get_page_soup_text(url))
|
Python
| 0.000001
|
@@ -3608,38 +3608,8 @@
on)%0A
- # print(page_url)%0A
@@ -3638,16 +3638,16 @@
_soup.%5C%0A
+
@@ -3720,49 +3720,8 @@
'))%0A
- # print(list_of_result_soup)%0A
@@ -3791,79 +3791,8 @@
m()%0A
- # print('Pausing for %7B%7D seconds.'.format(str(pause_time)))%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.