| commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars) |
|---|---|---|---|---|---|---|---|
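Note on reading the `diff` column: hunk bodies are stored percent-encoded (`%0A` is a newline, `%22` a double quote, `%5B`/`%5D` square brackets, `%3E` a `>`). A minimal standard-library sketch for decoding a fragment (the sample string is taken from the first row below):

from urllib.parse import unquote

hunk_line = "-%0Aimport dask"       # one removal line from the first row
print(unquote(hunk_line))           # "-", then a newline, then "import dask"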
d9a9cb9004ddc20d92441df50d3a0f73432803bb
|
Remove import only used for debugging
|
scripts/mvf_read_benchmark.py
|
scripts/mvf_read_benchmark.py
|
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from builtins import range
import argparse
import logging
import time
import dask
import katdal
from katdal.lazy_indexer import DaskLazyIndexer
import numpy as np

parser = argparse.ArgumentParser()
parser.add_argument('filename')
parser.add_argument('--time', type=int, default=10, help='Number of times to read per batch')
parser.add_argument('--channels', type=int, help='Number of channels to read')
parser.add_argument('--dumps', type=int, help='Number of times to read')
parser.add_argument('--joint', action='store_true', help='Load vis, weights, flags together')
parser.add_argument('--applycal', help='Calibration solutions to apply')
args = parser.parse_args()

logging.basicConfig(level='INFO', format='%(asctime)s [%(levelname)s] %(message)s')
logging.info('Starting')

kwargs = {}
if args.applycal is not None:
    kwargs['applycal'] = args.applycal
f = katdal.open(args.filename, **kwargs)
logging.info('File loaded, shape %s', f.shape)
if args.channels:
    f.select(channels=np.s_[:args.channels])
if args.dumps:
    f.select(dumps=np.s_[:args.dumps])

start = time.time()
for st in range(0, f.shape[0], args.time):
    et = st + args.time
    if args.joint:
        vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], np.s_[st:et])
    else:
        vis = f.vis[st:et]
        weights = f.weights[st:et]
        flags = f.flags[st:et]
    logging.info('Loaded %d dumps', vis.shape[0])
size = np.product(f.shape) * 10
elapsed = time.time() - start
logging.info('Loaded %d bytes in %.3f s (%.3f MB/s)', size, elapsed, size / elapsed / 1e6)
|
Python
| 0
|
@@ -154,20 +154,8 @@
time
-%0Aimport dask
%0A%0Aim
|
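Aside on the row above: the `--joint` path exercises `DaskLazyIndexer.get`, which (per katdal's documentation) evaluates several lazy arrays in one dask computation so that chunks shared by `vis`, `weights` and `flags` are read once. A hedged sketch of the two access patterns the benchmark compares:

keep = np.s_[0:10]

# one shared evaluation for all three arrays
vis, weights, flags = DaskLazyIndexer.get([f.vis, f.weights, f.flags], keep)

# three independent evaluations that may re-read the same chunks
vis, weights, flags = f.vis[keep], f.weights[keep], f.flags[keep]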
cd4078dd7c23e4c2b8bdd985cf0d65b642d5ab75
|
Remove unnecessary prints
|
blox/compile.py
|
blox/compile.py
|
'''blox/compile.py

Creates an optimized programmatically generated template from an html file

Copyright (C) 2015 Timothy Edmund Crosley

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.

'''
import json

from blox.base import Blox, Text, UnsafeText
from blox.all import factory
from xml.dom import minidom
from lxml.etree import HTMLParser, parse, fromstring

parser = HTMLParser()

SCRIPT_TEMPLATE = """# WARNING: DON'T EDIT AUTO-GENERATED
from blox.base import Blox, Text, UnsafeText
class Template(Blox):
{indent}__slots__ = tuple({accessors})
def build(factory):
{indent}template = Template()
{indent}{build_steps}
{indent}return template
"""

def from_file(file_object):
    return parse(file_object, parser=parser).getroot()

def from_filename(file_name):
    return from_file(open(file_name))

def from_string(html):
    return fromstring(html)

def to_python(dom, factory=factory, indent='    '):
    current = [0]

    def increment(element_name=''):
        current[0] += 1
        return ('{0}{1}'.format(element_name, current[0]), factory.get(element_name))

    lines = []
    accessors = []

    def compile_node(node, parent='template'):
        print(node.tag)
        if node.tag == 'title':
            import ipdb;ipdb.set_trace()
        blok_name, blok = increment(node.tag)
        lines.append("{0} = {1}(factory('{2}'))".format(blok_name, parent, node.tag))
        text = (node.text or "").strip().replace('"', '\\"')
        if text:
            if hasattr(blok, 'text'):
                lines.append('{0}.text = "{1}"'.format(blok_name, text))
            else:
                lines.append('{0}(Text("{1}"))'.format(blok_name, text))
        for attribute_name, attribute_value in node.items():
            lines.append('{0}["{1}"] = "{2}"'.format(blok_name, attribute_name.replace('"', '\\"'),
                                                     attribute_value.replace('"', '\\"')))
            if attribute_name == 'accessor':
                accessors.append(attribute_value)
                lines.append('{0}.{1} = {2}'.format(parent, attribute_value, blok_name))
        for child_node in node:
            print(node.tag + " > " + child_node.tag)
            if child_node.tag in getattr(blok, 'blok_attributes', {}):
                attached_child = "{0}.{1}".format(blok_name, blok.blok_attributes[child_node.tag].name)
                for nested_child_node in child_node:
                    compile_node(nested_child_node, parent=attached_child)
                attached_text = (child_node.text or "").strip().replace('"', '\\"')
                if attached_text:
                    if 'text' in dir(blok.blok_attributes[child_node.tag].type):
                        lines.append('{0}.text = "{1}"'.format(attached_child, attached_text))
                    else:
                        lines.append('{0}(Text("{1}"))'.format(attached_child, attached_text))
            else:
                compile_node(child_node, parent=blok_name)
            tail = (child_node.tail or "").strip().replace('"', '\\"')
            if tail:
                lines.append('{0}(Text("{1}"))'.format(blok_name, tail))

    compile_node(dom)
    return SCRIPT_TEMPLATE.format(accessors=json.dumps(accessors),
                                  build_steps="\n{indent}".join(lines).format(indent=indent),
                                  indent=indent)
|
Python
| 0.000002
|
@@ -2132,32 +2132,8 @@
'):%0A
- print(node.tag)%0A
@@ -3089,61 +3089,8 @@
de:%0A
- print(node.tag + %22 %3E %22 + child_node.tag)%0A
|
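Aside on the row above: `compile_node` walks an lxml element tree, so the prints this commit removes were tracing that traversal. A standalone sketch (not from the repo) of the node API the compiler consumes:

from lxml.etree import HTMLParser, fromstring

root = fromstring('<html><body><p accessor="intro">hi</p></body></html>', HTMLParser())
for node in root.iter():
    # tag, attribute pairs and text are what to_python() turns into build steps
    print(node.tag, dict(node.items()), (node.text or '').strip())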
3a3332074c6e753ee6a52d1952a5a45f73e85b20
|
Fix instant test on Mac: use cgi.parse_qs() instead of urlparse.urlparse()
|
chrome/test/functional/instant.py
|
chrome/test/functional/instant.py
|
#!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

import pyauto_functional  # Must be imported before pyauto
import pyauto
import urlparse


class InstantTest(pyauto.PyUITest):
  """TestCase for Omnibox Instant feature."""

  def setUp(self):
    pyauto.PyUITest.setUp(self)
    self.SetPrefs(pyauto.kInstantEnabled, True)

  def _DoneLoading(self):
    info = self.GetInstantInfo()
    return info.get('current') and not info.get('loading')

  def _DoneLoadingGoogleQuery(self, query):
    """Wait for Omnibox Instant to load Google search result
    and verify location URL contains the specified query.

    Args:
      query: Value of query parameter.
             E.g., http://www.google.com?q=hi so query is 'hi'.
    """
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo().get('location')
    if location is not None:
      q = urlparse.parse_qs(location).get('q')
      if q is not None and query in q:
        return True
    return False

  def testInstantNavigation(self):
    """Test that instant navigates based on omnibox input."""
    self.SetOmniboxText('google.com')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.com' in location,
                    msg='No google.com in %s' % location)
    self.SetOmniboxText('google.es')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.es' in location,
                    msg='No google.es in %s' % location)
    # Initiate instant search (at default google.com).
    self.SetOmniboxText('chrome instant')
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue('google.com' in location,
                    msg='No google.com in %s' % location)

  def testInstantDisabledInIncognito(self):
    """Test that instant is disabled in Incognito mode."""
    self.RunCommand(pyauto.IDC_NEW_INCOGNITO_WINDOW)
    self.SetOmniboxText('google.com', windex=1)
    self.assertFalse(self.GetInstantInfo()['active'],
                     'Instant enabled in Incognito mode.')

  def testInstantOverlayNotStoredInHistory(self):
    """Test that instant overlay page is not stored in history."""
    url = self.GetFileURLForPath(os.path.join(self.DataDir(), 'title2.html'))
    self.SetOmniboxText(url)
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    history = self.GetHistoryInfo().History()
    self.assertEqual(0, len(history))

  def testInstantDisabledForJavaScript(self):
    """Test that instant is disabled for javascript URLs."""
    self.SetOmniboxText('javascript:')
    self.assertFalse(self.GetInstantInfo()['active'],
                     'Instant enabled for javascript URL.')

  def testInstantDisablesPopupsOnPrefetch(self):
    """Test that instant disables popups when prefetching."""
    file_url = self.GetFileURLForPath(os.path.join(
        self.DataDir(), 'popup_blocker', 'popup-blocked-to-post-blank.html'))
    self.SetOmniboxText(file_url)
    self.assertTrue(self.WaitUntil(self._DoneLoading))
    location = self.GetInstantInfo()['location']
    self.assertTrue(file_url in location,
                    msg='Prefetched page is not %s' % file_url)
    blocked_popups = self.GetBlockedPopupsInfo()
    self.assertEqual(0, len(blocked_popups),
                     msg='Unexpected popup in instant preview.')

  def testInstantLoadsFor100CharsLongQuery(self):
    """Test that instant loads for search query of 100 characters."""
    query = '#' * 100
    self.SetOmniboxText(query)
    self.assertTrue(self.WaitUntil(self._DoneLoadingGoogleQuery, args=[query]))


if __name__ == '__main__':
  pyauto_functional.Main()
|
Python
| 0.999627
|
@@ -179,16 +179,27 @@
file.%0A%0A
+import cgi%0A
import o
@@ -278,24 +278,8 @@
uto%0A
-import urlparse%0A
%0A%0Acl
@@ -1025,16 +1025,11 @@
q =
-urlparse
+cgi
.par
|
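Aside on the row above: the swap presumably works because `urlparse.parse_qs` only appeared in Python 2.6, while `cgi.parse_qs` is the older home of the same function, so the test also runs on Macs whose system Python predates 2.6. A Python 2 sketch of the call the diff switches to:

import cgi

q = cgi.parse_qs('q=chrome+instant&hl=en').get('q')
print q  # ['chrome instant']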
8f148a6c41480792ea517eb7ba650e111a31bb34
|
use already constructed tender
|
src/openprocurement/api/traversal.py
|
src/openprocurement/api/traversal.py
|
# -*- coding: utf-8 -*-
from openprocurement.api.models import Tender
from pyramid.security import (
    ALL_PERMISSIONS,
    Allow,
    Deny,
    Everyone,
)
from openprocurement.api.utils import error_handler


class Root(object):
    __name__ = None
    __parent__ = None
    __acl__ = [
        # (Allow, Everyone, ALL_PERMISSIONS),
        (Allow, Everyone, 'view_tender'),
        (Deny, 'broker05', 'create_bid'),
        (Deny, 'broker05', 'create_complaint'),
        (Deny, 'broker05', 'create_question'),
        (Deny, 'broker05', 'create_tender'),
        (Allow, 'g:brokers', 'create_bid'),
        # (Allow, 'g:brokers', 'create_complaint'),
        (Allow, 'g:brokers', 'create_question'),
        (Allow, 'g:brokers', 'create_tender'),
        (Allow, 'g:auction', 'auction'),
        (Allow, 'g:auction', 'upload_tender_documents'),
        (Allow, 'g:chronograph', 'edit_tender'),
        (Allow, 'g:Administrator', 'edit_tender'),
        (Allow, 'g:Administrator', 'edit_bid'),
        (Allow, 'g:admins', ALL_PERMISSIONS),
    ]

    def __init__(self, request):
        self.request = request
        self.db = request.registry.db


def get_item(parent, key, request, root):
    request.validated['{}_id'.format(key)] = request.matchdict['{}_id'.format(key)]
    items = [i for i in getattr(parent, '{}s'.format(key), []) if i.id == request.matchdict['{}_id'.format(key)]]
    if not items:
        request.errors.add('url', '{}_id'.format(key), 'Not Found')
        request.errors.status = 404
        raise error_handler(request.errors)
    else:
        if key == 'document':
            request.validated['{}s'.format(key)] = items
        item = items[-1]
        request.validated[key] = item
        request.validated['id'] = request.matchdict['{}_id'.format(key)]
        item.__parent__ = parent
        return item


def factory(request):
    request.validated['tender_src'] = {}
    root = Root(request)
    if not request.matchdict or not request.matchdict.get('tender_id'):
        return root
    request.validated['tender_id'] = request.matchdict['tender_id']
    tender = Tender.load(root.db, request.matchdict['tender_id'])
    if not tender:
        request.errors.add('url', 'tender_id', 'Not Found')
        request.errors.status = 404
        raise error_handler(request.errors)
    tender.__parent__ = root
    request.validated['tender'] = tender
    request.validated['tender_status'] = tender.status
    if request.method != 'GET':
        request.validated['tender_src'] = tender.serialize('plain')
    if request.matchdict.get('award_id'):
        award = get_item(tender, 'award', request, root)
        if request.matchdict.get('complaint_id'):
            complaint = get_item(award, 'complaint', request, root)
            if request.matchdict.get('document_id'):
                return get_item(complaint, 'document', request, root)
            else:
                return complaint
        elif request.matchdict.get('document_id'):
            return get_item(award, 'document', request, root)
        else:
            return award
    elif request.matchdict.get('contract_id'):
        contract = get_item(tender, 'contract', request, root)
        if request.matchdict.get('document_id'):
            return get_item(contract, 'document', request, root)
        else:
            return contract
    elif request.matchdict.get('bid_id'):
        bid = get_item(tender, 'bid', request, root)
        if request.matchdict.get('document_id'):
            return get_item(bid, 'document', request, root)
        else:
            return bid
    elif request.matchdict.get('complaint_id'):
        complaint = get_item(tender, 'complaint', request, root)
        if request.matchdict.get('document_id'):
            return get_item(complaint, 'document', request, root)
        else:
            return complaint
    elif request.matchdict.get('cancellation_id'):
        cancellation = get_item(tender, 'cancellation', request, root)
        if request.matchdict.get('document_id'):
            return get_item(cancellation, 'document', request, root)
        else:
            return cancellation
    elif request.matchdict.get('document_id'):
        return get_item(tender, 'document', request, root)
    elif request.matchdict.get('question_id'):
        return get_item(tender, 'question', request, root)
    elif request.matchdict.get('lot_id'):
        return get_item(tender, 'lot', request, root)
    request.validated['id'] = request.matchdict['tender_id']
    return tender
|
Python
| 0
|
@@ -2108,60 +2108,23 @@
r =
-Tender.load(root.db, request.matchdict%5B'tender_id'%5D)
+request._tender
%0A
@@ -2275,24 +2275,112 @@
est.errors)%0A
+%0A # temporary check%0A if request.validated%5B'tender_id'%5D != tender.id:%0A 1/0%0A%0A
tender._
|
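Aside on the row above: the diff replaces the second `Tender.load` round trip with a `request._tender` attribute that some earlier stage is expected to populate (the temporary `1/0` check guards that expectation). One hedged way to provide such an attribute lazily in Pyramid; the names below are illustrative, not from this repo:

from pyramid.decorator import reify
from pyramid.request import Request

class TenderRequest(Request):
    @reify  # computed on first access, then cached on the request
    def _tender(self):
        return Tender.load(self.registry.db, self.matchdict['tender_id'])

# config.set_request_factory(TenderRequest) would wire this in.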
96539193d7fd88bcee371aec2e52926f0cd033fc
|
rename test_64bit_PE to test_PE
|
contrib/devtools/test-security-check.py
|
contrib/devtools/test-security-check.py
|
#!/usr/bin/env python3
# Copyright (c) 2015-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
import subprocess
import unittest

def write_testcode(filename):
    with open(filename, 'w', encoding="utf8") as f:
        f.write('''
    #include <stdio.h>
    int main()
    {
        printf("the quick brown fox jumps over the lazy god\\n");
        return 0;
    }
    ''')

def call_security_check(cc, source, executable, options):
    subprocess.check_call([cc,source,'-o',executable] + options)
    p = subprocess.Popen(['./security-check.py',executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
    (stdout, stderr) = p.communicate()
    return (p.returncode, stdout.rstrip())

class TestSecurityChecks(unittest.TestCase):
    def test_ELF(self):
        source = 'test1.c'
        executable = 'test1'
        cc = 'gcc'
        write_testcode(source)

        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
                         (1, executable+': failed PIE NX RELRO Canary'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE']),
                         (1, executable+': failed PIE RELRO Canary'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE']),
                         (1, executable+': failed PIE RELRO'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE']),
                         (1, executable+': failed RELRO'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE']),
                         (0, ''))

    def test_64bit_PE(self):
        source = 'test1.c'
        executable = 'test1.exe'
        cc = 'x86_64-w64-mingw32-gcc'
        write_testcode(source)

        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--no-nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA NX'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--no-dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed DYNAMIC_BASE HIGH_ENTROPY_VA'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--no-high-entropy-va']), (1, executable+': failed HIGH_ENTROPY_VA'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,--nxcompat','-Wl,--dynamicbase','-Wl,--high-entropy-va']), (0, ''))

    def test_MACHO(self):
        source = 'test1.c'
        executable = 'test1'
        cc = 'clang'
        write_testcode(source)

        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace', '-Wl,-allow_stack_execute']),
                         (1, executable+': failed PIE NOUNDEFS NX'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace']),
                         (1, executable+': failed PIE NOUNDEFS'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie']),
                         (1, executable+': failed PIE'))
        self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie']),
                         (0, ''))

if __name__ == '__main__':
    unittest.main()
|
Python
| 0.000014
|
@@ -2106,14 +2106,8 @@
est_
-64bit_
PE(s
@@ -2104,32 +2104,32 @@
test_PE(self):%0A
+
source =
@@ -2376,32 +2376,44 @@
h-entropy-va'%5D),
+%0A
(1, executable+
@@ -2588,32 +2588,44 @@
h-entropy-va'%5D),
+%0A
(1, executable+
@@ -2802,16 +2802,28 @@
y-va'%5D),
+%0A
(1, exe
@@ -2992,16 +2992,28 @@
y-va'%5D),
+%0A
(0, '')
|
f44ca7d6a8199465035f90be45177d09da64c973
|
Update python functions for transforming ASTs to functions/Mongo filters
|
app/resonant-laboratory/server/querylang.py
|
app/resonant-laboratory/server/querylang.py
|
"""Utilities for handling query language expressions on the serverside."""
_opfunc = {
'<=': lambda x, y: x <= y,
'<': lambda x, y: x < y,
'>=': lambda x, y: x >= y,
'>': lambda x, y: x > y,
'=': lambda x, y: x == y,
'!=': lambda x, y: x != y
}
def astToFunction(ast):
"""Convert a query language AST to a Python function that implements it."""
operator = ast['operator']
operands = ast['operands']
if operator == 'or':
f0 = astToFunction(operands[0])
f1 = astToFunction(operands[1])
return lambda row: f0(row) or f1(row)
elif operator == 'and':
f0 = astToFunction(operands[0])
f1 = astToFunction(operands[1])
return lambda row: f0(row) and f1(row)
elif operator == 'not':
f = astToFunction(operands)
return lambda row: not f(row)
elif operator == 'in':
field = operands[0]
candidates = operands[1]
return lambda row: field in row and row[field] in candidates
elif operator == 'not in':
field = operands[0]
candidates = operands[1]
return lambda row: field in row and row[field] not in candidates
elif operator in ['<=', '<', '>=', '>', '=', '!=']:
field = operands[0]
value = operands[1]
return lambda row: _opfunc[operator](row[field], value)
_mongo_operators = {
'or': '$or',
'and': '$and',
'in': '$in',
'not in': '$nin',
'<=': '$lte',
'<': '$lt',
'>=': '$gte',
'>': '$gt',
'=': '$eq',
'!=': '$ne'
}
def _astToMongo_helper(ast):
"""Convert a query language AST into an equivalent Mongo filter."""
operator = ast['operator']
operands = ast['operands']
if operator in ['or', 'and']:
left = _astToMongo_helper(operands[0])
right = _astToMongo_helper(operands[1])
return {_mongo_operators[operator]: [left, right]}
elif operator == 'not':
raise TypeError('_astToMongo_helper() cannot operate on an AST with not-nodes.')
elif operator in ['in', 'not in', '<=', '<', '>=', '>', '=', '!=']:
field = operands[0]
value = operands[1]
return {field: {_mongo_operators[operator]: value}}
def _invert(ast):
"""Invert the polarity of a boolean expression."""
operator = ast['operator']
operands = ast['operands']
if operator == 'not':
# To invert a not expression, just remove the not.
return _eliminate_not(operands)
elif operator in ['and', 'or']:
# For and/or expressions, apply DeMorgan's laws.
new_operator = 'and' if operator == 'or' else 'or'
new_operands = map(lambda x: {'operator': 'not', 'operands': x}, operands)
return {'operator': new_operator,
'operands': map(_eliminate_not, new_operands)}
elif operator in ['in', 'not in']:
# For inclusion operators, just switch the one that was being used.
return {'operator': 'in' if operator == 'not in' else 'not in',
'operands': operands}
elif operator[0] in ['<', '>']:
# For comparison operators, flip the operator around.
new_operator = '<' if operator[0] == '>' else '>'
if len(operator) == 1:
new_operator += '='
return {'operator': new_operator,
'operands': operands}
else:
# For equality operators, just switch the operator
return {'operator': '=' if operator == '!=' else '!=',
'operands': operands}
def _eliminate_not(ast):
"""Eliminate all not-nodes in the AST by transforming their contents."""
operator = ast['operator']
operands = ast['operands']
if operator in ['and', 'or']:
# And/or expressions have two boolean operands that must be processed
# recursively.
return {'operator': operator,
'operands': map(_eliminate_not, operands)}
elif operator in ['in', 'not in', '<=', '<', '>=', '>', '=', '!=']:
# Operator expressions that work on constant values stay unchanged.
return ast
else:
# Not expressions lose the not itself and invert the operand.
return _eliminate_not(_invert(operands))
def astToMongo(ast):
"""Run the AST-to-mongo helper function above after converting it to a not-free equivalent AST."""
return _astToMongo_helper(_eliminate_not(ast))
|
Python
| 0
|
@@ -889,33 +889,47 @@
ld = operands%5B0%5D
+%5B'identifier'%5D
%0A
-
candidat
@@ -1065,32 +1065,46 @@
ld = operands%5B0%5D
+%5B'identifier'%5D
%0A candida
@@ -1243,32 +1243,32 @@
%3E', '=', '!='%5D:%0A
-
field =
@@ -1270,32 +1270,46 @@
ld = operands%5B0%5D
+%5B'identifier'%5D
%0A value =
@@ -2157,16 +2157,30 @@
rands%5B0%5D
+%5B'identifier'%5D
%0A
|
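Aside on the row above: a worked example may help. The operand shape follows the diff, whose edits assume field operands are dicts carrying an `'identifier'` key; outputs are shown for Python 2, matching the `map()` usage above:

ast = {'operator': 'and', 'operands': [
    {'operator': '>', 'operands': [{'identifier': 'age'}, 30]},
    {'operator': 'not', 'operands': {'operator': 'in',
                                     'operands': [{'identifier': 'city'}, ['NYC']]}}
]}
# not-elimination rewrites the second clause to 'not in', then the helper
# maps each operator onto its Mongo form:
print(astToMongo(ast))
# {'$and': [{'age': {'$gt': 30}}, {'city': {'$nin': ['NYC']}}]}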
873c5e8bf85a8be5a08852134967d29353ed3009
|
Swap ndcms for generic T3 string.
|
examples/simple.py
|
examples/simple.py
|
from lobster import cmssw
from lobster.core import *

storage = StorageConfiguration(
    output=[
        "hdfs:///store/user/matze/test_shuffle_take29",
        "file:///hadoop/store/user/matze/test_shuffle_take29",
        "root://ndcms.crc.nd.edu//store/user/matze/test_shuffle_take29",
        "srm://T3_US_NotreDame/store/user/matze/test_shuffle_take29",
    ]
)

processing = Category(
    name='processing',
    cores=1,
    runtime=900,
    memory=1000
)

workflows = []

single_mu = Workflow(
    label='single_mu',
    dataset=cmssw.Dataset(
        dataset='/SingleMu/Run2012A-recover-06Aug2012-v1/AOD',
        events_per_task=5000
    ),
    category=processing,
    pset='slim.py',
    publish_label='test',
    merge_size='3.5G',
    outputs=['output.root']
)

workflows.append(single_mu)

config = Config(
    label='shuffle',
    workdir='/tmpscratch/users/matze/test_shuffle_take30',
    plotdir='/afs/crc.nd.edu/user/m/mwolf3/www/lobster/test_shuffle_take29',
    storage=storage,
    workflows=workflows,
    advanced=AdvancedOptions(log_level=1)
)
|
Python
| 0.000001
|
@@ -247,25 +247,23 @@
t://
-ndcms.crc.nd.edu/
+T3_US_NotreDame
/sto
|
1b3ebcdca591cd0c7ce9aa6966aa2a583bf41090
|
raise chunk size if it will cause us to run out of parts, part 2
|
lib/python/dxpy/bindings/dxfile_functions.py
|
lib/python/dxpy/bindings/dxfile_functions.py
|
'''
Helper Functions
****************

These two functions provide functionality for opening an existing
remote file (read-only) and creating a new remote file (write-only).
Both return a remote file handler that can be treated as a file
descriptor. These two functions are essentially useful aliases for
executing simple download and upload operations between the local and
the remote file systems.

'''

import os
from dxpy.bindings import *

def open_dxfile(dxid, project=None, request_size=DEFAULT_REQUEST_SIZE,
                buffer_size=DEFAULT_BUFFER_SIZE):
    '''
    :param dxid: file ID
    :type dxid: string
    :rtype: :class:`dxpy.bindings.dxfile.DXFile`

    Given the object ID of an uploaded file, this function returns a
    remote file handler which can be treated as a read-only file
    descriptor.

    Example::

        with open_dxfile("file-xxxx") as fd:
            for line in fd:
                ...

    Note that this is shorthand for::

        DXFile(dxid)

    '''
    return DXFile(dxid, project=project, request_size=request_size, buffer_size=buffer_size)

def new_dxfile(keep_open=False, request_size=DEFAULT_REQUEST_SIZE,
               buffer_size=DEFAULT_BUFFER_SIZE, **kwargs):
    '''
    :param media_type: Internet Media Type (optional)
    :type media_type: string
    :rtype: :class:`dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Creates a new remote file object that is ready to be written to
    and returns a DXFile object which can be treated as a write-only
    file descriptor. Other optional parameters available (see
    :func:`dxpy.bindings.DXDataObject.new()`).

    Example::

        with new_dxfile(media_type="application/json") as fd:
            fd.write("foo\\n")

    Note that this is shorthand for::

        dxFile = DXFile()
        dxFile.new(**kwargs)

    '''
    dx_file = DXFile(keep_open=keep_open, request_size=request_size, buffer_size=buffer_size)
    dx_file.new(**kwargs)
    return dx_file

def slow_download_dxfile(dxid, filename, chunksize=DEFAULT_REQUEST_SIZE, append=False,
                         **kwargs):
    mode = 'ab' if append else 'wb'
    with DXFile(dxid) as dxfile:
        with open(filename, mode) as fd:
            while True:
                file_content = dxfile.slow_read(chunksize, **kwargs)
                if len(file_content) == 0:
                    break
                fd.write(file_content)

def download_dxfile(dxid, filename, chunksize=DEFAULT_REQUEST_SIZE, append=False,
                    **kwargs):
    '''
    :param dxid: Object ID of a file
    :type dxid: string
    :param filename: Local filename
    :type filename: string
    :param append: Set to true if the local filename is to be appended to
    :type append: boolean

    Downloads the remote file with object ID *dxid* and saves it to
    *filename*.

    Example::

        download_dxfile("file-xxxx", "localfilename.fastq")

    '''
    mode = 'ab' if append else 'wb'
    with DXFile(dxid) as dxfile:
        with open(filename, mode) as fd:
            while True:
                file_content = dxfile.read(chunksize, **kwargs)
                if len(file_content) == 0:
                    break
                fd.write(file_content)

def upload_local_file(filename=None, file=None, media_type=None, keep_open=False,
                      wait_on_close=False, **kwargs):
    '''
    :param filename: Local filename
    :type filename: string
    :param file: File-like object
    :type file: File-like object
    :param media_type: Internet Media Type
    :type media_type: string
    :returns: Remote file handler
    :rtype: :class:`dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Uploads *filename* or reads from *file* into a new file object (with media type
    *media_type* if given) and returns the associated remote file
    handler. In addition, it will set the "name" property of the
    remote file to *filename* or to *file.name* (if it exists).

    Examples:

      dxpy.upload_local_file("/home/ubuntu/reads.fastq.gz")

      with open("reads.fastq") as fh:
          dxpy.upload_local_file(file=fh)

    TODO: Do I want an optional argument to indicate in what size
    chunks the file should be uploaded or in how many pieces?
    '''
    fd = file if filename is None else open(filename, 'rb')

    # Prevent exceeding 10K parts limit
    buffer_size = DEFAULT_BUFFER_SIZE
    try:
        file_size = os.fstat(fd.fileno()).st_size
    except:
        file_size = 0
    request_size = max(DEFAULT_REQUEST_SIZE, file_size/10000)
    if request_size > DEFAULT_REQUEST_SIZE:
        buffer_size = request_size * 4

    dxfile = new_dxfile(keep_open=keep_open, media_type=media_type, buffer_size=buffer_size,
                        request_size=request_size, **kwargs)

    creation_kwargs, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    while True:
        buf = fd.read(dxfile._bufsize)
        if len(buf) == 0:
            break
        dxfile.write(buf, **remaining_kwargs)

    if filename is not None:
        fd.close()

    if not keep_open:
        dxfile.close(block=wait_on_close, **remaining_kwargs)

    if filename is not None:
        dxfile.rename(os.path.basename(filename), **remaining_kwargs)
    else:
        try:
            dxfile.rename(os.path.basename(file.name), **remaining_kwargs)
        except AttributeError:
            pass

    return dxfile

def upload_string(to_upload, media_type=None, keep_open=False,
                  wait_on_close=False, **kwargs):
    """
    :param to_upload: String to upload into a file
    :type to_upload: string
    :param media_type: Internet Media Type
    :type media_type: string
    :returns: Remote file handler
    :rtype: :class:`dxpy.bindings.dxfile.DXFile`

    Additional optional parameters not listed: all those under
    :func:`dxpy.bindings.DXDataObject.new`.

    Uploads the given string *to_upload* into a new file object (with
    media type *media_type* if given) and returns the associated
    remote file handler.

    """
    dxfile = new_dxfile(media_type=media_type, keep_open=keep_open, **kwargs)

    creation_kwargs, remaining_kwargs = dxpy.DXDataObject._get_creation_params(kwargs)

    dxfile.write(to_upload, **remaining_kwargs)
    dxfile.close(block=wait_on_close, **remaining_kwargs)
    return dxfile
|
Python
| 0
|
@@ -4746,13 +4746,12 @@
ize/
-10000
+9999
)%0A
|
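Aside on the row above: the one-character change (`/10000` to `/9999`) is easier to see with numbers. A hedged sketch using floor division, as Python 2's `/` does on ints; the 16 MiB default is a stand-in, not dxpy's actual constant:

D = 16 * 1024 * 1024                  # stand-in for DEFAULT_REQUEST_SIZE
file_size = 10000 * D + 1             # one byte past 10000 default-size parts
ceil_parts = lambda req: -(-file_size // req)

old = max(D, file_size // 10000)      # == D
new = max(D, file_size // 9999)       # == D + 1677: slightly larger chunks

print(ceil_parts(old))                # 10001 -> would exceed the 10K parts limit
print(ceil_parts(new))                # 10000 -> fits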
6beff62ef9741cfe5ed0443250f5a93d04d74bca
|
Create UserCandidate model
|
packages/grid/backend/grid/api/users/models.py
|
packages/grid/backend/grid/api/users/models.py
|
# stdlib
from typing import Optional
from typing import Union

# third party
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from pydantic import BaseModel
from pydantic import EmailStr


class BaseUser(BaseModel):
    email: Optional[EmailStr]
    name: Optional[str]
    role: Union[Optional[int], Optional[str]]  # TODO: Should be int in SyftUser
    daa_pdf: Optional[bytes] = b""

    class Config:
        orm_mode = True


class UserCreate(BaseUser):
    email: EmailStr
    role: str = "Data Scientist"
    name: str
    password: str


class UserUpdate(BaseUser):
    password: Optional[str]
    budget: Optional[float]


class User(BaseUser):
    id: int
    role: Union[int, str]  # TODO: This should be int. Perhaps add role_name instead?
    budget_spent: Optional[float]


class UserPrivate(User):
    private_key: str

    def get_signing_key(self) -> SigningKey:
        return SigningKey(self.private_key.encode(), encoder=HexEncoder)


class UserSyft(User):
    hashed_password: str
    salt: str
    verify_key: str
|
Python
| 0.000002
|
@@ -641,32 +641,127 @@
tional%5Bfloat%5D%0A%0A%0A
+class UserCandidate(BaseUser):%0A email: EmailStr%0A status: str = %22pending%22%0A name: str%0A%0A%0A
class User(BaseU
|
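Aside on the row above: a hedged usage sketch of the `UserCandidate` model this commit introduces (pydantic v1 semantics, matching the `orm_mode` config above):

candidate = UserCandidate(email="ada@example.com", name="Ada Lovelace")
print(candidate.status)  # "pending" -- the declared default
print(candidate.dict())  # includes the fields inherited from BaseUser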
58f7f964a5f830c21f5fcb6c2c1ab854df7644eb
|
Disable translation processes we currently don't use
|
contrib/devtools/update-translations.py
|
contrib/devtools/update-translations.py
|
#!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script from the root of the repository to update all translations from
transifex.
It will do the following automatically:

- fetch all translations using the tx tool
- post-process them into valid and committable format
  - remove invalid control characters
  - remove location tags (makes diffs less noisy)

TODO:
- auto-add new translations to the build system according to the translation process
'''
from __future__ import division, print_function
import subprocess
import re
import sys
import os
import io
import xml.etree.ElementTree as ET

# Name of transifex tool
TX = 'tx'
# Name of source language file
SOURCE_LANG = 'bitcoin_en.ts'
# Directory with locale files
LOCALE_DIR = 'src/qt/locale'

def check_at_repository_root():
    if not os.path.exists('.git'):
        print('No .git directory found')
        print('Execute this script at the root of the repository', file=sys.stderr)
        exit(1)

def fetch_all_translations():
    if subprocess.call([TX, 'pull', '-f']):
        print('Error while fetching translations', file=sys.stderr)
        exit(1)

def find_format_specifiers(s):
    '''Find all format specifiers in a string.'''
    pos = 0
    specifiers = []
    while True:
        percent = s.find('%', pos)
        if percent < 0:
            break
        specifiers.append(s[percent+1])
        pos = percent+2
    return specifiers

def split_format_specifiers(specifiers):
    '''Split format specifiers between numeric (Qt) and others (strprintf)'''
    numeric = []
    other = []
    for s in specifiers:
        if s in {'1','2','3','4','5','6','7','8','9'}:
            numeric.append(s)
        else:
            other.append(s)
    # numeric (Qt) can be present in any order, others (strprintf) must be in specified order
    return set(numeric),other

def sanitize_string(s):
    '''Sanitize string for printing'''
    return s.replace('\n',' ')

def check_format_specifiers(source, translation, errors):
    source_f = split_format_specifiers(find_format_specifiers(source))
    # assert that no source messages contain both Qt and strprintf format specifiers
    # if this fails, go change the source as this is hacky and confusing!
    assert(not(source_f[0] and source_f[1]))
    try:
        translation_f = split_format_specifiers(find_format_specifiers(translation))
    except IndexError:
        errors.append("Parse error in translation '%s'" % sanitize_string(translation))
        return False
    else:
        if source_f != translation_f:
            errors.append("Mismatch between '%s' and '%s'" % (sanitize_string(source), sanitize_string(translation)))
            return False
    return True

def all_ts_files(suffix=''):
    for filename in os.listdir(LOCALE_DIR):
        # process only language files, and do not process source language
        if not filename.endswith('.ts'+suffix) or filename == SOURCE_LANG+suffix:
            continue
        if suffix:  # remove provided suffix
            filename = filename[0:-len(suffix)]
        filepath = os.path.join(LOCALE_DIR, filename)
        yield(filename, filepath)

FIX_RE = re.compile(b'[\x00-\x09\x0b\x0c\x0e-\x1f]')
def remove_invalid_characters(s):
    '''Remove invalid characters from translation string'''
    return FIX_RE.sub(b'', s)

# Override cdata escape function to make our output match Qt's (optional, just for cleaner diffs for
# comparison, disable by default)
_orig_escape_cdata = None
def escape_cdata(text):
    text = _orig_escape_cdata(text)
    text = text.replace("'", '&apos;')
    text = text.replace('"', '&quot;')
    return text

def postprocess_translations(reduce_diff_hacks=False):
    print('Checking and postprocessing...')

    if reduce_diff_hacks:
        global _orig_escape_cdata
        _orig_escape_cdata = ET._escape_cdata
        ET._escape_cdata = escape_cdata

    for (filename,filepath) in all_ts_files():
        os.rename(filepath, filepath+'.orig')

    have_errors = False
    for (filename,filepath) in all_ts_files('.orig'):
        # pre-fixups to cope with transifex output
        parser = ET.XMLParser(encoding='utf-8')  # need to override encoding because 'utf8' is not understood only 'utf-8'
        with open(filepath + '.orig', 'rb') as f:
            data = f.read()
        # remove control characters; this must be done over the entire file otherwise the XML parser will fail
        data = remove_invalid_characters(data)
        tree = ET.parse(io.BytesIO(data), parser=parser)

        # iterate over all messages in file
        root = tree.getroot()
        for context in root.findall('context'):
            for message in context.findall('message'):
                numerus = message.get('numerus') == 'yes'
                source = message.find('source').text
                translation_node = message.find('translation')
                # pick all numerusforms
                if numerus:
                    translations = [i.text for i in translation_node.findall('numerusform')]
                else:
                    translations = [translation_node.text]

                for translation in translations:
                    if translation is None:
                        continue
                    errors = []
                    valid = check_format_specifiers(source, translation, errors)

                    for error in errors:
                        print('%s: %s' % (filename, error))

                    if not valid:  # set type to unfinished and clear string if invalid
                        translation_node.clear()
                        translation_node.set('type', 'unfinished')
                        have_errors = True

                # Remove location tags
                for location in message.findall('location'):
                    message.remove(location)

                # Remove entire message if it is an unfinished translation
                if translation_node.get('type') == 'unfinished':
                    context.remove(message)

        # write fixed-up tree
        # if diff reduction requested, replace some XML to 'sanitize' to qt formatting
        if reduce_diff_hacks:
            out = io.BytesIO()
            tree.write(out, encoding='utf-8')
            out = out.getvalue()
            out = out.replace(b' />', b'/>')
            with open(filepath, 'wb') as f:
                f.write(out)
        else:
            tree.write(filepath, encoding='utf-8')
    return have_errors

if __name__ == '__main__':
    check_at_repository_root()
    fetch_all_translations()
    postprocess_translations()
|
Python
| 0
|
@@ -6088,32 +6088,112 @@
%0A
+ # disable this for dogecoin because we do manual translations%0A #
if translation_
@@ -6234,32 +6234,33 @@
+#
context.remo
@@ -6797,16 +6797,111 @@
ot()%0A
+ # disable the transifex process as long as we sync with bitcoin, but keep postprocessing%0A #
fetch_a
|
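Aside on the row above: a quick illustration of the helpers driving `check_format_specifiers` (outputs rendered as Python 2, which this script targets; set ordering may vary):

print(find_format_specifiers('%1 out of %2 blocks'))  # ['1', '2']
print(split_format_specifiers(['1', '2']))            # (set(['1', '2']), [])
print(split_format_specifiers(['s', 'd']))            # (set([]), ['s', 'd'])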
569448e616aff2d5bc2b0d262b24e4122b32bf11
|
add sip
|
robustus/detail/install_ros.py
|
robustus/detail/install_ros.py
|
# =============================================================================
# COPYRIGHT 2013 Brain Corporation.
# License under MIT license (see LICENSE file)
# =============================================================================

import logging
import os
from requirement import RequirementException
import shutil
import sys
import platform
from utility import run_shell, add_source_ref


def install(robustus, requirement_specifier, rob_file, ignore_index):
    ver, dist = requirement_specifier.version.split('.')

    # check distro
    if ver != 'hydro':
        logging.warn('Robustus is only tested to install ROS hydro.\n'
                     'Still, it will try to install required distribution "%s"' % requirement_specifier.version)

    # install dependencies, may throw
    robustus.execute(['install',
                      'catkin_pkg==0.1.24',
                      'rosinstall==0.6.30',
                      'rosinstall_generator==0.1.4',
                      'wstool==0.0.4',
                      'empy==3.3.2',
                      'rosdep==0.10.24'])

    def in_cache():
        devel_dir = os.path.join(robustus.cache, 'ros-%s' % requirement_specifier.version, 'devel_isolated')
        return os.path.isdir(devel_dir)

    try:
        cwd = os.getcwd()

        # create ros cache
        ros_cache = os.path.join(robustus.cache, 'ros-%s' % requirement_specifier.version)
        if not os.path.isdir(ros_cache):
            os.mkdir(ros_cache)
        os.chdir(ros_cache)

        # build ros if necessary
        if not in_cache() and not ignore_index:
            rosdep = os.path.join(robustus.env, 'bin/rosdep')
            if rosdep is None:
                raise RequirementException('Failed to find rosdep')

            # add ros package sources
            if sys.platform.startswith('linux') and not os.path.isfile('/etc/apt/sources.list.d/ros-latest.list'):
                ubuntu_distr = platform.linux_distribution()[2]
                os.system('sudo sh -c \'echo "deb http://packages.ros.org/ros/ubuntu %s main"'
                          ' > /etc/apt/sources.list.d/ros-latest.list\'' % ubuntu_distr)
                os.system('wget http://packages.ros.org/ros.key -O - | sudo apt-key add -')
                os.system('sudo apt-get update')

            # init rosdep, rosdep can already be initialized resulting in error, that's ok
            os.system('sudo ' + rosdep + ' init')

            # update ros dependencies
            retcode = run_shell(rosdep + ' update',
                                verbose=robustus.settings['verbosity'] >= 1)
            if retcode != 0:
                raise RequirementException('Failed to update ROS dependencies')

            # install desktop version of ROS
            rosinstall_generator = os.path.join(robustus.env, 'bin/rosinstall_generator')
            retcode = run_shell(rosinstall_generator + ' %s --rosdistro %s' % (dist, ver)
                                + ' --deps --wet-only > %s-%s-wet.rosinstall' % (dist, ver),
                                verbose=robustus.settings['verbosity'] >= 1)
            if retcode != 0:
                raise RequirementException('Failed to generate rosinstall file')

            wstool = os.path.join(robustus.env, 'bin/wstool')
            retcode = run_shell(wstool + ' init -j8 src %s-%s-wet.rosinstall' % (dist, ver),
                                verbose=robustus.settings['verbosity'] >= 1)
            if retcode != 0:
                raise RequirementException('Failed to build ROS')

            # resolve dependencies
            retcode = run_shell(rosdep + ' install -r --from-paths src --ignore-src --rosdistro %s -y' % ver,
                                verbose=robustus.settings['verbosity'] >= 1)
            if retcode != 0:
                if platform.machine() == 'armv7l':
                    # Due to the lack of LISP machine for ARM we expect some failures
                    logging.info("No LISP on ARM. Expected not all dependencies to be installed.")
                else:
                    raise RequirementException('Failed to resolve ROS dependencies')

            # create catkin workspace
            rosdir = os.path.join(robustus.env, 'ros')
            py_activate_file = os.path.join(robustus.env, 'bin', 'activate')
            catkin_make_isolated = os.path.join(ros_cache, 'src/catkin/bin/catkin_make_isolated')
            retcode = run_shell('. ' + py_activate_file + ' && ' +
                                catkin_make_isolated + ' --install-space %s --install' % rosdir,
                                verbose=robustus.settings['verbosity'] >= 1)
            if retcode != 0:
                raise RequirementException('Failed to create catkin workspace for ROS')

        os.chdir(cwd)

        # Add ROS settings to activate file
        add_source_ref(robustus, os.path.join(robustus.env, 'ros', 'setup.sh'))
    except RequirementException:
        os.chdir(cwd)
        if robustus.settings['debug']:
            logging.info('Not removing folder %s due to debug flag.' % ros_cache)
        else:
            shutil.rmtree(ros_cache)
        raise
|
Python
| 0.000038
|
@@ -1079,16 +1079,45 @@
0.10.24'
+,%0A 'sip'
%5D)%0A%0A
|
40cbe842a7c3d596bbe07d84666d2146e9d0698b
|
Update or delete max 100 of 300 random domain names.
|
domains/views.py
|
domains/views.py
|
import random
from datetime import datetime

from google.appengine.ext import db
from google.appengine.ext.db import stats

from django.http import HttpResponseRedirect

from ragendja.template import render_to_response
from ragendja.dbutils import get_object_or_404

from domains.models import MAX_NAME_LENGTH, DOMAIN_CHARS, OBSOLETE_ATTRIBUTES
from domains.models import Domain
from domains.utils import random_domains


def index(request):
    # Display list of recent names.
    newest = Domain.all().order('-timestamp').fetch(10)
    oldest = Domain.all().order('timestamp').fetch(5)
    oldest.reverse()
    domain_list = newest + [''] + oldest
    # Recent statistics.
    domain_stats = stats.KindStat.all().filter('kind_name', 'domains_domain')
    domain_stats = domain_stats.order('-timestamp').fetch(3)
    return render_to_response(request, 'domains/index.html', locals())


def detail(request, key_name):
    name = get_object_or_404(Name, key_name=key_name)
    return render_to_response(request, 'domains/detail.html', locals())


def cron(request):
    updated_domains = []
    deleted_domains = []
    query, update_description = random_domains(
        length_choices=[MAX_NAME_LENGTH])
    domains = query.fetch(200)
    count_random = len(domains)
    count_obsolete = 0
    count_languages = 0
    for domain in domains:
        if len(domain.key().name()) > MAX_NAME_LENGTH:
            deleted_domains.append(domain)
            continue
        updated = False
        for attr in OBSOLETE_ATTRIBUTES:
            if hasattr(domain, attr):
                delattr(domain, attr)
                updated = True
        if updated:
            count_obsolete += 1
        if (not hasattr(domain, 'english') or domain.english is None or
                not hasattr(domain, 'spanish') or domain.spanish is None or
                not hasattr(domain, 'french') or domain.french is None or
                not hasattr(domain, 'german') or domain.german is None):
            domain.update_languages()
            count_languages += 1
            updated = True
        if (len(domain.key().name()) > 6 and
                domain.english == 0 and domain.spanish == 0 and
                domain.french == 0 and domain.german == 0):
            deleted_domains.append(domain)
            continue
        if updated:
            domain.timestamp = datetime.now()
            updated_domains.append(domain)
    db.put(updated_domains)
    db.delete(deleted_domains)
    count_updated = len(updated_domains)
    count_deleted = len(deleted_domains)
    domain_list = updated_domains[:10] + [None] + deleted_domains[:10]
    return render_to_response(request, 'domains/index.html', locals())
|
Python
| 0
|
@@ -944,12 +944,14 @@
404(
-Name
+Domain
, ke
@@ -1230,9 +1230,9 @@
tch(
-2
+3
00)%0A
@@ -1337,16 +1337,101 @@
omains:%0A
+ if max(len(updated_domains), len(deleted_domains)) %3E= 100:%0A break%0A
|
1658466215a18504992748f430fe0747a901bd40
|
Move ALLOWED_METRIC_SCRAPE_IPS above secret.py import
|
web3/settings/__init__.py
|
web3/settings/__init__.py
|
"""
Django settings for web3 project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
PROJECT_ROOT = BASE_DIR
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 's-gh_-#s^oq^0*5=y8k&*^l8m9540mvo@m*tazzw%3*o7$y&m0'
ALLOWED_HOSTS = ["director.tjhsst.edu", "127.0.0.1", "localhost"]
# This is the path to the certificate that is used to authenticate with the conductor agent.
CONDUCTOR_CERT_PATH = os.path.join(PROJECT_ROOT, "settings/conductor.pem")
# Maximum number of machines that a non-staff, non-superuser account can create.
MAX_VMS = 5
# If this is set, the variable appears on all pages on Director.
GLOBAL_WARNING = None
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Default database password used in development.
DB_USERNAME = "web3"
DB_PASSWORD = "web3"
# Default project domain, subdomains are used for project websites.
PROJECT_DOMAIN = "tjhsst.io"
# Enables existing accounts to login with a username and password.
# Default to not allow password authentication.
PASSWORD_AUTH = False
# Let's Encrypt Settings
LE_WEBROOT = "/var/www/certbot/"
try:
from .secret import * # noqa
except ImportError:
pass
if "TRAVIS" in os.environ:
DB_USERNAME = "postgres"
DB_PASSWORD = ""
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_extensions',
'social_django',
'simple_history',
'web3',
'web3.apps.auth',
'web3.apps.sites',
'web3.apps.users',
'web3.apps.vms',
'web3.apps.feedback',
'web3.apps.request',
'web3.apps.docs'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'simple_history.middleware.HistoryRequestMiddleware'
]
ROOT_URLCONF = 'web3.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, "templates")
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social_django.context_processors.backends',
'social_django.context_processors.login_redirect',
'web3.apps.context_processors.email'
],
},
},
]
if DEBUG:
INSTALLED_APPS += ('debug_toolbar',)
MIDDLEWARE += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
else:
INSTALLED_APPS += ('raven.contrib.django.raven_compat',)
WSGI_APPLICATION = 'web3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
"default": {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'web3',
'USER': DB_USERNAME,
'PASSWORD': DB_PASSWORD,
'HOST': '127.0.0.1',
'PORT': '5432'
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'US/Eastern'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, 'static/'),
)
SOCIAL_AUTH_PIPELINE = (
'social_core.pipeline.social_auth.social_details',
'social_core.pipeline.social_auth.social_uid',
'social_core.pipeline.social_auth.auth_allowed',
'social_core.pipeline.social_auth.social_user',
'web3.apps.auth.oauth.get_username',
'social_core.pipeline.social_auth.associate_by_email',
'social_core.pipeline.user.create_user',
'social_core.pipeline.social_auth.associate_user',
'social_core.pipeline.social_auth.load_extra_data',
'web3.apps.auth.oauth.create_user_group',
'web3.apps.auth.oauth.add_to_global_group'
)
AUTHENTICATION_BACKENDS = (
'web3.apps.auth.backends.PAMAuthenticationBackend',
'web3.apps.auth.oauth.IonOauth2',
)
AUTH_USER_MODEL = "users.User"
SOCIAL_AUTH_USER_FIELDS = ['username', 'full_name', 'email', 'id', 'is_superuser', 'is_staff']
SOCIAL_AUTH_URL_NAMESPACE = 'social'
LOGIN_URL = '/login/'
LOGIN_REDIRECT_URL = '/'
LOGIN_ERROR_URL = '/login/'
INTERNAL_IPS = ['127.0.0.1']
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = "mail.tjhsst.edu"
EMAIL_PORT = 25
EMAIL_USE_TLS = False # FIXME: use tls
EMAIL_SUBJECT_PREFIX = "[Director]"
EMAIL_FROM = "director-noreply@tjhsst.edu"
EMAIL_FEEDBACK = "director@lists.tjhsst.edu"
EMAIL_CONTACT = "sysadmins@tjhsst.edu"
ALLOWED_METRIC_SCRAPE_IPS = []
|
Python
| 0
|
@@ -1639,16 +1639,48 @@
tbot/%22%0A%0A
+ALLOWED_METRIC_SCRAPE_IPS = %5B%5D%0A%0A
try:%0A
@@ -6264,36 +6264,4 @@
du%22%0A
-%0AALLOWED_METRIC_SCRAPE_IPS = %5B%5D%0A
|
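Aside on the row above: the move matters because star-imports rebind names in source order, so `secret.py` can only override a default that is already defined when the import runs; defined afterwards, the default would clobber the secret value. In sketch form:

ALLOWED_METRIC_SCRAPE_IPS = []  # default, defined first

try:
    from .secret import *  # may rebind ALLOWED_METRIC_SCRAPE_IPS
except ImportError:
    pass  # no secret.py in development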
60886a34f0a35088fb4e90caddbff7f905c0a53c
|
set TEST_TMPDIR inside test directory
|
build-support/run_dist_test.py
|
build-support/run_dist_test.py
|
#!/usr/bin/env python2
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# This script runs on the distributed-test slave and acts
# as a wrapper around run-test.sh.
#
# The distributed testing system can't pass in environment variables
# to commands, so this takes some parameters, turns them into environment
# variables, and then executes the test wrapper.
#
# We also 'cat' the test log upon completion so that the test logs are
# uploaded by the test slave back.

import optparse
import os
import re
import shutil
import subprocess
import sys

ME = os.path.abspath(__file__)
ROOT = os.path.abspath(os.path.join(os.path.dirname(ME), ".."))


def is_elf_binary(path):
  """ Determine if the given path is an ELF binary (executable or shared library) """
  if not os.path.isfile(path) or os.path.islink(path):
    return False
  try:
    with file(path, "rb") as f:
      magic = f.read(4)
    return magic == "\x7fELF"
  except:
    # Ignore unreadable files
    return False


def fix_rpath_component(bin_path, path):
  """
  Given an RPATH component 'path' of the binary located at 'bin_path',
  fix the thirdparty dir to be relative to the binary rather than absolute.
  """
  rel_tp = os.path.relpath(os.path.join(ROOT, "thirdparty/"),
                           os.path.dirname(bin_path))
  path = re.sub(r".*thirdparty/", "$ORIGIN/"+rel_tp + "/", path)
  return path


def fix_rpath(path):
  """
  Fix the RPATH/RUNPATH of the binary located at 'path' so that
  the thirdparty/ directory is properly found, even though we will
  run the binary at a different path than it was originally built.
  """
  # Fetch the original rpath.
  p = subprocess.Popen(["chrpath", path],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.PIPE)
  stdout, stderr = p.communicate()
  if p.returncode != 0:
    return
  rpath = re.search("R(?:UN)?PATH=(.+)", stdout.strip()).group(1)
  # Fix it to be relative.
  new_path = ":".join(fix_rpath_component(path, c) for c in rpath.split(":"))
  # Write the new rpath back into the binary.
  subprocess.check_call(["chrpath", "-r", new_path, path])


def fixup_rpaths(root):
  """
  Recursively walk the directory tree 'root' and fix the RPATH for any
  ELF files (binaries/libraries) that are found.
  """
  for dirpath, dirnames, filenames in os.walk(root):
    for f in filenames:
      p = os.path.join(dirpath, f)
      if is_elf_binary(p):
        fix_rpath(p)


def main():
  p = optparse.OptionParser(usage="usage: %prog [options] <test-name>")
  p.add_option("-e", "--env", dest="env", type="string", action="append",
               help="key=value pairs for environment variables",
               default=[])
  options, args = p.parse_args()
  if len(args) < 1:
    p.print_help(sys.stderr)
    sys.exit(1)

  test_exe = args[0]
  test_name, _ = os.path.splitext(os.path.basename(test_exe))
  test_dir = os.path.dirname(test_exe)

  env = os.environ.copy()
  for env_pair in options.env:
    (k, v) = env_pair.split("=", 1)
    env[k] = v

  # Fix the RPATHs of any binaries. During the build, we end up with
  # absolute paths from the build machine. This fixes the paths to be
  # binary-relative so that we can run it on the new location.
  #
  # It's important to do this rather than just putting all of the thirdparty
  # lib directories into $LD_LIBRARY_PATH below because we need to make sure
  # that non-TSAN-instrumented runtime tools (like 'llvm-symbolizer') do _NOT_
  # pick up the TSAN-instrumented libraries, whereas TSAN-instrumented test
  # binaries (like 'foo_test' or 'kudu-tserver') _DO_ pick them up.
  fixup_rpaths(os.path.join(ROOT, "build"))
  fixup_rpaths(os.path.join(ROOT, "thirdparty"))

  env['LD_LIBRARY_PATH'] = ":".join(
      [os.path.join(ROOT, "build/dist-test-system-libs/"),
       os.path.abspath(os.path.join(test_dir, "..", "lib"))])

  # GTEST_OUTPUT must be canonicalized and have a trailing slash for gtest to
  # properly interpret it as a directory.
  env['GTEST_OUTPUT'] = 'xml:' + os.path.abspath(
      os.path.join(test_dir, "..", "test-logs")) + '/'

  env['ASAN_SYMBOLIZER_PATH'] = os.path.join(ROOT, "thirdparty/installed/uninstrumented/bin/llvm-symbolizer")
  rc = subprocess.call([os.path.join(ROOT, "build-support/run-test.sh")] + args,
                       env=env)
  sys.exit(rc)


if __name__ == "__main__":
  main()
|
Python
| 0.002721
|
@@ -4806,16 +4806,276 @@
+ '/'%0A%0A
+ # Don't pollute /tmp in dist-test setting. If a test crashes, the dist-test slave%0A # will clear up our working directory but won't be able to find and clean up things%0A # left in /tmp.%0A env%5B'TEST_TMPDIR'%5D = os.path.abspath(os.path.join(ROOT, %22test-tmp%22))%0A%0A
env%5B'A
|
7db2f2f9124fd82bbcaf8eabea9ff57306796f58
|
Fix relative path to .gitignore and other minor changes.
|
build/extra_gitignore.py
|
build/extra_gitignore.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.

""" Adds extra patterns to the root .gitignore file.

Reads the contents of the filename given as the first argument and appends
them to the root .gitignore file. The new entries are intended to be additional
ignoring patterns, or negating patterns to override existing entries (man
gitignore for more details).
"""

import os
import sys

MODIFY_STRING = '# The following added by %s\n'

def main(argv):
  if not argv[1]:
    # Special case; do nothing.
    return 0

  modify_string = (MODIFY_STRING % argv[0])
  gitignore_file = os.path.dirname(argv[0]) + '/../.gitignore'
  lines = open(gitignore_file, 'r').readlines()
  for i, line in enumerate(lines):
    if line == modify_string:
      lines = lines[:i]
      break
  lines.append(modify_string)

  f = open(gitignore_file, 'w')
  f.write(''.join(lines))
  f.write(open(argv[1], 'r').read())
  f.close()

if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
Python
| 0.00011
|
@@ -797,16 +797,17 @@
%25s%5Cn'%0A%0A
+%0A
def main
@@ -896,17 +896,16 @@
tring =
-(
MODIFY_S
@@ -919,17 +919,16 @@
argv%5B0%5D
-)
%0A gitig
@@ -972,16 +972,19 @@
+ '/../.
+./.
gitignor
@@ -1069,16 +1069,124 @@
lines):%0A
+ # Look for modify_string in the file to ensure we don't append the extra%0A # patterns more than once.%0A
if l
|
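Aside on the row above: the marker-line trick is what makes repeated runs safe. Everything after `MODIFY_STRING` is regenerated each time, as this standalone sketch of the truncation loop shows:

MODIFY_STRING = '# The following added by %s\n'
marker = MODIFY_STRING % 'build/extra_gitignore.py'
lines = ['*.o\n', marker, 'stale extra pattern\n']
for i, line in enumerate(lines):
    if line == marker:
        lines = lines[:i]  # drop everything from the previous run
        break
print(lines)  # ['*.o\n'] -- ready for a fresh marker + patterns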
b0e71834d1119c2b58624d0e9d674e4aa9e70cd6
|
add option '--exclude-filenames'
|
python/generate_sugar_files.py
|
python/generate_sugar_files.py
|
#!/usr/bin/env python3
# Copyright (c) 2013, 2015, Ruslan Baratov
# All rights reserved.
import argparse
import os
import re
import sys
wiki = 'https://github.com/ruslo/sugar/wiki/Collecting-sources'
base = os.path.basename(__file__)
def first_is_subdirectory_of_second(subdir_name, dir_name):
subdir_name = subdir_name.rstrip(os.sep)
dir_name = dir_name.rstrip(os.sep)
if subdir_name == dir_name:
return True
if not subdir_name.startswith(dir_name):
return False
rest = subdir_name[len(dir_name):]
if rest.startswith(os.sep):
return True
return False
class Generator:
def __init__(self):
self.parser = argparse.ArgumentParser(
description='Generate sugar.cmake files according to directory structure'
)
self.exclude_dirs = []
def parse(self):
self.parser.add_argument(
'--top',
type=str,
required=True,
help='top directory of sources'
)
self.parser.add_argument(
'--var',
type=str,
required=True,
help='variable name'
)
self.parser.add_argument(
'--exclude-dirs',
type=str,
nargs='*',
help='Ignore these directories'
)
def make_header_guard(dir):
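# Turn a path into an upper-case, underscore-separated identifier, e.g. 'src/foo-bar' -> 'SRC_FOO_BAR_'.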
dir = dir.upper()
dir = re.sub(r'\W', '_', dir)
dir = re.sub('_+', '_', dir)
dir = dir.lstrip('_')
dir = dir.rstrip('_')
dir += '_'
return dir
def process_file(relative, source_variable, file_id, filelist, dirlist):
file_id.write(
'# This file generated automatically by:\n'
'# {}\n'
'# see wiki for more info:\n'
'# {}\n\n'.format(base, wiki)
)
relative += '/sugar.cmake'
hg = Generator.make_header_guard(relative)
file_id.write(
'if(DEFINED {})\n'
' return()\n'
'else()\n'
' set({} 1)\n'
'endif()\n\n'.format(hg, hg)
)
if filelist:
file_id.write('include(sugar_files)\n')
if dirlist:
file_id.write('include(sugar_include)\n')
if filelist or dirlist:
file_id.write('\n')
if dirlist:
for x in dirlist:
file_id.write("sugar_include({})\n".format(x))
file_id.write('\n')
if filelist:
file_id.write("sugar_files(\n")
file_id.write(" {}\n".format(source_variable))
for x in filelist:
file_id.write(" {}\n".format(x))
file_id.write(")\n")
def is_excluded(self, dir_name):
for x in self.exclude_dirs:
if first_is_subdirectory_of_second(dir_name, x):
return True
return False
def create(self):
args = self.parser.parse_args()
cwd = os.getcwd()
for x in args.exclude_dirs:
x_abs = os.path.abspath(x)
if not os.path.exists(x_abs):
sys.exit('Path `{}` does not exist'.format(x_abs))
self.exclude_dirs.append(x_abs)
source_variable = args.var
for rootdir, dirlist, filelist in os.walk(args.top):
try:
filelist.remove('sugar.cmake')
except ValueError:
pass # ignore if not in list
try:
filelist.remove('CMakeLists.txt')
except ValueError:
pass # ignore if not in list
rootdir = os.path.abspath(rootdir)
if self.is_excluded(rootdir):
continue
new_dirlist = []
for x in dirlist:
x_abs = os.path.join(rootdir, x)
if not self.is_excluded(x_abs):
new_dirlist.append(x_abs)
relative = os.path.relpath(rootdir, cwd)
with open('{}/sugar.cmake'.format(rootdir), 'w') as file_id:
Generator.process_file(
relative, source_variable, file_id, filelist, new_dirlist
)
def run():
generator = Generator()
generator.parse()
generator.create()
if __name__ == '__main__':
Generator.run()
|
Python
| 0.001368
|
@@ -1184,24 +1184,166 @@
ies'%0A )%0A%0A
+ self.parser.add_argument(%0A '--exclude-filenames',%0A type=str,%0A nargs='*',%0A help='Ignore this filenames'%0A )%0A%0A
def make_h
@@ -2968,204 +2968,320 @@
-source_variable = args.var%0A for rootdir, dirlist, filelist in os.walk(args.top):%0A try:%0A filelist.remove('sugar.cmake')%0A except ValueError:%0A pass # ignore if not in list%0A
+if args.exclude_filenames:%0A exclude_filenames = args.exclude_filenames%0A else:%0A exclude_filenames = %5B%5D%0A exclude_filenames += %5B'sugar.cmake', 'CMakeLists.txt', '.DS_Store'%5D%0A%0A source_variable = args.var%0A for rootdir, dirlist, filelist in os.walk(args.top):%0A for x in exclude_filenames:%0A
@@ -3287,16 +3287,18 @@
try:%0A
+
@@ -3317,26 +3317,13 @@
ove(
-'CMakeLists.txt')%0A
+x)%0A
@@ -3343,16 +3343,18 @@
eError:%0A
+
@@ -3633,28 +3633,24 @@
ist.append(x
-_abs
)%0A%0A rel
|
e3d195f0e828f135d29bbd3a7f1a1ff748a3ebc7
|
Implement expm approximation.
|
m_layer/m_layer.py
|
m_layer/m_layer.py
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# coding=utf-8
"""Code for creating the M-layer as a keras layer."""
import tensorflow as tf
class MLayer(tf.keras.layers.Layer):
"""The M-layer: Lie Algebra generator-embedding and matrix exponentiation.
This is a Keras implementation of the M-layer described in (2020)[1].
#### References
[1]: Thomas Fischbacher, Iulia M. Comsa, Krzysztof Potempa, Moritz Firsching,
Luca Versari, Jyrki Alakuijala "Intelligent Matrix Exponentiation", ICML 2020.
TODO(firsching): add link to paper.
"""
def __init__(self, dim_m, matrix_init=None, with_bias=False, **kwargs):
"""Initializes the instance.
Args:
dim_m: The matrix to be exponentiated in the M-layer has the shape
(dim_m, dim_m).
matrix_init: What initializer to use for the matrix. `None` defaults to
`normal` initialization.
with_bias: Whether a bias should be included in the layer after
exponentiation.
**kwargs: keyword arguments passed to the Keras layer base class.
"""
self._dim_m = dim_m
self._rep_to_exp_tensor = None
self._matrix_init = matrix_init or 'normal'
self._with_bias = with_bias
self._matrix_bias = None
super(MLayer, self).__init__(**kwargs)
def build(self, input_shape):
dim_rep = input_shape[-1]
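# One generator matrix of shape (dim_m, dim_m) per input feature; the layer exponentiates their input-weighted sum.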
self._rep_to_exp_tensor = self.add_weight(
name='rep_to_exp_tensor',
shape=(dim_rep, self._dim_m, self._dim_m),
initializer=self._matrix_init,
trainable=True)
if self._with_bias:
self._matrix_bias = self.add_weight(
name='matrix_bias',
shape=(1, self._dim_m, self._dim_m),
initializer='uniform',
trainable=True)
super(MLayer, self).build(input_shape)
def call(self, x):
if not self._with_bias:
return tf.linalg.expm(
tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor, x))
return tf.linalg.expm(
tf.einsum('amn,...a->...mn', self._rep_to_exp_tensor, x) +
self._matrix_bias)
def compute_output_shape(self, input_shape):
return input_shape[0], self._dim_m, self._dim_m
def get_config(self):
config = dict(super().get_config())
config['dim_m'] = self._dim_m
config['matrix_init'] = self._matrix_init
config['with_bias'] = self._with_bias
return config
|
Python
| 0.000022
|
@@ -635,17 +635,16 @@
g=utf-8%0A
-%0A
%22%22%22Code
@@ -1153,15 +1153,45 @@
elf,
- dim_m,
+%0A dim_m,%0A
mat
@@ -1204,16 +1204,31 @@
it=None,
+%0A
with_bi
@@ -1236,16 +1236,73 @@
s=False,
+%0A matrix_squarings_exp=None,%0A
**kwarg
@@ -1425,24 +1425,16 @@
hape
-%0A
(dim_m,
dim
@@ -1429,16 +1429,24 @@
(dim_m,
+%0A
dim_m).
@@ -1642,24 +1642,161 @@
nentiation.%0A
+ matrix_squarings_exp: None to compute tf.linalg.expm(M), an integer %60k%60 to%0A instead approximate it with (I+M/2**k)**(2**k).%0A
**kwar
@@ -2027,24 +2027,78 @@
bias = None%0A
+ self._matrix_squarings_exp = matrix_squarings_exp%0A
super(ML
@@ -2694,99 +2694,237 @@
-return
+mat =
tf.
-linalg.expm(%0A tf.einsum('amn,...a-%3E...mn', self._rep_to_exp_tensor, x))%0A
+einsum('amn,...a-%3E...mn', self._rep_to_exp_tensor, x)%0A else:%0A mat = tf.einsum('amn,...a-%3E...mn', self._rep_to_exp_tensor,%0A x) + self._matrix_bias%0A if self._matrix_squarings_exp is None:%0A
@@ -2949,102 +2949,281 @@
xpm(
+mat)
%0A
- tf.einsum('amn,...a-%3E...mn', self._rep_to_exp_tensor, x) +%0A self._matrix_bias)
+# Approximation of exp(mat) as (1+mat/k)**k with k = 2**MATRIX_SQUARINGS_EXP%0A mat = mat * 0.5**self._matrix_squarings_exp + tf.eye(self._dim_m)%0A for _ in range(self.matATRIX_SQUARINGS_EXP):%0A mat = tf.einsum('...ij,...jk-%3E...ik', mat, mat)%0A return mat
%0A%0A
@@ -3502,24 +3502,88 @@
._with_bias%0A
+ config%5B'matrix_squarings_exp'%5D = self._matrix_squarings_exp%0A
return c
|
948a28d74c2eae3744edec60f9fbd2319873f33f
|
Remove unnecessary else after returns
|
dploy/linkcmd.py
|
dploy/linkcmd.py
|
"""
The logic and workings behind the link sub-commands
"""
import dploy.actions as actions
import dploy.utils as utils
import dploy.error as error
import dploy.main as main
# pylint: disable=too-few-public-methods
class Link(main.AbstractBaseSubCommand):
"""
Concrete class implementation of the link sub-command
"""
# pylint: disable=too-many-arguments
def __init__(self, source, dest, is_silent=True, is_dry_run=False, ignore_patterns=None):
super().__init__("link", [source], dest, is_silent, is_dry_run, ignore_patterns)
def _is_valid_input(self, sources, dest):
"""
Check to see if the input is valid
"""
return LinkInput(self.errors, self.subcmd).is_valid(sources, dest)
def _collect_actions(self, source, dest):
"""
Concrete method to collect required actions to perform a link
sub-command
"""
if dest.exists():
if utils.is_same_file(dest, source):
self.actions.add(actions.AlreadyLinked(self.subcmd, source, dest))
else:
self.errors.add(error.ConflictsWithExistingFile(self.subcmd, source, dest))
elif dest.is_symlink():
self.errors.add(error.ConflictsWithExistingLink(self.subcmd, source, dest))
elif not dest.parent.exists():
self.errors.add(error.NoSuchDirectoryToSubcmdInto(self.subcmd, dest.parent))
else:
self.actions.add(actions.SymbolicLink(self.subcmd, source, dest))
class LinkInput(main.Input):
"""
Input validator for the link command
"""
def _is_valid_dest(self, dest):
if not dest.parent.exists():
self.errors.add(error.NoSuchFileOrDirectory(self.subcmd, dest.parent))
return False
elif (not utils.is_file_writable(dest.parent)
or not utils.is_directory_writable(dest.parent)):
self.errors.add(error.InsufficientPermissionsToSubcmdTo(self.subcmd, dest))
return False
else:
return True
def _is_valid_source(self, source):
if not source.exists():
self.errors.add(error.NoSuchFileOrDirectory(self.subcmd, source))
return False
elif (not utils.is_file_readable(source)
or not utils.is_directory_readable(source)):
self.errors.add(error.InsufficientPermissions(self.subcmd, source))
return False
else:
return True
|
Python
| 0.004957
|
@@ -2014,34 +2014,16 @@
False%0A%0A
- else:%0A
@@ -2425,34 +2425,16 @@
False%0A%0A
- else:%0A
|
86429b75bea758627eeef930b604e819089435a7
|
fix missing toUpper for location message
|
yowsup/layers/protocol_media/layer.py
|
yowsup/layers/protocol_media/layer.py
|
from yowsup.layers import YowLayer, YowLayerEvent, YowProtocolLayer
from .protocolentities import ImageDownloadableMediaMessageProtocolEntity
from .protocolentities import LocationMediaMessageProtocolEntity
from .protocolentities import VCardMediaMessageProtocolEntity
class YowMediaProtocolLayer(YowProtocolLayer):
def __init__(self):
handleMap = {
"message": (self.recvMessageStanza, self.sendMessageEntity)
}
super(YowMediaProtocolLayer, self).__init__(handleMap)
def __str__(self):
return "Media Layer"
def sendMessageEntity(self, entity):
if entity.getType() == "media":
self.entityToLower(entity)
###recieved node handlers handlers
def recvMessageStanza(self, node):
if node.getAttributeValue("type") == "media":
mediaNode = node.getChild("media")
if mediaNode.getAttributeValue("type") == "image":
entity = ImageDownloadableMediaMessageProtocolEntity.fromProtocolTreeNode(node)
self.toUpper(entity)
elif mediaNode.getAttributeValue("type") == "location":
entity = LocationMediaMessageProtocolEntity.fromProtocolTreeNode(node)
elif mediaNode.getAttributeValue("type") == "vcard":
entity = VCardMediaMessageProtocolEntity.fromProtocolTreeNode(node)
self.toUpper(entity)
|
Python
| 0.000013
|
@@ -1202,32 +1202,69 @@
lTreeNode(node)%0A
+ self.toUpper(entity)%0A
elif
|
64ed998229c4af8a5c3b0ff19e6afad4e71a9bf4
|
Fix typo
|
src/nodeconductor_saltstack/sharepoint/views.py
|
src/nodeconductor_saltstack/sharepoint/views.py
|
from rest_framework import decorators, exceptions, mixins, response, viewsets, status
from rest_framework.status import HTTP_200_OK
from nodeconductor.core.exceptions import IncorrectStateException
from nodeconductor.structure import views as structure_views
from nodeconductor_saltstack.saltstack.views import track_exceptions
from ..saltstack.backend import SaltStackBackendError
from . import models, serializers, tasks, filters
class TenantViewSet(structure_views.BaseOnlineResourceViewSet):
queryset = models.SharepointTenant.objects.all()
serializer_class = serializers.TenantSerializer
filter_class = filters.TenantFilter
def perform_provision(self, serializer):
user_count = serializer.validated_data.pop('user_count')
storage = serializer.validated_data.pop('storage')
tenant = serializer.save()
tenant.set_quota_limit(tenant.Quotas.user_count, user_count)
tenant.set_quota_limit(tenant.Quotas.storage, storage)
backend = tenant.get_backend()
backend.provision(tenant)
def get_serializer_class(self):
serializer_class = super(TenantViewSet, self).get_serializer_class()
if self.action == 'initialize':
serializer_class = serializers.MainSiteCollectionSerializer
return serializer_class
@decorators.detail_route(methods=['post'])
def initialize(self, request, **kwargs):
tenant = self.get_object()
if tenant.initialization_status != models.SharepointTenant.InitializationStatuses.NOT_INITIALIZED:
raise IncorrectStateException("Tenant must be in not initialized to perform initialization operation.")
# create main site collection
serializer_class = self.get_serializer_class()
serializer = serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
storage = serializer.validated_data.pop('storage')
user = serializer.validated_data['user']
main_site_collection = models.SiteCollection.objects.create(
name='Main', description='Main site collection', **serializer.validated_data)
main_site_collection.set_quota_limit(main_site_collection.Quotas.storage, storage)
# TODO: Understand what templates we should use for admin and users site collections
admin_site_collection = models.SiteCollection.objects.create(
name='Admin', user=user, site_url='admin',
template=models.Template.objects.first(), description='Admin site collection')
admin_site_collection.set_quota_limit(admin_site_collection.Quotas.storage, 100)
users_site_collection = models.SiteCollection.objects.create(
name='Users', user=user, site_url='my',
template=models.Template.objects.first(), description='Users site collection')
users_site_collection.set_quota_limit(users_site_collection.Quotas.storage,
100 * tenant.quotas.get(name=tenant.Quotas.user_count).limit)
tenant.initialization_status = models.SharepointTenant.InitializationStatuses.INITIALIZING
tenant.save()
tasks.initialize_tenant.delay(tenant.uuid.hex, main_site_collection.uuid.hex, admin_site_collection.uuid.hex,
users_site_collection.uuid.hex)
return response.Response({'status': 'Initialization was scheduled successfully.'}, status=status.HTTP_200_OK)
class TemplateViewSet(structure_views.BaseServicePropertyViewSet):
queryset = models.Template.objects.all()
serializer_class = serializers.TemplateSerializer
lookup_field = 'uuid'
class UserViewSet(viewsets.ModelViewSet):
queryset = models.User.objects.all()
serializer_class = serializers.UserSerializer
filter_class = filters.UserFilter
lookup_field = 'uuid'
def perform_create(self, serializer):
tenant = serializer.validated_data['tenant']
backend = tenant.get_backend()
if tenant.state != models.SharepointTenant.States.ONLINE:
raise IncorrectStateException("Tenant must be online to perform user creation")
try:
backend_user = backend.users.create(
first_name=serializer.validated_data['first_name'],
last_name=serializer.validated_data['last_name'],
username=serializer.validated_data['username'],
email=serializer.validated_data['email'])
except SaltStackBackendError as e:
raise exceptions.APIException(e.traceback_str)
else:
user = serializer.save()
user.password = backend_user.password
user.admin_id = backend_user.admin_id
user.backend_id = backend_user.id
user.save()
def perform_update(self, serializer):
user = self.get_object()
backend = user.tenant.get_backend()
try:
new_password = serializer.validated_data.get('password', None)
if new_password and user.password != new_password:
backend.users.change_password(id=user.backend_id, password=new_password)
changed = {k: v for k, v in serializer.validated_data.items()
if v and getattr(user, k) != v and k != 'password'}
backend.users.change(admin_id=user.admin_id, **changed)
except SaltStackBackendError as e:
raise exceptions.APIException(e.traceback_str)
else:
serializer.save()
def perform_destroy(self, user):
backend = user.tenant.get_backend()
try:
backend.users.delete(id=user.backend_id)
except SaltStackBackendError as e:
raise exceptions.APIException(e.traceback_str)
else:
user.delete()
# XXX: put was added as portal has a temporary bug with widget update
@decorators.detail_route(methods=['post', 'put'])
@track_exceptions
def password(self, request, pk=None, **kwargs):
user = self.get_object()
backend = backend = user.tenant.get_backend()
new_password = backend.users.reset_password(id=user.backend_id)
user.password = new_password.password
user.save()
data = serializers.UserPasswordSerializer(instance=user, context={'request': request}).data
return response.Response(data, status=HTTP_200_OK)
class SiteCollectionViewSet(mixins.CreateModelMixin,
mixins.RetrieveModelMixin,
mixins.DestroyModelMixin,
mixins.ListModelMixin,
viewsets.GenericViewSet):
queryset = models.SiteCollection.objects.all()
serializer_class = serializers.SiteCollectionSerializer
lookup_field = 'uuid'
def perform_create(self, serializer):
user = serializer.validated_data['user']
template = serializer.validated_data.pop('template')
backend = user.tenant.get_backend()
if user.tenant.state != models.SharepointTenant.States.ONLINE:
raise IncorrectStateException("Tenant must be in stable state to perform site creation")
try:
max_quota = serializer.validated_data.pop('max_quota')
backend_site = backend.site_collections.create(
admin_id=user.admin_id,
template_code=template.code,
site_url=serializer.validated_data['site_url'],
name=serializer.validated_data['name'],
description=serializer.validated_data['description'],
warn_quota=serializer.validated_data.pop('warn_quota'),
max_quota=max_quota)
except SaltStackBackendError as e:
raise exceptions.APIException(e.traceback_str)
else:
site = serializer.save()
site.site_url = backend_site.url
site.set_quota_limit(site.Quotas.storage_size, max_quota)
site.save()
def perform_destroy(self, site):
backend = site.user.tenant.get_backend()
try:
backend.site_collections.delete(url=site.site_url)
except SaltStackBackendError as e:
raise exceptions.APIException(e.traceback_str)
else:
site.delete()
|
Python
| 0.998143
|
@@ -6054,26 +6054,16 @@
ackend =
- backend =
user.te
@@ -8269,17 +8269,16 @@
site.delete()
-%0A
|
d17e25f899255bb361f226a022ace0eaa75a9870
|
Remove accidentally left-in opt kwarg
|
numba/cuda/tests/cudapy/test_optimization.py
|
numba/cuda/tests/cudapy/test_optimization.py
|
import numpy as np
from numba.cuda.testing import skip_on_cudasim, CUDATestCase
from numba import cuda, float64
import unittest
def kernel_func(x):
x[0] = 1
def device_func(x, y, z):
return x * y + z
# Fragments of code that are removed from kernel_func's PTX when optimization
# is on
removed_by_opt = ('__local_depot0', 'call.uni', 'st.param.b64')
@skip_on_cudasim('Simulator does not optimize code')
class TestOptimization(CUDATestCase):
def test_eager_opt(self):
# Optimization should occur by default
kernel = cuda.jit((float64[::1],))(kernel_func)
ptx = kernel.inspect_asm()
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertNotIn(fragment, ptx)
def test_eager_noopt(self):
# Optimization disabled
kernel = cuda.jit((float64[::1],), opt=False)(kernel_func)
ptx = kernel.inspect_asm()
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertIn(fragment, ptx)
def test_lazy_opt(self):
# Optimization should occur by default
kernel = cuda.jit(opt=True)(kernel_func)
x = np.zeros(1, dtype=np.float64)
kernel[1, 1](x)
# Grab the PTX for the one definition that has just been jitted
ptx = next(iter(kernel.inspect_asm().items()))[1]
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertNotIn(fragment, ptx)
def test_lazy_noopt(self):
# Optimization disabled
kernel = cuda.jit(opt=False)(kernel_func)
x = np.zeros(1, dtype=np.float64)
kernel[1, 1](x)
# Grab the PTX for the one definition that has just been jitted
ptx = next(iter(kernel.inspect_asm().items()))[1]
for fragment in removed_by_opt:
with self.subTest(fragment=fragment):
self.assertIn(fragment, ptx)
def test_device_opt(self):
# Optimization should occur by default
device = cuda.jit(device=True)(device_func)
ptx = device.inspect_ptx((float64, float64, float64)).decode('utf-8')
self.assertIn('fma.rn.f64', ptx)
def test_device_noopt(self):
# Optimization disabled
device = cuda.jit(device=True, opt=False)(device_func)
ptx = device.inspect_ptx((float64, float64, float64)).decode('utf-8')
# Fused-multiply adds should be disabled when not optimizing
self.assertNotIn('fma.rn.f64', ptx)
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1169,18 +1169,8 @@
jit(
-opt=True)(
kern
|
50a527c435a56d2056765d33de988a7a5f0450f5
|
Add s3 transport.
|
gitbigfile/transport.py
|
gitbigfile/transport.py
|
# -*- coding: utf-8 -*-
"""
gitbigfile transport module
This module defines git-bigfile transports.
Each transport should implement the methods defined in the Transport class.
"""
import os
import sys
import errno
import shutil
try:
import paramiko
PARAMIKO = True
except ImportError:
PARAMIKO = False
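# Configuration keys each transport backend requires.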
MANDATORY_OPTIONS = {'local': ['path'],
'sftp': ['hostname', 'username', 'path']
}
class Transport(object):
"""A Transport subclass should implement the following methods"""
def get(self, sha, local_file):
"""Copy the sha file from the server"""
raise NotImplementedError
def put(self, local_file, sha):
"""Copy the the local file to the server"""
raise NotImplementedError
def exists(self, sha):
"""Return True if the sha file exists on the server"""
raise NotImplementedError
def pushed(self):
"""Return the list of pushed files"""
raise NotImplementedError
class Local(Transport):
def __init__(self, path):
self.path = path
def _get_file_path(self, sha):
"""Return the path of sha on the server"""
return os.path.join(self.path, sha)
def get(self, sha, local_file):
"""Copy the sha file from the server"""
shutil.copy(self._get_file_path(sha), local_file)
def put(self, local_file, sha):
"""Copy the the local file to the server"""
shutil.copy(local_file, self._get_file_path(sha))
def exists(self, sha):
"""Return True if the sha file exists on the server"""
return os.path.isfile(self._get_file_path(sha))
def pushed(self):
"""Return the list of pushed files"""
return os.listdir(self.path)
class Sftp(Transport):
def __init__(self, path, **ssh_kwargs):
self.sshclient = None
self.sftpclient = None
self.path = path
self.ssh_kwargs = ssh_kwargs
if not PARAMIKO:
sys.stderr.write('paramiko is required to use sftp transport\n')
sys.exit(1)
def _connect(self):
"""Create a ssh client and a sftp client"""
if self.sftpclient is None:
self.sshclient = paramiko.SSHClient()
self.sshclient.load_system_host_keys()
self.sshclient.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sshclient.connect(**self.ssh_kwargs)
self.sftpclient = self.sshclient.open_sftp()
def _get_file_path(self, sha):
"""Return the path of sha on the server"""
return os.path.join(self.path, sha)
def get(self, sha, local_file):
"""Copy the sha file from the server"""
self._connect()
remote_file = self._get_file_path(sha)
self.sftpclient.get(remote_file, local_file)
def put(self, local_file, sha):
"""Copy the the local file to the server"""
self._connect()
remote_file = self._get_file_path(sha)
self.sftpclient.put(local_file, remote_file)
def exists(self, sha):
"""Return True if the sha file exists on the server"""
self._connect()
remote_file = self._get_file_path(sha)
try:
self.sftpclient.stat(remote_file)
except IOError, e:
if e.errno == errno.ENOENT:
return False
raise
else:
return True
def pushed(self):
"""Return the list of pushed files"""
self._connect()
return self.sftpclient.listdir(self.path)
def close(self):
"""Close the sftp and ssh connection"""
if self.sftpclient is not None:
self.sftpclient.close()
self.sshclient.close()
self.sftpclient = None
def __del__(self):
"""Attempt to clean up if the connection was not closed"""
self.close()
|
Python
| 0.999631
|
@@ -411,16 +411,94 @@
'path'%5D
+,%0A 's3': %5B'access-key', 'secret-key-path', 'bucket-name'%5D,
%0A
@@ -3949,16 +3949,1303 @@
self.close()%0A
+%0A%0Aclass S3(Transport):%0A %22%22%22Use boto to save and retrieve files via s3.%22%22%22%0A%0A def __init__(self, **s3_kwargs):%0A import boto%0A secret_key_path = os.path.expanduser(s3_kwargs%5B'secret-key-path'%5D)%0A if not os.path.exists(secret_key_path):%0A sys.exit(%22You must install the s3 secret key at %25s%22%0A %25 secret_key_path)%0A with open(secret_key_path) as f:%0A secret_key = f.read().strip()%0A self.boto_conn = boto.connect_s3(s3_kwargs%5B'access-key'%5D, secret_key)%0A self.bucket = self.boto_conn.get_bucket(s3_kwargs%5B'bucket-name'%5D)%0A self.key_class = boto.s3.key.Key%0A%0A def get(self, sha, local_file):%0A %22%22%22Copy the sha file from the server%22%22%22%0A key = self.key_class(self.bucket)%0A key.key = sha%0A key.get_contents_to_filename(local_file)%0A%0A def put(self, local_file, sha):%0A %22%22%22Copy the the local file to the server%22%22%22%0A key = self.key_class(self.bucket)%0A key.key = sha%0A key.set_contents_from_filename(local_file)%0A%0A def exists(self, sha):%0A %22%22%22Return True if the sha file exists on the server%22%22%22%0A return sha in self.pushed()%0A%0A def pushed(self):%0A %22%22%22Return the list of pushed files%22%22%22%0A return %5Bk.key for k in self.bucket.list()%5D%0A
|
4f42193b460da9c86222ddd689b2645f9a50b6e2
|
Update poll-sensors.py
|
cron/poll-sensors.py
|
cron/poll-sensors.py
|
#!/usr/bin/env python
import MySQLdb
import datetime
import urllib2
import os
servername = "localhost"
username = "pi"
password = "password"
dbname = "pi_heating_db"
t = datetime.datetime.now().strftime('%s')
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cnx.autocommit(True)
cursorread = cnx.cursor()
query = ("SELECT * FROM sensors")
cursorread.execute(query)
results = cursorread.fetchall()
cursorread.close()
cnx.close()
for i in results:
sensor_ip = i[3]
sensor_ref = i[1]
sensor_id = i[0]
sensor_url = "http://"+sensor_ip+":8080/value.php?id="+sensor_ref
#print sensor_url
try:
data = float( urllib2.urlopen(sensor_url).read() )
except:
data = 'na'
print data
print sensor_id
if( data != 'na' ):
print "database"
#print sensor_id
sql = "UPDATE sensors SET value='"+str(data)+"' WHERE id='"+str(sensor_id)+"';"
sql = "UPDATE sensors SET value='666' WHERE id='"+str(sensor_id)+"';"
print sql
#cursorwrite.execute( sql )
try:
cnx = MySQLdb.connect(host=servername, user=username, passwd=password, db=dbname)
cnx.autocommit(True)
cursorwrite = cnx.cursor()
cursorwrite.execute( sql )
print("affected rows = {}".format(cursorwrite.rowcount))
cursorwrite.close()
cnx.close()
#rows = cur.fetchall()
except MySQLdb.Error, e:
try:
print "MySQL Error [%d]: %s" % (e.args[0], e.args[1])
except IndexError:
print "MySQL Error: %s" % str(e)
print "database done"
filename = '/home/pi/pi-heating-hub/data/s-'+str(sensor_id)+'.rrd'
print filename
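# Create the RRD on first use: 60 s step with MIN/AVERAGE/MAX archives at three resolutions.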
if( not os.path.exists( filename ) ):
print ( os.path.exists( filename ))
os.system('/usr/bin/rrdtool create '+filename+' --step 60 \
--start now \
DS:data:GAUGE:120:U:U \
RRA:MIN:0.5:1:10080 \
RRA:MIN:0.5:5:51840 \
RRA:MIN:0.5:60:8760 \
RRA:AVERAGE:0.5:1:10080 \
RRA:AVERAGE:0.5:5:51840 \
RRA:AVERAGE:0.5:60:8760 \
RRA:MAX:0.5:1:10080 \
RRA:MAX:0.5:5:51840 \
RRA:MAX:0.5:60:8760')
if( data != 'na' ):
print"rrd"
os.system('/usr/bin/rrdtool update '+filename+" "+str(t)+':'+str(data))
|
Python
| 0
|
@@ -921,81 +921,8 @@
';%22%0A
- sql = %22UPDATE sensors SET value='666' WHERE id='%22+str(sensor_id)+%22';%22
%0A
@@ -936,18 +936,16 @@
sql%0A
-%0A
%0A
|
9cf5ec0f8b56b6fdaef2b2f898f0ae35a642954a
|
Fix requests.get() in horizon
|
stellar_base/horizon.py
|
stellar_base/horizon.py
|
# coding: utf-8
import requests
import json
try:
from sseclient import SSEClient
except ImportError:
SSEClient = None
try:
# Python 3
from urllib.parse import urlencode
except ImportError:
# Python 2
from urllib import urlencode
def query(url, params=None, sse=False):
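# A plain request returns the parsed JSON body; with sse=True an SSEClient message stream is returned instead.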
if sse is False:
p = requests.get(url, params, )
return json.loads(p.text)
else:
if SSEClient is None:
raise ValueError('SSE not supported, missing sseclient module')
if params:
url = url + '?' + urlencode(params)
messages = SSEClient(url)
return messages
class Horizon (object):
def __init__(self, horizon=None):
if horizon is None:
self.horizon = 'https://horizon-testnet.stellar.org'
else:
self.horizon = horizon
def submit(self, te):
params = {'tx': te}
url = self.horizon + '/transactions/'
p = requests.post(url, params=params, ) # timeout=20
return json.loads(p.text)
def query(self, url, params=None, sse=False):
return query(self.horizon+url, params, sse)
def accounts(self, params=None, sse=False):
url = self.horizon + '/accounts/'
return query(url, params, sse)
def account(self, address):
url = self.horizon + '/accounts/' + address
return query(url)
def account_effects(self, address, params=None, sse=False):
url = self.horizon + '/accounts/' + address + '/effects/'
return query(url, params, sse)
def account_offers(self, address, params=None):
url = self.horizon + '/accounts/' + address + '/offers/'
return query(url, params)
def account_operations(self, address, params=None, sse=False):
url = self.horizon + '/accounts/' + address + '/operations/'
return query(url, params, sse)
def account_transactions(self, address, params=None, sse=False):
url = self.horizon + '/accounts/' + address + '/transactions/'
return query(url, params, sse)
def account_payments(self, address, params=None, sse=False):
url = self.horizon + '/accounts/' + address + '/payments/'
return query(url, params, sse)
def transactions(self, params=None, sse=False):
url = self.horizon + '/transactions/'
return query(url, params, sse)
def transaction(self, tx_hash):
url = self.horizon + '/transactions/' + tx_hash
return query(url)
def transaction_operations(self, tx_hash, params=None):
url = self.horizon + '/transactions/' + tx_hash + '/operations/'
return query(url, params)
def transaction_effects(self, tx_hash, params=None):
url = self.horizon + '/transactions/' + tx_hash + '/effects/'
return query(url, params)
def transaction_payments(self, tx_hash, params=None):
url = self.horizon + '/transactions/' + tx_hash + '/payments/'
return query(url, params)
def order_book(self, params=None):
url = self.horizon + '/order_book/'
return query(url, params)
def order_book_trades(self, params=None):
url = self.horizon + '/order_book/trades/'
return query(url, params)
def ledgers(self, params=None, sse=False):
url = self.horizon + '/ledgers/'
return query(url, params, sse)
def ledger(self, ledger_id):
url = self.horizon + '/ledgers/' + ledger_id
return query(url)
def ledger_effects(self, ledger_id, params=None):
url = self.horizon + '/ledgers/' + ledger_id + '/effects/'
return query(url, params)
def ledger_offers(self, ledger_id, params=None):
url = self.horizon + '/ledgers/' + ledger_id + '/offers/'
return query(url, params)
def ledger_operations(self, ledger_id, params=None):
url = self.horizon + '/ledgers/' + ledger_id + '/operations/'
return query(url, params)
def ledger_payments(self, ledger_id, params=None):
url = self.horizon + '/ledgers/' + ledger_id + '/payments/'
return query(url, params)
def effects(self, params=None, sse=False):
url = self.horizon + '/effects/'
return query(url, params, sse)
def operations(self, params=None, sse=False):
url = self.horizon + '/operations/'
return query(url, params, sse)
def operation(self, op_id, params=None):
url = self.horizon + '/operations/' + op_id
return query(url, params)
def operation_effects(self, tx_hash, params=None):
url = self.horizon + '/operations/' + tx_hash + '/effects/'
return query(url, params)
def payments(self, params=None, sse=False):
url = self.horizon + '/payments/'
return query(url, params, sse)
|
Python
| 0.000001
|
@@ -346,18 +346,23 @@
, params
-,
+=params
)%0A
|
42710dccf4ddbd02f71a62b266c31275097f9274
|
Adjust to accept a file of ISSNs
|
export/xml_rsps.py
|
export/xml_rsps.py
|
# coding: utf-8
"""
This process exports SciELO records to the RSPS format
"""
import os
import argparse
import logging
import codecs
import json
import threading
import multiprocessing
from Queue import Queue, Empty
from io import StringIO
import packtools
from packtools.catalogs import XML_CATALOG
import utils
os.environ['XML_CATALOG_FILES'] = XML_CATALOG
logger = logging.getLogger(__name__)
def _config_logging(logging_level='INFO', logging_file=None):
allowed_levels = {
'DEBUG': logging.DEBUG,
'INFO': logging.INFO,
'WARNING': logging.WARNING,
'ERROR': logging.ERROR,
'CRITICAL': logging.CRITICAL
}
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger.setLevel(allowed_levels.get(logging_level, 'INFO'))
if logging_file:
hl = logging.FileHandler(logging_file, mode='a')
else:
hl = logging.StreamHandler()
hl.setFormatter(formatter)
hl.setLevel(allowed_levels.get(logging_level, 'INFO'))
logger.addHandler(hl)
return logger
def summarize(validator):
def _make_err_message(err):
""" An error message is comprised of the message itself and the
element sourceline.
"""
err_msg = {'message': err.message}
try:
err_element = err.get_apparent_element(validator.lxml)
except ValueError:
logger.info('Could not locate the element name in: %s' % err.message)
err_element = None
if err_element is not None:
err_msg['apparent_line'] = err_element.sourceline
else:
err_msg['apparent_line'] = None
return err_msg
dtd_is_valid, dtd_errors = validator.validate()
sps_is_valid, sps_errors = validator.validate_style()
summary = {
'dtd_errors': [_make_err_message(err) for err in dtd_errors],
'sps_errors': [_make_err_message(err) for err in sps_errors],
}
summary['dtd_is_valid'] = validator.validate()[0]
summary['sps_is_valid'] = validator.validate_style()[0]
summary['is_valid'] = bool(validator.validate()[0] and validator.validate_style()[0])
return summary
def analyze_xml(xml):
"""Analyzes `file` against packtools' XMLValidator.
"""
f = StringIO(xml)
try:
xml = packtools.XMLValidator.parse(f, sps_version='sps-1.1')
except packtools.exceptions.PacktoolsError as e:
logger.exception(e)
summary = {}
summary['dtd_is_valid'] = False
summary['sps_is_valid'] = False
summary['is_valid'] = False
summary['parsing_error'] = True
return summary
else:
summary = summarize(xml)
return summary
class Dumper(object):
def __init__(self, collection, issns=None):
self._articlemeta = utils.articlemeta_server()
self.collection = collection
self.issns = issns or [None]
def fmt_json(self, data):
fmt = {}
fmt['code'] = data.publisher_id
fmt['collection'] = data.collection_acronym
fmt['id'] = '_'.join([data.collection_acronym, data.publisher_id])
fmt['document_type'] = data.document_type
fmt['publication_year'] = data.publication_date[0:4]
fmt['data_version'] = 'legacy' if data.data_model_version == 'html' else 'xml'
return fmt
def prepare_queue(self, q):
for issn in self.issns:
for document in self._articlemeta.documents(collection=self.collection, issn=issn):
q.put(self.fmt_json(document))
def summaryze_xml_validation(self, pid, collection_acronym, output_format):
try:
xml = self._articlemeta.document(pid, collection_acronym, fmt='xmlrsps')
except Exception as e:
logger.exception(e)
logger.error('Failed to read document: %s_%s' % (pid, collection_acronym))
xml = u''
logger.debug('Reading document: %s' % pid)
output_format.update(analyze_xml(xml))
print(json.dumps(output_format))
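# Worker threads drain the shared queue; a 0.5 s timeout on an empty queue ends the thread.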
def _worker(self, q, t):
while True:
try:
doc = q.get(timeout=0.5)
except Empty:
return
logger.debug('Running thread %s' % t)
self.summaryze_xml_validation(doc['code'], doc['collection'], doc)
def run(self):
job_queue = Queue()
self.prepare_queue(job_queue)
jobs = []
max_threads = multiprocessing.cpu_count() * 2
for t in range(max_threads):
thread = threading.Thread(target=self._worker, args=(job_queue, t))
jobs.append(thread)
thread.start()
logger.info('Thread running %s' % thread)
for job in jobs:
job.join()
def main():
parser = argparse.ArgumentParser(
description='Dump languages distribution by article'
)
parser.add_argument(
'issns',
nargs='*',
help='ISSNs separated by spaces'
)
parser.add_argument(
'--collection',
'-c',
help='Collection Acronym'
)
parser.add_argument(
'--logging_file',
'-o',
help='Full path to the log file'
)
parser.add_argument(
'--logging_level',
'-l',
default='DEBUG',
choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
help='Logging level'
)
args = parser.parse_args()
_config_logging(args.logging_level, args.logging_file)
logger.info('Dumping data for: %s' % args.collection)
issns = None
if len(args.issns) > 0:
issns = utils.ckeck_given_issns(args.issns)
dumper = Dumper(args.collection, issns)
dumper.run()
|
Python
| 0.000001
|
@@ -5097,32 +5097,201 @@
spaces'%0A )%0A%0A
+ parser.add_argument(%0A '--issns_file',%0A '-i',%0A default=None,%0A help='Full path to a txt file within a list of ISSNs to be exported'%0A )%0A%0A
parser.add_a
@@ -5924,32 +5924,352 @@
ns(args.issns)%0A%0A
+ issns_from_file = None%0A if args.issns_file:%0A with open(args.issns_file, 'r') as f:%0A issns_from_file = utils.ckeck_given_issns(%5Bi.strip() for i in f%5D)%0A%0A if issns:%0A issns += issns_from_file if issns_from_file else %5B%5D%0A else:%0A issns = issns_from_file if issns_from_file else %5B%5D%0A%0A
dumper = Dum
|
3870248740d83b0292ccca88a494ce19783847f0
|
Raise exception if pyspark Gateway process doesn't start.
|
python/pyspark/java_gateway.py
|
python/pyspark/java_gateway.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import signal
import shlex
import platform
from subprocess import Popen, PIPE
from threading import Thread
from py4j.java_gateway import java_import, JavaGateway, GatewayClient
def launch_gateway():
SPARK_HOME = os.environ["SPARK_HOME"]
gateway_port = -1
if "PYSPARK_GATEWAY_PORT" in os.environ:
gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
else:
# Launch the Py4j gateway using Spark's run command so that we pick up the
# proper classpath and settings from spark-env.sh
on_windows = platform.system() == "Windows"
script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS")
submit_args = submit_args if submit_args is not None else ""
submit_args = shlex.split(submit_args)
command = [os.path.join(SPARK_HOME, script), "pyspark-shell"] + submit_args
if not on_windows:
# Don't send ctrl-c / SIGINT to the Java gateway:
def preexec_func():
signal.signal(signal.SIGINT, signal.SIG_IGN)
proc = Popen(command, stdout=PIPE, stdin=PIPE, preexec_fn=preexec_func)
else:
# preexec_fn not supported on Windows
proc = Popen(command, stdout=PIPE, stdin=PIPE)
# Determine which ephemeral port the server started on:
gateway_port = int(proc.stdout.readline())
# Create a thread to echo output from the GatewayServer, which is required
# for Java log output to show up:
class EchoOutputThread(Thread):
def __init__(self, stream):
Thread.__init__(self)
self.daemon = True
self.stream = stream
def run(self):
while True:
line = self.stream.readline()
sys.stderr.write(line)
EchoOutputThread(proc.stdout).start()
# Connect to the gateway
gateway = JavaGateway(GatewayClient(port=gateway_port), auto_convert=False)
# Import the classes used by PySpark
java_import(gateway.jvm, "org.apache.spark.SparkConf")
java_import(gateway.jvm, "org.apache.spark.api.java.*")
java_import(gateway.jvm, "org.apache.spark.api.python.*")
java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
java_import(gateway.jvm, "org.apache.spark.sql.SQLContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.HiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.LocalHiveContext")
java_import(gateway.jvm, "org.apache.spark.sql.hive.TestHiveContext")
java_import(gateway.jvm, "scala.Tuple2")
return gateway
|
Python
| 0
|
@@ -1957,16 +1957,29 @@
in=PIPE,
+ stderr=PIPE,
preexec
@@ -1996,16 +1996,16 @@
c_func)%0A
-
@@ -2117,18 +2117,57 @@
din=PIPE
-)%0A
+, stderr=PIPE)%0A %0A try:%0A
@@ -2222,32 +2222,36 @@
ted on:%0A
+
gateway_port = i
@@ -2277,16 +2277,219 @@
line())%0A
+ except:%0A error_code = proc.poll()%0A raise Exception(%22Launching GatewayServer failed with exit code %25d: %25s%22 %25%0A (error_code, %22%22.join(proc.stderr.readlines())))%0A%0A
|
03548610d88cb60fce42b4898aa678f27e1f371a
|
return http:// with url
|
fileup.py
|
fileup.py
|
#!/usr/bin/env python
# -*-Python-*-
import argparse
import base64
import datetime
import ftplib
import os
import re
import subprocess
import tempfile
def get_valid_filename(s):
"""
Return the given string converted to a string that can be used for a clean
filename. Remove leading and trailing spaces; convert other spaces to
underscores; and remove anything that is not an alphanumeric, dash,
underscore, or dot.
>>> get_valid_filename("john's portrait in 2004.jpg")
'johns_portrait_in_2004.jpg'
"""
s = s.strip().replace(' ', '_')
return re.sub(r'(?u)[^-\w.]', '', s)
def read_config():
# Read the config
with open(os.path.expanduser('~/.config/fileup/config'), 'r') as f:
"""Create a config file at ~/.config/fileup/config with the
following information and structure:
example.com
base_folder
file_up_folder
my_user_name
my_difficult_password
"""
base_url, base_folder, folder, user, pw = [s.replace('\n', '') for s in f.readlines()]
return base_url, base_folder, folder, user, pw
def remove_old_files(ftp, today):
# Remove all files that are past the limit
files = [f for f in ftp.nlst() if '_delete_on_' in f]
file_dates = [f.rsplit('_delete_on_', 1) for f in files]
for file_name, date in file_dates:
rm_date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
if rm_date < today:
print('removing "{}" because the date passed'.format(file_name))
try:
ftp.delete(file_name)
except:
# File didn't exist anymore for some reason...
pass
ftp.delete(file_name + "_delete_on_" + date)
def main():
# Get arguments
description = ["Publish a file. \n \n",
"Create a config file at ~/.config/fileup/config with the following information and structure:\n",
"example.com",
"base_folder"
"file_up_folder",
"my_user_name",
"my_difficult_password"]
parser = argparse.ArgumentParser(description='\n'.join(description),
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('fname', type=str)
parser.add_argument('-t', '--time', type=int, default=90)
parser.add_argument('-d', '--direct', action='store_true')
parser.add_argument('-i', '--img', action='store_true')
args = parser.parse_args()
fname = os.path.abspath(os.path.expanduser(args.fname))
fname_base = os.path.basename(fname)
base_url, base_folder, folder, user, pw = read_config()
# Connect to server
ftp = ftplib.FTP(base_url, user, pw)
ftp.cwd(os.path.join(base_folder, folder))
# Fix the filename to avoid filename character issues
fname_base = get_valid_filename(fname_base)
today = datetime.datetime.now().date()
remove_old_files(ftp, today)
# Delete first if file already exists, it could happen that there is already
# a file with a specified deletion date, these should be removed.
for f in ftp.nlst():
if f.startswith(fname_base) and '_delete_on_' in f:
ftp.delete(f)
if args.time != 0: # could be negative (used for debugging).
remove_on = today + datetime.timedelta(days=args.time)
fname_date = fname_base + '_delete_on_' + str(remove_on)
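# Upload an empty marker file whose name records the deletion date for this upload.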
with tempfile.TemporaryFile() as f:
print('upload ' + fname_date)
ftp.storbinary('STOR {0}'.format(fname_date), f)
# Upload and open the actual file
with open(fname, 'rb') as f:
ftp.storbinary('STOR {0}'.format(fname_base), f)
print('upload ' + fname_base)
ftp.quit()
# Create URL
url = '{}/{}/{}'.format(base_url, folder, fname_base)
if args.direct:
# Returns the url as is.
url = 'http://' + url
elif args.img:
url = '<img src="{}">'.format(url)
elif fname.endswith('.ipynb'):
# Return the url in the nbviewer
url = 'http://nbviewer.jupyter.org/url/' + url + '?flush_cache=true'
# Putting a URL into the clipboard only works on OS X
try:
process = subprocess.Popen('pbcopy', env={'LANG': 'en_US.UTF-8'},
stdin=subprocess.PIPE)
process.communicate(url.encode('utf-8'))
except:
pass
print('Your url is: ', url)
if __name__ == "__main__":
main()
|
Python
| 0.000947
|
@@ -1736,16 +1736,17 @@
date)%0A%0A
+%0A
def main
@@ -2808,17 +2808,16 @@
lder))%0A%0A
-%0A
# Fi
@@ -2918,13 +2918,9 @@
se)%0A
-
%0A
+
@@ -3064,24 +3064,30 @@
there is
+%0A #
already
%0A # a
@@ -3078,22 +3078,16 @@
already
-%0A #
a file
@@ -3251,25 +3251,24 @@
.delete(f)%0A%0A
-%0A
if args.
@@ -3597,17 +3597,16 @@
e), f)%0A%0A
-%0A
# Up
@@ -3784,17 +3784,16 @@
quit()%0A%0A
-%0A
# Cr
@@ -4081,16 +4081,16 @@
bviewer%0A
+
@@ -4159,17 +4159,16 @@
=true'%0A%0A
-%0A
# Pu
|
3d4583d69af76cccc3ea32526084857abc808cc5
|
Use label in place of name
|
share/robot.py
|
share/robot.py
|
import abc
import json
import random
import string
import datetime
from django.apps import apps
from django.db import migrations
from django.conf import settings
from django.apps import AppConfig
from django.utils import timezone
class RobotAppConfig(AppConfig, metaclass=abc.ABCMeta):
disabled = False
@abc.abstractproperty
def version(self):
raise NotImplementedError
@abc.abstractproperty
def task(self):
raise NotImplementedError
@abc.abstractproperty
def task_name(self):
raise NotImplementedError
@abc.abstractproperty
def description(self):
raise NotImplementedError
@abc.abstractproperty
def schedule(self):
raise NotImplementedError
@property
def user(self):
from share.models import ShareUser
return ShareUser.objects.get(robot=self.name)
def authorization(self) -> str:
return 'Bearer ' + self.user.accesstoken_set.first().token
class AbstractRobotMigration:
def __init__(self, label):
self.config = apps.get_app_config(label)
if not isinstance(self.config, RobotAppConfig):
raise Exception('Found non-robot app, "{}", in a robot migration.'.format(label))
def deconstruct(self):
return ('{}.{}'.format(__name__, self.__class__.__name__), (self.config.label, ), {})
class RobotMigration:
def __init__(self, app_config):
self.config = app_config
def ops(self):
return [
migrations.RunPython(
RobotUserMigration(self.config.label),
# RobotUserMigration(self.config.label).reverse,
),
migrations.RunPython(
RobotOauthTokenMigration(self.config.label),
# RobotOauthTokenMigration(self.config.label).reverse,
),
migrations.RunPython(
RobotScheduleMigration(self.config.label),
# RobotScheduleMigration(self.config.label).reverse,
),
]
def dependencies(self):
return [
('share', '0001_initial'),
('djcelery', '0001_initial'),
]
def migration(self):
m = migrations.Migration('0001_initial', self.config.label)
m.operations = self.ops()
m.dependencies = self.dependencies()
return m
class RobotUserMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
ShareUser = apps.get_model('share', 'ShareUser')
ShareUser.objects.create_robot_user(
username=self.config.name,
robot=self.config.name,
long_title=self.config.long_title,
home_page=self.config.home_page
)
def reverse(self, apps, schema_editor):
ShareUser = apps.get_model('share', 'ShareUser')
try:
ShareUser.objects.get(username=self.config.name, harvester=self.config.name).delete()
except ShareUser.DoesNotExist:
pass
class RobotOauthTokenMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
ShareUser = apps.get_model('share', 'ShareUser')
Application = apps.get_model('oauth2_provider', 'Application')
AccessToken = apps.get_model('oauth2_provider', 'AccessToken')
migration_user = ShareUser.objects.get(username=self.config.name, robot=self.config.name)
application_user = ShareUser.objects.get(username=settings.APPLICATION_USERNAME)
application = Application.objects.get(user=application_user)
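# Random 64-character secret used as the robot's long-lived OAuth access token.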
client_secret = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(64))
AccessToken.objects.create(
user=migration_user,
application=application,
expires=(timezone.now() + datetime.timedelta(weeks=20 * 52)), # 20 yrs
scope=settings.HARVESTER_SCOPES,
token=client_secret
)
def reverse(self, apps, schema_editor):
pass
class RobotScheduleMigration(AbstractRobotMigration):
def __call__(self, apps, schema_editor):
from djcelery.models import PeriodicTask
from djcelery.models import CrontabSchedule
tab = CrontabSchedule.from_schedule(self.config.schedule)
tab.save()
PeriodicTask(
enabled=not self.config.disabled,
name=self.config.task_name,
task=self.config.task,
description=self.config.description,
args=json.dumps([self.config.name, 1]), # Note 1 should always be the system user
crontab=tab,
).save()
def reverse(self, apps, schema_editor):
PeriodicTask = apps.get_model('djcelery', 'PeriodicTask')
try:
PeriodicTask.objects.get(
task=self.config.task,
args=json.dumps([self.config.name, 1]), # Note 1 should always be the system user
).delete()
except PeriodicTask.DoesNotExist:
pass
|
Python
| 0.000269
|
@@ -4516,36 +4516,37 @@
ps(%5Bself.config.
-name
+label
, 1%5D), # Note 1
@@ -4859,20 +4859,21 @@
.config.
-name
+label
, 1%5D),
|
d1098119c442a1254ca3f5c7846978060beb9b1c
|
Fix bad return in the weak scaling performance plot generation
|
livvkit/bundles/CISM-glissade/performance.py
|
livvkit/bundles/CISM-glissade/performance.py
|
# Copyright (c) 2015, UT-BATTELLE, LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
CISM-glissade module for performance analysis
"""
import os
import matplotlib
import numpy as np
from livvkit.util.datastructures import LIVVDict
from livvkit.util.datastructures import ElementHelper
def weak_scaling(timing_stats, scaling_var, data_points):
"""
Generate data for plotting weak scaling. The data points keep
a constant amount of work per processor for each data point.
Args:
timing_stats: the result of livvkit.components.performance's
generate_timing_stats function
scaling_var: the variable to select from the timing_stats dictionary
(can be provided in configurations via the 'scaling_var' key)
data_points: the list of size and processor counts to use as data
Returns:
TODO : Currently returns a dict with model,
bench, and proc data containing lists
"""
timing_data = LIVVDict()
proc_counts = []
bench_means = []
bench_mins = []
bench_maxs = []
model_means = []
model_mins = []
model_maxs = []
for point in data_points:
size = point[0]
proc = point[1]
try:
model_data = timing_stats[size][proc]['model'][scaling_var]
bench_data = timing_stats[size][proc]['bench'][scaling_var]
except KeyError:
return timing_data
proc_counts.append(proc)
model_means.append(model_data['mean'])
model_mins.append(model_data['min'])
model_maxs.append(model_data['max'])
bench_means.append(bench_data['mean'])
bench_mins.append(bench_data['min'])
bench_maxs.append(bench_data['max'])
timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs)
timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs)
timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]
return timing_data
def strong_scaling(timing_stats, scaling_var, data_points):
"""
Generate data for plotting strong scaling. The data points keep
the problem size the same and vary the number of processors
used to complete the job.
Args:
timing_stats: the result of livvkit.components.performance's
generate_timing_stats function
scaling_var: the variable to select from the timing_stats dictionary
(can be provided in configurations via the 'scaling_var' key)
data_points: the list of size and processor counts to use as data
Returns:
TODO: A LIVVDict() of the form...
"""
timing_data = LIVVDict()
proc_counts = []
bench_means = []
bench_mins = []
bench_maxs = []
model_means = []
model_mins = []
model_maxs = []
for point in data_points:
size = point[0]
proc = point[1]
try:
model_data = timing_stats[size][proc]['model'][scaling_var]
bench_data = timing_stats[size][proc]['bench'][scaling_var]
except KeyError:
continue
proc_counts.append(proc)
model_means.append(model_data['mean'])
model_mins.append(model_data['min'])
model_maxs.append(model_data['max'])
bench_means.append(bench_data['mean'])
bench_mins.append(bench_data['min'])
bench_maxs.append(bench_data['max'])
timing_data['bench'] = dict(mins=bench_mins, means=bench_means, maxs=bench_maxs)
timing_data['model'] = dict(mins=model_mins, means=model_means, maxs=model_maxs)
timing_data['proc_counts'] = [int(pc[1:]) for pc in proc_counts]
return timing_data
|
Python
| 0.000012
|
@@ -2883,34 +2883,24 @@
-return timing_data
+continue
%0A
|
a21cdba4e742890f278c74764554711fb38ef9c1
|
Remove busy_downloads method, info is kept in Redis now
|
bqueryd/util.py
|
bqueryd/util.py
|
import netifaces
import zmq
import random
import os
import tempfile
import zipfile
import binascii
import time
import shutil
import sys
def get_my_ip():
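# Prefer the highest-numbered ethN interface, falling back to loopback, and return its first IPv4 address.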
eth_interfaces = sorted([ifname for ifname in netifaces.interfaces() if ifname.startswith('eth')])
if len(eth_interfaces) < 1:
ifname = 'lo'
else:
ifname = eth_interfaces[-1]
for x in netifaces.ifaddresses(ifname)[netifaces.AF_INET]:
# Return first addr found
return x['addr']
def bind_to_random_port(socket, addr, min_port=49152, max_port=65536, max_tries=100):
"We can't just use the zmq.Socket.bind_to_random_port, as we wan't to set the identity before binding"
for i in range(max_tries):
try:
port = random.randrange(min_port, max_port)
socket.identity = '%s:%s' % (addr, port)
socket.bind('tcp://*:%s' % port)
#socket.bind('%s:%s' % (addr, port))
except zmq.ZMQError as exception:
en = exception.errno
if en == zmq.EADDRINUSE:
continue
else:
raise
else:
return socket.identity
raise zmq.ZMQBindError("Could not bind socket to random port.")
def zip_to_file(file_path, destination):
fd, zip_filename = tempfile.mkstemp(suffix=".zip", dir=destination)
with zipfile.ZipFile(zip_filename, 'w', zipfile.ZIP_DEFLATED, allowZip64=True) as myzip:
if os.path.isdir(file_path):
abs_src = os.path.abspath(file_path)
for root, dirs, files in os.walk(file_path):
for current_file in files:
absname = os.path.abspath(os.path.join(root, current_file))
arcname = absname[len(abs_src) + 1:]
myzip.write(absname, arcname)
else:
myzip.write(file_path, file_path)
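# Overall archive checksum: CRC of the concatenated per-member CRCs.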
zip_info = ''.join(str(zipinfoi.CRC) for zipinfoi in myzip.infolist())
checksum = hex(binascii.crc32(zip_info) & 0xffffffff)
return zip_filename, checksum
def rm_file_or_dir(path):
if os.path.exists(path):
if os.path.isdir(path):
if os.path.islink(path):
os.unlink(path)
else:
shutil.rmtree(path)
else:
if os.path.islink(path):
os.unlink(path)
else:
os.remove(path)
def tree_checksum(path):
allfilenames = set()
for root, dirs, filenames in os.walk(path):
for filename in filenames:
allfilenames.add(os.path.join(root, filename))
buf = ''.join(sorted(allfilenames))
return hex(binascii.crc32(buf) & 0xffffffff)
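# Note: the checksum reflects only the file *names* under the tree (buf is a
# concatenation of the sorted paths), so renaming a file changes it while
# editing a file's contents in place does not.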
###################################################################################################
# Various Utility methods for user-friendly info display
def show_workers(info_data, only_busy=False):
    'For the given info_data dict, show a human-friendly overview of the current workers'
nodes = {}
for w in info_data.get('workers', {}).values():
nodes.setdefault(w['node'], []).append(w)
for k, n in nodes.items():
print k
for nn in n:
if only_busy and not nn.get('busy'):
continue
print ' ', time.ctime(nn['last_seen']), nn.get('busy')
def show_busy_downloads(info_data):
all_downloads = info_data['downloads'].copy()
for x in info_data.get('others', {}).values():
all_downloads.update(x.get('downloads'))
for ticket, x in all_downloads.items():
print ticket, time.ctime(x['created'])
for filename, nodelist in x['progress'].items():
print filename
for node, x in nodelist.items():
print ' ', node,
if not x.get('done'):
if 'progress' in x:
print int(float(x['progress']) / x.get('size', x['progress']) * 100), '%'
else:
print x
sys.stdout.write('\n')
|
Python
| 0
|
@@ -3264,718 +3264,4 @@
y')%0A
-%0Adef show_busy_downloads(info_data):%0A all_downloads = info_data%5B'downloads'%5D.copy()%0A for x in info_data.get('others', %7B%7D).values():%0A all_downloads.update(x.get('downloads'))%0A for ticket, x in all_downloads.items():%0A print ticket, time.ctime(x%5B'created'%5D)%0A for filename, nodelist in x%5B'progress'%5D.items():%0A print filename%0A for node, x in nodelist.items():%0A print ' ', node,%0A if not x.get('done'):%0A if 'progress' in x:%0A print int(float(x%5B'progress'%5D) / x.get('size', x%5B'progress'%5D) * 100), '%25'%0A else:%0A print x%0A sys.stdout.write('%5Cn')
|
aa8096c94e0d067eff96d475117c630ffa371cce
|
Save all data from Lorax
|
src/py/rpmostreecompose/installer.py
|
src/py/rpmostreecompose/installer.py
|
#!/usr/bin/env python
# Copyright (C) 2014 Colin Walters <walters@verbum.org>, Andy Grimm <agrimm@redhat.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import json
import os
import sys
import tempfile
import argparse
import shutil
import subprocess
import distutils.spawn
from gi.repository import Gio, OSTree, GLib
import iniparse
from .taskbase import TaskBase
from .utils import run_sync, fail_msg
class InstallerTask(TaskBase):
def create_disks(self, outputdir):
[res,rev] = self.repo.resolve_rev(self.ref, False)
[res,commit] = self.repo.load_variant(OSTree.ObjectType.COMMIT, rev)
commitdate = GLib.DateTime.new_from_unix_utc(OSTree.commit_get_timestamp(commit)).format("%c")
print commitdate
imagestmpdir = os.path.join(self.workdir, 'images')
os.mkdir(imagestmpdir)
generated = []
imgtargetinstaller=os.path.join(imagestmpdir, 'install', '%s-installer.iso' % self.os_name)
self.create_installer_image(self.workdir, imgtargetinstaller)
generated.append(imgtargetinstaller)
for f in generated:
destpath = os.path.join(outputdir, os.path.basename(f))
print "Created: " + destpath
shutil.move(f, destpath)
def create_installer_image(self, tmpdir, target):
lorax_opts = []
if self.local_overrides:
lorax_opts.extend([ '-s', self.local_overrides ])
if self.lorax_additional_repos:
for repourl in self.lorax_additional_repos.split(','):
lorax_opts.extend(['-s', repourl.strip()])
http_proxy = os.environ.get('http_proxy')
if http_proxy:
lorax_opts.extend([ '--proxy', http_proxy ])
lorax_workdir = os.path.join(tmpdir, 'lorax')
os.makedirs(lorax_workdir)
run_sync(['lorax', '--nomacboot',
'--add-template=%s/lorax-embed-repo.tmpl' % self.pkgdatadir,
'--add-template-var=ostree_osname=%s' % self.os_name,
'--add-template-var=ostree_repo=%s' % self.ostree_repo,
'--add-template-var=ostree_ref=%s' % self.ref,
'-p', self.os_pretty_name, '-v', self.release,
'-r', self.release, '-s', self.yum_baseurl,
] + lorax_opts + ['output'],
cwd=lorax_workdir)
os.makedirs(os.path.dirname(target))
# Right now we only take the boot.iso (which is really
# installer.iso since we used a template to inject data)
os.rename(lorax_workdir + '/output/images/boot.iso', target)
## End Composer
def main():
parser = argparse.ArgumentParser(description='Create an installer image')
parser.add_argument('-c', '--config', type=str, required=True, help='Path to config file')
parser.add_argument('-r', '--release', type=str, default='rawhide', help='Release to compose (references a config file section)')
parser.add_argument('-v', '--verbose', action='store_true', help='verbose output')
parser.add_argument('-o', '--outputdir', type=str, required=True, help='Path to image output directory')
args = parser.parse_args()
composer = InstallerTask(args.config, release=args.release)
composer.show_config()
origrev = None
_,newrev = composer.repo.resolve_rev(composer.ref, True)
composer.create_disks(args.outputdir)
composer.cleanup()
|
Python
| 0
|
@@ -1126,22 +1126,16 @@
f create
-_disks
(self, o
@@ -1145,16 +1145,16 @@
utdir):%0A
+
@@ -1415,569 +1415,8 @@
te%0A%0A
- imagestmpdir = os.path.join(self.workdir, 'images')%0A os.mkdir(imagestmpdir)%0A%0A generated = %5B%5D%0A%0A imgtargetinstaller=os.path.join(imagestmpdir, 'install', '%25s-installer.iso' %25 self.os_name)%0A self.create_installer_image(self.workdir, imgtargetinstaller)%0A generated.append(imgtargetinstaller)%0A%0A for f in generated:%0A destpath = os.path.join(outputdir, os.path.basename(f))%0A print %22Created: %22 + destpath%0A shutil.move(f, destpath)%0A%0A def create_installer_image(self, tmpdir, target):%0A
@@ -1864,19 +1864,25 @@
th.join(
-tmp
+self.work
dir, 'lo
@@ -2476,240 +2476,429 @@
-os.makedirs(os.path.dirname(target))%0A # Right now we only take the boot.iso (which is really%0A # installer.iso since we used a template to inject data)%0A os.rename(lorax_workdir + '/output/images/boot.iso', target
+# We injected data into boot.iso, so it's now installer.iso%0A lorax_output = lorax_workdir + '/output'%0A lorax_images = lorax_output + '/images'%0A os.rename(lorax_images + '/boot.iso', lorax_images + '/installer.iso')%0A%0A for p in os.listdir(lorax_output):%0A print %22Generated: %22 + p%0A shutil.move(os.path.join(lorax_output, p),%0A os.path.join(outputdir, p)
)%0A%0A#
@@ -3556,89 +3556,8 @@
()%0A%0A
- origrev = None%0A _,newrev = composer.repo.resolve_rev(composer.ref, True)%0A%0A
@@ -3575,14 +3575,8 @@
eate
-_disks
(arg
|
2f50e7e71b124ae42cab5edb19c030fcc69a4ef5
|
Fix failing attribute lookups
|
saleor/product/models/utils.py
|
saleor/product/models/utils.py
|
from django.utils.encoding import smart_text
def get_attributes_display_map(variant, attributes):
print "in get_attributes_display_map with " + str(variant) + " and " + str(attributes)
display = {}
for attribute in attributes:
value = variant.get_attribute(attribute.pk)
if value:
choices = {smart_text(a.pk): a for a in attribute.values.all()}
attr = choices.get(value)
if attr:
display[attribute.pk] = attr
else:
display[attribute.pk] = value
return display
|
Python
| 0.000001
|
@@ -96,99 +96,8 @@
s):%0A
- print %22in get_attributes_display_map with %22 + str(variant) + %22 and %22 + str(attributes)%0A
|
c296d8221b5785823cb04b462c894fbcb9ede098
|
Fix output for single shoe size search
|
shoeScraper.py
|
shoeScraper.py
|
#imports
import sqlite3
import re
import requests
from bs4 import BeautifulSoup as soup
#DECLARE SQL:
conn = sqlite3.connect('shoes_on_ebay.db')
c = conn.cursor()
def create_table():
c.execute("CREATE TABLE IF NOT EXISTS " + SQL_name + " (id TEXT, list_title TEXT, shoe_size REAL, date_sold TEXT, price REAL)")
def data_entry():
c.execute("INSERT INTO " + SQL_name + " (id, list_title, shoe_size, date_sold, price) VALUES (?, ?, ?, ?, ?)",
(item_id, item_title, shoe_size, item_date, item_price))
conn.commit()
def no_duplicates(x):
c.execute("SELECT id FROM " + SQL_name + " WHERE id = ?", (x,))
data = c.fetchone()
if data is None:
return 0
else:
return 1
def date_format(date):
months = {"Jan" : '01', "Feb" : '02', "Mar" : '03', "Apr" : '04', "May" : '05', "Jun" : '06', "Jul" : '07', "Aug" : '08', "Sep" : '09', "Oct" : '10', "Nov" : '11', "Dec" : '12'}
month = str(date[:3])
day = str(date[4:6])
month_num = months[month]
return month_num + "/" + day
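# e.g. date_format("Dec 25") returns "12/25" (assumes the "Mon DD" date layout
# scraped from the listing pages below)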
def pricesFromDB():
c.execute("SELECT price FROM " + SQL_name + " WHERE shoe_size >= " + str(sizeMin) + " AND shoe_size <= " + str(sizeMax))
prices = c.fetchall()
return prices
shoe_search = input('What shoe would you like to search for? ')
SQL_name = shoe_search.replace(" ", "_")
sizePrompted = input('''
Would you like to search for a certain size?
(enter "no" to search all sizes)
(enter "11" to search for size 11)
(enter "9.5-10.5" to search sizes 9.5, 10, and 10.5)
''')
anyButNumbersAndPeriod = re.compile("([^0-9.])")
if (sizePrompted.lower() == "no"):
sizeMin = 5
sizeMax = 15
	print("searching for all sizes 5 through 15")
elif "-" in sizePrompted and not anyButNumbersAndPeriod.match(sizePrompted):
#this is a range
sizePrompted = sizePrompted.split("-");
sizeMin = float(sizePrompted[0]);
sizeMax = float(sizePrompted[1]);
print("searching for sizes " + sizePrompted[0] + " through " + sizePrompted[1])
elif not anyButNumbersAndPeriod.match(sizePrompted):
#single size
sizeMin = float(sizePrompted)
sizeMax = sizeMin
print("searching for size " + sizePrompted)
else:
print("Please enter a valid size\n")
sizes = []
#add minSize through maxSize to sizes []
i = sizeMin;
while (i <= sizeMax):
sizes.append(i);
i += 0.5
#format sizes for params
paramSizes = []
for size in sizes:
size = str(size)
if ".0" in size:
paramSize = size.replace(".0", "")
elif ".5" in size:
paramSize = size.replace(".5", "%2E5")
paramSizes.append(paramSize)
for size in paramSizes:
my_url = 'https://www.ebay.com/sch/i.html'
params = {
'_from' : 'R40',
'_sacat' : 0,
'LH_Complete' : 1,
'LH_Sold' : 1,
'LH_ItemCondition' : 1000,
'_nkw' : shoe_search,
'_dcat' : 15709,
"US%20Shoe%20Size%20%28Men%27s%29" : size,
'rt' : 'nc',
}
r = requests.get(my_url, params=params)
# html parsing
page_soup = soup(r.text, "html.parser")
#class nllclt is only there when there are 0 results
if bool(page_soup.find("span", {"class": "nllclt"})) == True:
continue
#find the first of this only because Ebay sometimes adds suggested results that don't match right away
matches = page_soup.find("ul", {"class": "gv-ic"})
# grabs each sale
containers = matches.findAll("li",{"class":"sresult"})
# Create table, comment out after making it the first time
create_table()
for container in containers:
#extract unique identifier
#all ints in python 3 are long longs
item_id = container.get("id")
#if item_id exists, loop to next entry
if no_duplicates(item_id) == 1:
continue
# extract item title from html
item_title = container.h3.a.text
# extract date and time sold from html
date_container = container.find("span", {"class":"lcol"})
date = date_container.text[:6]
item_date = date_format(date).strip()
# extract price
price_container = container.find("span", {"class":"bidsold"})
# strip data to clean up and fix formatting
item_price = price_container.text.replace("$", "").strip()
item_price = item_price.replace(",", "")
#if has "\xa0to " get the average
if bool(re.search("\xa0to ", item_price)) == False:
item_price = item_price
elif bool(re.search("\xa0to ", item_price)) == True:
arr = item_price.split("\xa0to ")
p1 = float(arr[0])
p2 = float(arr[1])
item_price = (p1 + p2) / 2.00
#reformat half sizes before entering
shoe_size = size.replace("%2E", ".")
float(shoe_size)
#write outputs in csv file and format accordingly
data_entry()
prices = pricesFromDB()
priceSum = 0.00;
for i in range(0, len(prices)):
priceSum += prices[i][0]
avgPrice = round(priceSum/len(prices), 2)
print("The Ebay results for " + shoe_search + " has an average selling point of $" + str(avgPrice) + " for sizes " + str(sizeMin) + " through " + str(sizeMax))
# always close db connection when done writing it
c.close()
conn.close()
|
Python
| 0.998594
|
@@ -4591,16 +4591,207 @@
es), 2)%0A
+if (sizeMax == sizeMin):%0A%09#single shoe size%0A%09print(%22The Ebay results for %22 + shoe_search + %22 has an average selling point of $%22 + str(avgPrice) + %22 for size %22 + str(sizeMin))%0A%0Aelse:%0A%09#range%0A%09
print(%22T
|
9aee2f384e6f11d08518757a9afd7af50002cff7
|
Swap debug mode
|
gpbot/gpbot/settings.py
|
gpbot/gpbot/settings.py
|
"""
Django settings for gpbot project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ddfdk=t2z)q8(^7-9)y$ey#ddbyipf4#kn4*+l51**bjllfege'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# DEBUG = False
ALLOWED_HOSTS = ['127.0.0.1', 'localhost', 'kage-line-bot.herokuapp.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'gpbot.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'gpbot.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
|
Python
| 0.000001
|
@@ -795,16 +795,18 @@
uction!%0A
+#
DEBUG =
@@ -810,18 +810,16 @@
= True%0A
-#
DEBUG =
|
a5a52979f87599cc7ef9f951c5b3f3d3487a9261
|
Revert "Fixed KeyError in index command"
|
lily/management/commands/index.py
|
lily/management/commands/index.py
|
from optparse import make_option
import time
from django.conf import settings
from django.core.management.base import BaseCommand
from lily.search.analyzers import get_analyzers
from lily.search.connections_utils import get_es_client, get_index_name
from lily.search.indexing import index_objects
from lily.search.scan_search import ModelMappings
class Command(BaseCommand):
help = """Index current model instances into Elasticsearch. It does this by
creating a new index, then changing the alias to point to the new index.
(afterwards removing the old index). It uses 1 index per type.
It uses custom index implementation for fast and synchronous indexing.
There are basically two ways to use this command, the first to index all
configured mappings:
index
The second way is to target a specific model:
index -t contact
or with other names:
index -t lily.contacts.models.Contact
index -t contacts_contact
It is possible to specify multiple models, using comma separation."""
option_list = BaseCommand.option_list + (
make_option('-t', '--target',
action='store',
dest='target',
default='',
help='Choose specific model targets, comma separated (no added space after comma).'
),
make_option('-q', '--queries',
action='store_true',
dest='queries',
help='Show the queries that were executed during the command.'
),
make_option('-l', '--list',
action='store_true',
dest='list',
help='List available models to target.'
),
make_option('-f', '--force',
action='store_true',
dest='force',
help='Force the creation of the new index, removing the old one (leftovers).'
),
)
def handle(self, *args, **options):
es = get_es_client()
if args:
self.stdout.write('Aborting, unexpected arguments %s' % list(args))
return
if options['list']:
self.stdout.write('Possible models to index:\n')
for mapping in ModelMappings.get_model_mappings().values():
self.stdout.write(mapping.get_mapping_type_name())
return
target = options['target']
if target:
targets = target.split(',')
else:
targets = [] # (meaning all)
has_targets = targets != []
self.stdout.write('Please remember that HelloLily needs to be in maintenance mode. \n\n')
if has_targets:
# Do a quick run to check if all targets are valid models.
check_targets = list(targets) # make a copy
for target in check_targets:
for mapping in ModelMappings.get_model_mappings().values():
if self.model_targetted(mapping, [target]):
check_targets.remove(target)
break
if check_targets:
self.stdout.write('Aborting, following targets not recognized: %s' % check_targets)
return
for mapping in ModelMappings.get_model_mappings().values():
model_name = mapping.get_mapping_type_name()
main_index_base = settings.ES_INDEXES['default']
main_index = get_index_name(main_index_base, mapping)
# Skip this model if there are specific targets and not specified.
if has_targets and not self.model_targetted(mapping, targets):
continue
self.stdout.write('==> %s' % model_name)
# Check if we currently have an index for this mapping.
old_index = None
aliases = es.indices.get_aliases(name=main_index)
for key, value in aliases.iteritems():
if value['aliases']:
old_index = key
self.stdout.write('Current index "%s"' % key)
# Check any indices with no alias (leftovers from failed indexing).
            # Or it could be that indexing is still in progress.
aliases = es.indices.get_aliases()
for key, value in aliases.iteritems():
if not key.endswith(model_name):
# Not the model we are looking after.
continue
if key == main_index:
# This is an auto created index. Will be removed at end of command.
continue
if not value['aliases']:
if options.get('force', ''):
self.stdout.write('Removing leftover "%s"' % key)
es.indices.delete(key)
else:
raise Exception('Found leftover %s, proceed with -f to remove.'
' Make sure indexing this model is not already running!' % key)
# Create new index.
index_settings = {
'mappings': {
model_name: mapping.get_mapping()
},
'settings': {
'analysis': get_analyzers()['analysis'],
'number_of_shards': 1,
}
}
temp_index_base = 'index_%s' % (int(time.time()))
temp_index = get_index_name(temp_index_base, mapping)
self.stdout.write('Creating new index "%s"' % temp_index)
es.indices.create(temp_index, body=index_settings)
# Index documents.
self.index_documents(mapping, temp_index_base)
# Switch aliases.
if old_index:
es.indices.update_aliases({
'actions': [
{'remove': {'index': old_index, 'alias': main_index}},
{'remove': {'index': old_index, 'alias': main_index_base}},
{'add': {'index': temp_index, 'alias': main_index}},
{'add': {'index': temp_index, 'alias': main_index_base}},
]
})
self.stdout.write('Removing previous index "%s"' % old_index)
es.indices.delete(old_index)
else:
if es.indices.exists(main_index):
# This is a corner case. There was no alias named index_name, but
                    # an index index_name nevertheless exists; this only happens when the index
# was already created (because of ES auto creation features).
self.stdout.write('Removing previous (presumably auto created) index "%s"' % main_index)
es.indices.delete(main_index)
es.indices.update_aliases({
'actions': [
{'add': {'index': temp_index, 'alias': main_index}},
{'add': {'index': temp_index, 'alias': main_index_base}},
]
})
self.stdout.write('')
self.stdout.write('Indexing finished.')
if options['queries']:
from django.db import connection
for query in connection.queries:
print query
def index_documents(self, mapping, temp_index_base):
model = mapping.get_model()
self.stdout.write('Indexing %s' % self.full_name(model))
if mapping.has_deleted():
model_objs = model.objects.filter(is_deleted=False)
else:
model_objs = model.objects.all()
index_objects(mapping, model_objs, temp_index_base, print_progress=True)
def model_targetted(self, mapping, specific_targets):
"""
        Check if the mapping is targeted for indexing.
"""
model = mapping.get_model()
model_short_name = model.__name__.lower()
model_full_name = self.full_name(model).lower()
model_mappings_name = mapping.get_mapping_type_name().lower()
for target in specific_targets:
if target.lower() in [model_short_name, model_full_name, model_mappings_name]:
return True
return False
def full_name(self, model):
"""
Get the fully qualified name of a model.
"""
return '%s.%s' % (model.__module__, model.__name__)
|
Python
| 0
|
@@ -4702,25 +4702,17 @@
ions
-.get(
+%5B
'force'
-, '')
+%5D
:%0A
|
6a8fba9bc6bb1108b048947b7ffc10c0904fba14
|
Move plugin loading to separate function
|
foob0t.py
|
foob0t.py
|
# Copyright 2017 Christoph Mende
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import telepot
from telepot.loop import MessageLoop
import plugin_loader
commands = {}
users = {}
username = None
for i in plugin_loader.get_plugins():
print('Loading plugin ' + i['name'])
plugin = plugin_loader.load_plugin(i)
commands.update(plugin.commands)
def handle(msg):
content_type, chat_type, chat_id = telepot.glance(msg)
print(content_type, chat_type, chat_id)
# reject non-text messages
if content_type != 'text':
return
# split message in command (first word) and args (rest)
argv = msg['text'].strip().split(' ', 1)
command = argv[0].lower()
args = None
if len(argv) == 2:
args = argv[1]
# reject non-commands
if not command.startswith('/'):
return
# strip / from command
command = command[1:]
# strip username from command
if command.endswith('@'+username):
command = command[:-len(username)-1]
# search for plugin handling command
for c in commands:
if c != command:
continue
# found it => look up user
uid = msg['from']['id']
user = users.setdefault(uid, msg['from'])
retval = commands[c](user, args)
bot.sendMessage(chat_id, retval)
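# e.g. for msg['text'] == '/roll@foob0t 2d6' with username == 'foob0t' (a
# hypothetical message), command ends up as 'roll' and args as '2d6' before
# the lookup above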
if len(sys.argv) < 2:
sys.exit('Usage: %s <telegram api token>' % sys.argv[0])
bot = telepot.Bot(sys.argv[1])
username = bot.getMe()['username']
MessageLoop(bot, handle).run_as_thread()
while 1:
time.sleep(10)
|
Python
| 0
|
@@ -711,25 +711,33 @@
%7B%7D%0A
-username = None%0A%0A
+%0Adef load_plugins():%0A
for
@@ -770,24 +770,28 @@
gins():%0A
+
+
print('Loadi
@@ -819,16 +819,20 @@
'%5D)%0A
+
+
plugin =
@@ -861,16 +861,20 @@
ugin(i)%0A
+
comm
@@ -1978,16 +1978,31 @@
rgv%5B1%5D)%0A
+load_plugins()%0A
username
|
6903779f0d34145af1f13fef7f4e07b605aec3d0
|
Update __init__.py
|
cactusbot/commands/__init__.py
|
cactusbot/commands/__init__.py
|
"""Handle commands."""
from .command import Command
from .magic import COMMANDS
__all__ = ["Command", "COMMANDS]
|
Python
| 0.000072
|
@@ -106,10 +106,11 @@
COMMANDS
+%22
%5D%0A
|
3fb8fa8b45b31c34f6c12882f870dfe19c0f7109
|
version 0.0.1 update
|
src/parse_anot.py
|
src/parse_anot.py
|
#! /usr/bin/env python
def process_ref_line(line):
# extract last column where the attributes are
attribute = line[:-1].split("\t")[-1]
attribute = attribute.split(";")
return attribute
def update_gene_set(attribute, gene_set):
#determine the association between gene_id and (gene_type, gene_name)
#update in gene_set: {gene_id:[gene_type, gene_name]}
gene_id, gene_type, gene_name = [item.split("=")[-1]
for item in attribute
if item.split("=")[0] in
("gene_id", "gene_type", "gene_name")]
if gene_id not in gene_set:
gene_set[gene_id] = (gene_type, gene_name)
def process_cor_line(line):
#process each line,
#return coordinate info & feature ids
cor_info, feature_ids = line[:-1].split("|")
return cor_info, feature_ids
def determine_feature(feature_ids, gene_set):
# determine appropriate features based on the reference ids
    # GENCODE uses the following rules to register different features
    # for gene and transcript: only use ID
    # for other features such as exon, UTR, intron, etc. use feature:transcript_ID:number
    # if there is no overlap with known gene coordinates
    # it is annotated as intergenic
    # if there is overlap with known gene coordinates
    # make a step-wise determination following the order: UTR3, UTR5, exon
    # if the above features are not associated, then it is annotated as intron
if feature_ids == "":
gene_type, gene_name, feature, gene_id = "None", "None", "intergenic", "None"
else:
if "UTR5" in feature_ids:
feature = "UTR5"
elif "UTR3" in feature_ids:
feature = "UTR3"
elif "exon" in feature_ids:
feature = "exon"
else:
feature = "intron"
for feature_id in feature_ids.split(";"):
if gene_set.has_key(feature_id):
gene_id = feature_id
gene_type, gene_name = gene_set[gene_id]
break
return "\t".join([gene_type, gene_name, feature, gene_id])
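# e.g. (hypothetical IDs): with gene_set = {"ENSG1": ("protein_coding", "FOO")},
# determine_feature("exon:ENST1:2;UTR3:ENST1:1;ENSG1", gene_set)
# returns "protein_coding\tFOO\tUTR3\tENSG1" -- UTR3 takes precedence over exon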
def check_argv(argv):
usage = """
parse_anot.py
version: 0.0.1
authors: Dilmurat Yusuf
Usage:
$ parse_anot.py -r track < bedmapBED > outputBed
$ bedmap_upstream_process ... | parse_anot.py -r track > outputBed
The output consists of coordinate info, gene_type, gene_name, feature, gene_id.
    The output is in tab-delimited BED format.
    Parse feature ids from bedmap output and then map them to genomic features
    according to the given track which is specified by '-r' or '--track'.
    The script assumes each coordinate is only associated with a single gene_id.
    The reported features include exon, UTR, CDS, intron, intergenic,
    gene_type, gene_name, gene_id.
    For a coordinate which is associated with a gene_id
    but NOT with exons, it is annotated as intron.
    For a coordinate which is NOT associated with a gene_id,
    it is annotated as intergenic.
"""
try:
opts, args = getopt.getopt(argv,"hr:",["track="])
except getopt.GetoptError:
print usage
sys.exit(1)
if opts == []:
print usage
sys.exit(1)
for opt, arg in opts:
if opt == '-h' :
print usage
sys.exit()
elif opt in ("-r", "--track"):
track = arg
return track
if __name__ == '__main__':
import sys, getopt
"""
the script consists of two components
component 1: retrieve reference info
update gene_set = {gene_id:[gene_type, gene_name]}
"""
track = check_argv(sys.argv[1:])
# extract attribute columns
with open(track) as read_track:
attribute_set = [process_ref_line(line) for line in read_track]
# gene_set: {gene_id:[gene_type, gene_name]}
gene_set = {}
# update gene set
[update_gene_set(attribute, gene_set) for attribute in attribute_set]
#----------------------------------------------------------
"""
component 2: map ids to features
"""
read_cor_ids = sys.stdin
#extract coordinate info & feature ids
cor_ID_set = [process_cor_line(line) for line in read_cor_ids]
#determine appropriate features based on the reference ids
# cor_id[0] : coordinate info
# cor_id[1] : feature ids
cor_feature_set = ["\t".join([
cor_id[0], determine_feature(cor_id[1], gene_set)
])
for cor_id in cor_ID_set]
#transform list into strings
print "\n".join(cor_feature_set)
|
Python
| 0
|
@@ -1436,20 +1436,26 @@
= %22
-None%22, %22None
+unknown%22, %22unknown
%22,
@@ -1473,12 +1473,15 @@
%22, %22
-None
+unknown
%22%0A%09%09
@@ -1593,24 +1593,73 @@
%22UTR3%22%0A%09%09%09%0A
+%09%09elif %22CDS%22 in feature_ids:%0A%09%09%09feature = %22CDS%22%0A%0A
%09%09elif %22exon
@@ -1667,32 +1667,33 @@
in feature_ids:
+%09
%0A%09%09%09feature = %22e
|
5e8b82130a0bd0d63629e725fc06380105955274
|
Update data migration
|
osf/migrations/0084_preprint_node_divorce.py
|
osf/migrations/0084_preprint_node_divorce.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-03-12 18:25
from __future__ import unicode_literals
from django.db import migrations
from django.db import transaction
def divorce_preprints_from_nodes(apps, schema_editor):
Preprint = apps.get_model('osf', 'PreprintService')
PreprintContributor = apps.get_model('osf', 'PreprintContributor')
# tried to use F() function here but F() doesn't support table joins
# instead, using the following to make this transaction atomic
with transaction.atomic():
for preprint in Preprint.objects.filter(node__isnull=False):
preprint.title = preprint.node.title
preprint.description = preprint.node.description
preprint.creator = preprint.node.creator
preprint.save()
for preprint in Preprint.objects.all():
if preprint.node:
# preprint.title = preprint.node.title
# preprint.description = preprint.node.description
# preprint.creator = preprint.node.creator
# use bulk create
for contrib in preprint.node._contributors:
# make a PreprintContributor that points to the pp instead of the node
new_contrib = PreprintContributor.objects.create()
new_contrib.primary_identifier_name = contrib.primary_identifier_name
new_contrib.read = contrib.read
new_contrib.write = contrib.write
new_contrib.admin = contrib.admin
new_contrib.visible = contrib.visible
new_contrib.user = contrib.user
new_contrib.preprint = preprint
new_contrib.save()
preprint._contributors.add(new_contrib)
# will existing nodes attached to preprints still by accessible? A: yes!
preprint.save()
class Migration(migrations.Migration):
dependencies = [
('osf', '0083_update_preprint_model_for_divorce'),
]
operations = [
migrations.RunPython(divorce_preprints_from_nodes)
]
|
Python
| 0.000001
|
@@ -862,177 +862,8 @@
de:%0A
- # preprint.title = preprint.node.title%0A # preprint.description = preprint.node.description%0A # preprint.creator = preprint.node.creator%0A
@@ -929,17 +929,16 @@
nt.node.
-_
contribu
@@ -940,17 +940,26 @@
tributor
-s
+_set.all()
:%0A
@@ -1059,58 +1059,66 @@
-new_contrib = PreprintContributor.objects.create()
+# because there's a throughtable, relations are designated
%0A
@@ -1134,175 +1134,171 @@
-new_contrib.primary_identifier_name = contrib.primary_identifier_name%0A new_contrib.read = contrib.read%0A new_contrib.write = contrib.wri
+# solely on the through model, and adds on the related models%0A # are not required.%0A new_contrib = PreprintContributor.objects.crea
te
+(
%0A
@@ -1314,139 +1314,103 @@
-new_contrib.admin = contrib.admin%0A new_contrib.visible = contrib.visible%0A new_contrib.user =
+ preprint=preprint,%0A user=contrib.user,%0A read=
contrib.
user
@@ -1405,20 +1405,21 @@
contrib.
-user
+read,
%0A
@@ -1431,40 +1431,37 @@
-new_contrib.preprint = preprint%0A
+ write=contrib.write,%0A
@@ -1476,20 +1476,22 @@
-new_
+admin=
contrib.
save
@@ -1486,22 +1486,22 @@
contrib.
-save()
+admin,
%0A
@@ -1513,133 +1513,58 @@
-preprint._contributors.add(new_contrib)%0A # will existing nodes attached to preprints still by accessible? A: yes!%0A
+ visible=contrib.visible%0A )%0A
@@ -1567,32 +1567,35 @@
-preprint
+new_contrib
.save()%0A%0A%0Acl
|
401ec02e1c0f967fc53e7b2da5c9d56e4fb23df3
|
Add document
|
cupy/linalg/solve.py
|
cupy/linalg/solve.py
|
import numpy
from numpy import linalg
import six
import cupy
from cupy import cuda
from cupy.cuda import cublas
from cupy.cuda import device
from cupy.linalg import util
if cuda.cusolver_enabled:
from cupy.cuda import cusolver
def solve(a, b):
'''Solves a linear matrix equation.
It computes the exact solution of ``x`` in ``ax = b``,
where ``a`` is a square and full rank matrix.
Args:
a (cupy.ndarray): The matrix with dimension ``(M, M)``
b (cupy.ndarray): The vector with ``M`` elements, or
the matrix with dimension ``(M, K)``
.. seealso:: :func:`numpy.linalg.solve`
'''
# NOTE: Since cusolver in CUDA 8.0 does not support gesv,
# we manually solve a linear system with QR decomposition.
# For details, please see the following:
# http://docs.nvidia.com/cuda/cusolver/index.html#qr_examples
if not cuda.cusolver_enabled:
raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')
# TODO(Saito): Current implementation only accepts two-dimensional arrays
util._assert_cupy_array(a, b)
util._assert_rank2(a)
util._assert_nd_squareness(a)
if 2 < b.ndim:
raise linalg.LinAlgError(
'{}-dimensional array given. Array must be '
'one or two-dimensional'.format(b.ndim))
if len(a) != len(b):
raise linalg.LinAlgError(
'The number of rows of array a must be '
'the same as that of array b')
# Cast to float32 or float64
if a.dtype.char == 'f' or a.dtype.char == 'd':
dtype = a.dtype.char
else:
dtype = numpy.find_common_type((a.dtype.char, 'f'), ()).char
m, k = (b.size, 1) if b.ndim == 1 else b.shape
a = a.transpose().astype(dtype, order='C', copy=True)
b = b.transpose().astype(dtype, order='C', copy=True)
cusolver_handle = device.get_cusolver_handle()
cublas_handle = device.get_cublas_handle()
dev_info = cupy.empty(1, dtype=numpy.int32)
if dtype == 'f':
geqrf = cusolver.sgeqrf
geqrf_bufferSize = cusolver.sgeqrf_bufferSize
ormqr = cusolver.sormqr
trsm = cublas.strsm
else: # dtype == 'd'
geqrf = cusolver.dgeqrf
geqrf_bufferSize = cusolver.dgeqrf_bufferSize
ormqr = cusolver.dormqr
trsm = cublas.dtrsm
# 1. QR decomposition (A = Q * R)
buffersize = geqrf_bufferSize(cusolver_handle, m, m, a.data.ptr, m)
workspace = cupy.empty(buffersize, dtype=dtype)
tau = cupy.empty(m, dtype=dtype)
geqrf(
cusolver_handle, m, m, a.data.ptr, m,
tau.data.ptr, workspace.data.ptr, buffersize, dev_info.data.ptr)
_check_status(dev_info)
# 2. ormqr (Q^T * B)
ormqr(
cusolver_handle, cublas.CUBLAS_SIDE_LEFT, cublas.CUBLAS_OP_T,
m, k, m, a.data.ptr, m, tau.data.ptr, b.data.ptr, m,
workspace.data.ptr, buffersize, dev_info.data.ptr)
_check_status(dev_info)
# 3. trsm (X = R^{-1} * (Q^T * B))
trsm(
cublas_handle, cublas.CUBLAS_SIDE_LEFT, cublas.CUBLAS_FILL_MODE_UPPER,
cublas.CUBLAS_OP_N, cublas.CUBLAS_DIAG_NON_UNIT,
m, k, 1, a.data.ptr, m, b.data.ptr, m)
return b.transpose()
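# A minimal usage sketch (hypothetical values):
#
#   a = cupy.array([[3., 1.], [1., 2.]])
#   b = cupy.array([9., 8.])
#   x = solve(a, b)       # x is approximately [2., 3.]
#   cupy.dot(a, x)        # approximately [9., 8.]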
def _check_status(dev_info):
status = int(dev_info)
if status < 0:
raise linalg.LinAlgError(
'Parameter error (maybe caused by a bug in cupy.linalg?)')
def tensorsolve(a, b, axes=None):
'''Solves tensor equations denoted by ``ax = b``.
Suppose that ``b`` is equivalent to ``cupy.tensordot(a, x)``.
This function computes tensor ``x`` from ``a`` and ``b``.
Args:
a (cupy.ndarray): The tensor with ``len(shape) >= 1``
b (cupy.ndarray): The tensor with ``len(shape) >= 1``
axes (tuple of ints): Axes in ``a`` to reorder to the right
before inversion.
.. seealso:: :func:`numpy.linalg.tensorsolve`
'''
if axes is not None:
allaxes = list(six.moves.range(a.ndim))
for k in axes:
allaxes.remove(k)
allaxes.insert(a.ndim, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(a.ndim - b.ndim):]
prod = numpy.prod(oldshape)
a = a.reshape(-1, prod)
b = b.ravel()
result = solve(a, b)
return result.reshape(oldshape)
# TODO(okuta): Implement lstsq
def inv(a):
'''Computes the inverse of a matrix.
This function computes matrix ``a_inv`` from n-dimensional regular matrix
``a`` such that ``dot(a, a_inv) == eye(n)``.
Args:
a (cupy.ndarray): The regular matrix
.. seealso:: :func:`numpy.linalg.inv`
'''
if not cuda.cusolver_enabled:
raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')
util._assert_cupy_array(a)
util._assert_rank2(a)
util._assert_nd_squareness(a)
b = cupy.eye(len(a), dtype=a.dtype)
return solve(a, b)
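# A quick sanity check (hypothetical value):
#   inv(2. * cupy.eye(3)) is approximately 0.5 * cupy.eye(3)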
# TODO(okuta): Implement pinv
# TODO(okuta): Implement tensorinv
|
Python
| 0
|
@@ -4553,16 +4553,77 @@
matrix%0A%0A
+ Returns:%0A cupy.ndarray: The inverse of a matrix.%0A%0A
.. s
|
dd2f7da18fb295d58ac763ee7e91b9b1a5bdf1d0
|
Update __about__.py
|
bcrypt/__about__.py
|
bcrypt/__about__.py
|
# Author:: Donald Stufft (<donald@stufft.io>)
# Copyright:: Copyright (c) 2013 Donald Stufft
# License:: Apache License, Version 2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "bcrypt"
__summary__ = "Modern password hashing for your software and your servers"
__uri__ = "https://github.com/dstufft/bcrypt/"
__version__ = "1.0.2"
__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2013 Donald Stufft"
|
Python
| 0.00002
|
@@ -1046,15 +1046,12 @@
com/
-dstufft
+pyca
/bcr
|
d7c35749c682cb86356cdf825f3886e22b07942a
|
Add --refresh command line argument to Django admin command build_genome_blastdb
|
src/edge/management/commands/build_genome_blastdb.py
|
src/edge/management/commands/build_genome_blastdb.py
|
from edge.blastdb import build_all_genome_dbs
from django.core.management.base import BaseCommand
class Command(BaseCommand):
def handle(self, *args, **options):
build_all_genome_dbs()
|
Python
| 0.000001
|
@@ -133,67 +133,360 @@
def
-handle(self, *args, **options):%0A build_all_genome_dbs(
+add_arguments(self, parser):%0A parser.add_argument(%0A '--refresh',%0A action='store_true',%0A help='Rebuild BLAST database files',%0A )%0A%0A def handle(self, *args, **options):%0A if options%5B'refresh'%5D:%0A build_all_genome_dbs(refresh=True)%0A else:%0A build_all_genome_dbs(refresh=False
)%0A
|
a3527b9a0ffb494bda40c1e8ac10fb8f4ae2aa51
|
Add outdir option
|
src/periodical.py
|
src/periodical.py
|
#!/usr/bin/env python2
# Copyright 2012 Tom Vincent <http://tlvince.com/contact/>
'''Create a Kindle periodical from given URL(s).'''
import os
import sys
import logging
import argparse
import tempfile
import datetime
import subprocess
import yaml
from urlparse import urlparse
from bs4 import BeautifulSoup
from boilerpipe.extract import Extractor
def extract(url):
'''Extract content from a given URL.'''
# Using python-boilerpipe
extractor = Extractor(extractor='ArticleExtractor', url=url)
return extractor.getHTML()
def format_boilerpipe(html, url):
'''Return a formatted version of boilerpipe's HTML output.'''
soup = BeautifulSoup(html)
style = soup.style.extract()
head = soup.new_tag('head')
title = soup.new_tag('title')
title.string = getattr(soup.h1, 'string', urlparse(url)[1])
meta = soup.new_tag('meta')
meta['http-equiv'] = 'Content-Type'
meta['content'] = 'text/html; charset=UTF-8'
head.append(title)
head.append(meta)
head.append(style)
soup.body.insert_before(head)
return soup
def write_yaml(title, author, subject, out_path):
'''Write document YAML for kindlerb.'''
date = datetime.datetime.now()
mobi = '{0}-{1}.mobi'.format(title.lower(),
date.strftime('%Y%m%d%H%M%S'))
doc = {
'doc_uuid': '{0}-{1}'.format(title.lower(),
date.strftime('%Y%m%d%H%M%S')),
'title': '{0} {1}'.format(title, date.strftime('%Y-%m-%d')),
'author': author,
'publisher': author,
'subject': subject,
'date': date.strftime('%Y-%m-%d'),
'mobi_outfile': mobi,
}
with open(os.path.join(out_path, '_document.yml'), 'w') as out:
yaml.dump(doc, out)
return mobi
def write_html(out_path, html, subject, count):
'''Generate stripped HTML file for the given URL.'''
section = os.path.join(out_path, 'sections', str(count))
os.makedirs(section)
html_path = os.path.join(section, '{0}.html'.format(count))
with open(os.path.join(section, '_section.txt'), 'w') as section_file:
section_file.write(subject)
with open(html_path, 'w') as html_file:
html_file.write(html.encode('utf8'))
def parse_args():
'''Parse the command-line arguments.'''
parser = argparse.ArgumentParser(description=__doc__.split('\n')[0],
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('urls', nargs='+', help='the URL(s) to parse')
meta = parser.add_argument_group('meta', description='Periodical meta data')
meta.add_argument('--title', default='Periodical',
help='the periodical title')
meta.add_argument('--author', default='Tom Vincent',
help='the periodical author')
meta.add_argument('--subject', default='News',
help='the periodical subject')
return parser.parse_args()
def which(cmd):
'''Check if a command is in $PATH
From: http://stackoverflow.com/q/377017
'''
def is_exe(fpath):
'''Helper to check if file is executable.'''
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, _ = os.path.split(cmd)
if fpath:
if is_exe(cmd):
return cmd
else:
for path in os.environ['PATH'].split(os.pathsep):
exe_file = os.path.join(path, cmd)
if is_exe(exe_file):
return exe_file
return None
def have_depends():
'''Exit if required dependencies are not found.'''
deps = ['kindlerb']
for dep in deps:
if which(dep) is None:
logging.error("Dependency '{0}' not installed".format(dep))
sys.exit(1)
def main():
'''Start execution of periodicals.py.'''
args = parse_args()
logging.basicConfig(format='%(filename)s: %(levelname)s: %(message)s',
level=logging.INFO)
have_depends()
tmp = tempfile.mkdtemp()
mobi = write_yaml(args.title, args.author, args.subject, tmp)
for index, url in enumerate(args.urls):
html = extract(url)
formatted = format_boilerpipe(html, url)
write_html(tmp, formatted, args.subject, index)
if os.path.exists(os.path.join(tmp, 'sections', '0', '0.html')):
subprocess.call(['kindlerb', tmp])
logging.info("Periodical created at '{0}'".format(
os.path.join(tmp, mobi)))
if __name__ == '__main__':
main()
|
Python
| 0.000005
|
@@ -2558,16 +2558,110 @@
parse')
+%0A parser.add_argument('--outdir', default='~',%0A help='directory to write mobi file')
%0A%0A me
|
a02a5447482db1288bdc332aab5a544764fb3fa0
|
remove low frequencies
|
index/messung/energy.py
|
index/messung/energy.py
|
#!/usr/bin/env python
import sys, os, time, socket
PORT = 1337
SAMPLES = 3
freqs = [ 30, 48, 60, 72, 84, 96, 120, 132, 144, 156, 168, 180, 192, 204,
216, 240, 264, 288, 336, 360, 384, 408, 480, 528, 600, 648, 672, 696,
720, 744, 768, 816, 864, 912, 960, 1008 ]
def server():
print "server"
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind(("", PORT))
s.listen(1)
while 1:
conn, addr = s.accept()
print "connected by", addr
while 1:
data = conn.recv(1024)
if data == "": break
print " cpus | workers | freq in MHz | input in MB | time in s"
print "------+---------+-------------+-------------+-----------"
cmd = eval(data)
cpus = cmd["cpus"]
workers = cmd["workers"]
freq = cmd["freq"]
input_len = cmd["input_len"]
os.system("cpufreq-set --governor userspace")
os.system("cpufreq-set --min 30000")
if socket.gethostname() == "raspberrypi":
os.system("cpufreq-set --max 700000")
else:
os.system("cpufreq-set --max 1008000")
os.system("echo %d > /sys/devices/system/cpu/cpu1/online" % int(cpus == 2))
os.system("cpufreq-set --freq %d000" % freq)
time.sleep(1)
f = int(file("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq").read()) / 1000
assert freq == f, "%d != %d" % (freq, f)
conn.sendall("start") # tell client to begin tracking current
if input_len > 0:
out = os.popen("../index mr %d ../wiki/test_%d.txt" % (workers, input_len)).read()
conn.sendall(out)
else:
time.sleep(20)
out = "0"
conn.sendall(out)
print " %4d | %7d | %11d | %11d | %9.3f" % (
cpus, workers, freq, input_len, float(out))
print
conn.close()
################################################################################
class Tee(object):
def __init__(self, name="out", mode="a"):
self.file = open(name, mode)
self.stdout = sys.stdout
sys.stdout = self
def __del__(self):
sys.stdout = self.stdout
self.file.close()
def write(self, data):
self.file.write(data)
self.stdout.write(data)
def read_current(cambri, slot):
cambri.write("state %d\r\n" % slot)
q = ""
while q[-5:] != "\r\n>> ": q += cambri.read(1)
return int(q.split("\r\n")[1].split(",")[1])
def client(host):
print "client"
tee = Tee()
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, PORT))
import serial
cambri = serial.Serial("/dev/ttyUSB0", 115200, 8, "N", 1)
print " cpus | workers | freq in MHz | input in MB | time in s | current in mA"
print "------+---------+-------------+-------------+-----------+---------------"
for input_len in [0, 25, 50, 100]:
for cpus in [1, 2]:
for workers in [0, 1, 2] if input_len else [0]:
for freq in freqs:
time = 0
current = 0
for i in range(SAMPLES):
# send command
s.sendall(repr({
"cpus": cpus,
"workers": workers,
"freq": freq,
"input_len": input_len
}))
start = s.recv(1024)
# track current
s.setblocking(0)
currents = []
while 1:
currents.append(read_current(cambri, 1))
try:
t = float(s.recv(1024))
break
except: continue
s.setblocking(1)
# estimate mean
c = sum(currents) / float(len(currents))
time += t
current += c
time /= SAMPLES
current /= SAMPLES
print " %4d | %7d | %11d | %11d | %9.3f | %13.2f" % (
cpus, workers, freq, input_len, time, current)
s.close()
if __name__ == "__main__":
if len(sys.argv) > 1:
host = sys.argv[1]
if host == "-": host = "192.168.1.42"
client(host)
else: server()
|
Python
| 0.999966
|
@@ -79,17 +79,19 @@
reqs = %5B
-
+%0A%09#
30, 48,
@@ -144,20 +144,22 @@
192,
-
+%0A%09#
204,
-%0A%09
+
216,
-
+%0A%09
240,
@@ -2623,16 +2623,17 @@
-----%22%0A%0A
+#
%09for inp
@@ -2665,51 +2665,145 @@
%5D:%0A%09
-%09for cpus in %5B1, 2%5D:%0A%09%09%09for workers in %5B0,
+for input_len in %5B100%5D:%0A#%09%09for cpus in %5B1, 2%5D:%0A%09%09for cpus in %5B2%5D:%0A%09%09%09#for workers in %5B0, 1, 2%5D if input_len else %5B0%5D:%0A%09%09%09for workers in %5B
1, 2
|
f7a6dc48c6d65c3937322d9e4f6e2f5e1176b03b
|
fix issue where a field may not exist in the data
|
beaver/transport.py
|
beaver/transport.py
|
import os
def create_transport(beaver_config, file_config, logger):
"""Creates and returns a transport object"""
transport_str = beaver_config.get('transport')
if '.' not in transport_str:
# allow simple names like 'redis' to load a beaver built-in transport
module_path = 'beaver.%s_transport' % transport_str.lower()
class_name = '%sTransport' % transport_str.title()
else:
# allow dotted path names to load a custom transport class
try:
module_path, class_name = transport_str.rsplit('.', 1)
except ValueError:
raise Exception('Invalid transport {0}'.format(beaver_config.get('transport')))
_module = __import__(module_path, globals(), locals(), class_name, -1)
transport_class = getattr(_module, class_name)
transport = transport_class(beaver_config, file_config, logger)
return transport
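# How the two naming styles above resolve (the dotted example is hypothetical):
#
#   'redis'                        -> module 'beaver.redis_transport',
#                                     class 'RedisTransport'
#   'mypkg.transports.MyTransport' -> module 'mypkg.transports',
#                                     class 'MyTransport'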
class Transport(object):
def __init__(self, beaver_config, file_config, logger=None):
"""Generic transport configuration
Will attach the file_config object, setup the
current hostname, and ensure we have a proper
formatter for the current transport
"""
self._current_host = beaver_config.get('hostname')
self._file_config = file_config
self._is_valid = True
self._logger = logger
if beaver_config.get('format') == 'msgpack':
import msgpack
packer = msgpack.Packer()
self._formatter = packer.pack
elif beaver_config.get('format') == 'json':
# priority: ujson > simplejson > jsonlib2 > json
priority = ['ujson', 'simplejson', 'jsonlib2', 'json']
for mod in priority:
try:
json = __import__(mod)
self._formatter = json.dumps
except ImportError:
pass
else:
break
elif beaver_config.get('format') == 'rawjson':
# priority: ujson > simplejson > jsonlib2 > json
priority = ['ujson', 'simplejson', 'jsonlib2', 'json']
for mod in priority:
try:
json = __import__(mod)
def rawjson_formatter(data):
json_data = json.loads(data['@message'])
for field in {'@source', '@type', '@tags', '@source_host', '@source_path'}:
json_data[field] = data[field]
return json.dumps(json_data)
self._formatter = rawjson_formatter
except ImportError:
pass
else:
break
elif beaver_config.get('format') == 'string':
def string_formatter(data):
return "[{0}] [{1}] {2}".format(data['@source_host'], data['@timestamp'], data['@message'])
self._formatter = string_formatter
else:
def null_formatter(data):
return data['@message']
self._formatter = null_formatter
def callback(self, filename, lines):
"""Processes a set of lines for a filename"""
return True
def reconnect(self):
"""Allows reconnection from when a handled
TransportException is thrown"""
return True
def interrupt(self):
"""Allows keyboard interrupts to be
handled properly by the transport
"""
return True
def unhandled(self):
"""Allows unhandled exceptions to be
handled properly by the transport
"""
return True
def format(self, filename, timestamp, line):
"""Returns a formatted log line"""
return self._formatter({
'@source': "file://{0}{1}".format(self._current_host, filename),
'@type': self._file_config.get('type', filename),
'@tags': self._file_config.get('tags', filename),
'@fields': self._file_config.get('fields', filename),
'@timestamp': timestamp,
'@source_host': self._current_host,
'@source_path': filename,
'@message': line.strip(os.linesep),
})
def addglob(self, globname, globbed):
self._file_config.addglob(globname, globbed)
def valid(self):
return self._is_valid
class TransportException(Exception):
pass
|
Python
| 0.000001
|
@@ -2443,16 +2443,67 @@
path'%7D:%0A
+ if field in data:%0A
|
f7f576adfccdfbc386c991bb35f2a52e9db19b5e
|
remove hack
|
tracker.py
|
tracker.py
|
from utils import *
from pprint import pprint
import sys
from State import state
from player import PlayerEventCallbacks
import lastfm
def track(event, args, kwargs):
print "track:", repr(event), repr(args), repr(kwargs)
if event is PlayerEventCallbacks.onSongChange:
oldSong = kwargs["oldSong"]
newSong = kwargs["newSong"]
if oldSong is newSong: print "** something strange. oldSong is newSong" # TODO: fix
elif oldSong: oldSong.close() # in case anyone is holding any ref to it, close at least the file
if "artist" not in newSong.metadata:
print "new song metadata is incomplete:", newSong.metadata
else:
print "new song:", newSong.fileext, ",", newSong.artist, "-", newSong.track, ",", formatTime(newSong.duration)
pprint(newSong.metadata)
lastfm.onSongChange(newSong)
if event is PlayerEventCallbacks.onSongFinished:
song = kwargs["song"]
lastfm.onSongFinished(song)
def trackerMain():
lastfm.login()
for ev,args,kwargs in state.updates.read():
try:
track(ev, args, kwargs)
except:
sys.excepthook(*sys.exc_info())
lastfm.quit()
|
Python
| 0
|
@@ -336,96 +336,8 @@
%09if
-oldSong is newSong: print %22** something strange. oldSong is newSong%22 # TODO: fix%0A%09%09elif
oldS
|
587abec7ff5b90c03885e164d9b6b62a1fb41f76
|
Fix the headers sent by the GitHub renderer.
|
grip/github_renderer.py
|
grip/github_renderer.py
|
from flask import abort, json
import requests
def render_content(text, gfm=False, context=None,
username=None, password=None):
"""Renders the specified markup using the GitHub API."""
if gfm:
url = 'https://api.github.com/markdown'
data = {'text': text, 'mode': 'gfm'}
if context:
data['context'] = context
data = json.dumps(data)
else:
url = 'https://api.github.com/markdown/raw'
data = text
headers = {'content-type': 'text/plain'}
auth = (username, password) if username else None
r = requests.post(url, headers=headers, data=data, auth=auth)
# Relay HTTP errors
if r.status_code != 200:
try:
message = r.json()['message']
except:
message = r.text
abort(r.status_code, message)
return r.text
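# A minimal usage sketch (the repository context is hypothetical):
#
#   html = render_content('Fixes #1', gfm=True, context='joeyespo/grip')
#   # with gfm and a context, GitHub links the '#1' issue reference;
#   # without gfm, the text is posted verbatim to the raw endpoint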
|
Python
| 0
|
@@ -397,16 +397,71 @@
s(data)%0A
+ headers = %7B'content-type': 'application/json'%7D%0A
else
@@ -462,16 +462,16 @@
else:%0A
-
@@ -530,24 +530,28 @@
data = text%0A
+
headers
@@ -579,16 +579,22 @@
ext/
-plai
+x-markdow
n'%7D%0A
+%0A
@@ -643,17 +643,16 @@
se None%0A
-%0A
r =
|
67f64792dc7321cd9521e927b4eb1a58b67cdcdc
|
Allow passing of direct function reference to url triple
|
brink/server.py
|
brink/server.py
|
from aiohttp import web
from brink.config import config
from brink.db import conn
from brink.handlers import __handler_wrapper, __ws_handler_wrapper
from brink.utils import resolve_func
from brink.cli import print_globe, print_info
import importlib
import aiohttp_autoreload
import logging
def run_server(conf):
for cfg in vars(conf):
if cfg[:2] != "__":
config.set(cfg, getattr(conf, cfg))
# Setup database config for later use
conn.setup(config.get("DATABASE", {}))
# Resolve middleware
middleware = [resolve_func(func) for
func in config.get("MIDDLEWARE", [])]
server = web.Application(middlewares=middleware)
logger = logging.getLogger("brink")
# Iterate over all installed apps and add their routes
for app in config.get("INSTALLED_APPS", []):
__load_app(server, app)
# Enable source code auto reload on change only if DEBUG is enabled
if config.get("DEBUG"):
aiohttp_autoreload.add_reload_hook(
lambda: print_info("Detected code change. Reloading...",
spaced=True))
aiohttp_autoreload.start()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
server.make_handler(access_log=logger)
port = config.get("PORT", 8888)
print_globe("Server listening on port %s\n" % port)
web.run_app(server, port=port, print=lambda *args: None)
def __load_app(server, package):
urls = importlib.import_module("%s.urls" % package)
for url in urls.urls:
__add_route(server, url, package)
def __add_route(server, url, package):
(method, route, handler) = url
handler_wrapper = __ws_handler_wrapper if method == "WS" \
else __handler_wrapper
try:
handler_func = resolve_func(handler)
except ModuleNotFoundError:
handler_func = resolve_func("%s.%s" % (package, handler))
handler_func = handler_wrapper(handler_func)
if method == "GET" or method == "WS":
server.router.add_get(route, handler_func)
elif method == "POST":
server.router.add_post(route, handler_func)
elif method == "PUT":
server.router.add_put(route, handler_func)
elif method == "PATCH":
server.router.add_patch(route, handler_func)
elif method == "DELETE":
server.router.add_delete(route, handler_func)
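# The url triples consumed above look like (routes and handlers hypothetical):
#
#   urls = [
#       ('GET', '/items', 'views.list_items'),  # resolved relative to the app package
#       ('WS', '/live', 'views.live_feed'),     # 'WS' routes get the websocket wrapper
#   ]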
|
Python
| 0
|
@@ -1820,16 +1820,49 @@
rapper%0A%0A
+ if type(handler) is str:%0A
try:
@@ -1862,32 +1862,36 @@
try:%0A
+
handler_func = r
@@ -1907,24 +1907,28 @@
nc(handler)%0A
+
except M
@@ -1947,32 +1947,36 @@
dError:%0A
+
+
handler_func = r
@@ -2016,16 +2016,57 @@
andler))
+%0A else:%0A handler_func = handler
%0A%0A ha
|
6156960333163e15fd2ddd96e831bbdf2e92163d
|
Correct reference to organization
|
src/sentry/api/bases/organization.py
|
src/sentry/api/bases/organization.py
|
from __future__ import absolute_import
from sentry.api.base import Endpoint
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.permissions import ScopedPermission
from sentry.models import AuthIdentity, Organization, OrganizationMember
class OrganizationPermission(ScopedPermission):
scope_map = {
'GET': ['org:read', 'org:write', 'org:delete'],
'POST': ['org:write', 'org:delete'],
'PUT': ['org:write', 'org:delete'],
'DELETE': ['org:delete'],
}
def has_object_permission(self, request, view, organization):
if request.auth:
if self.is_project_key(request):
return False
return request.auth.organization_id == organization.id
if request.user.is_superuser:
return True
try:
om = OrganizationMember.objects.get(
organization=organization,
user=request.user,
)
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
)
except AuthIdentity.DoesNotExist:
pass
else:
# TODO(dcramer): we might simply want to change their scopes to
# something like 'org:read' since we'd still want them to know
# they're part of the org. Alternatively we introduce yet another
# scope that suggests extremely limited read.
if not auth_identity.is_valid(om):
return False
allowed_scopes = set(self.scope_map[request.method])
current_scopes = om.scopes
return any(s in allowed_scopes for s in current_scopes)
class OrganizationEndpoint(Endpoint):
permission_classes = (OrganizationPermission,)
def convert_args(self, request, organization_slug, *args, **kwargs):
try:
organization = Organization.objects.get_from_cache(
slug=organization_slug,
)
except Organization.DoesNotExist:
raise ResourceDoesNotExist
self.check_object_permissions(request, organization)
kwargs['organization'] = organization
return (args, kwargs)
|
Python
| 0.000002
|
@@ -1138,21 +1138,16 @@
ization=
-self.
organiza
@@ -1150,17 +1150,17 @@
nization
-_
+.
id,%0A
|
606213f51e0f887f0d353b072b099b9770cc41af
|
Implement open_repository() as an alias to find_repository()
|
format.py
|
format.py
|
# Foreign branch support for Subversion
#
# Copyright (C) 2006 Jelmer Vernooij <jelmer@samba.org>
from bzrlib.bzrdir import BzrDirFormat, BzrDir
from repository import SvnRepository
from branch import SvnBranch
import svn.client
from libsvn._core import SubversionException
from bzrlib.errors import NotBranchError
from bzrlib.lockable_files import TransportLock
class SvnRemoteAccess(BzrDir):
def __init__(self, _transport, _format):
self.root_transport = self.transport = _transport
self._format = _format
if _transport.url.startswith("svn://") or \
_transport.url.startswith("svn+ssh://"):
self.url = _transport.url
elif _transport.url.startswith("file://"):
self.working_dir = _transport.url
self.url= svn.client.url_from_path(self.working_dir.encode('utf8'),self.pool)
else:
self.url = _transport.url[4:] # Skip svn+
def clone(self, url, revision_id=None, basis=None, force_new_repo=False):
raise NotImplementedError(SvnRemoteAccess.clone)
def find_repository(self):
repos = SvnRepository(self, self.url)
repos._format = self._format
return repos
def open_workingtree(self):
return None
def create_workingtree(self):
return None #FIXME
def open_branch(self, unsupported=True):
try:
branch = SvnBranch(self.find_repository(),self.url)
except SubversionException, (msg, num):
if num == svn.core.SVN_ERR_RA_ILLEGAL_URL or \
num == svn.core.SVN_ERR_WC_NOT_DIRECTORY or \
num == svn.core.SVN_ERR_RA_NO_REPOS_UUID or \
num == svn.core.SVN_ERR_RA_SVN_REPOS_NOT_FOUND or \
num == svn.core.SVN_ERR_RA_DAV_REQUEST_FAILED:
raise NotBranchError(path=self.url)
except:
raise
branch.bzrdir = self
branch._format = self._format
return branch
class SvnFormat(BzrDirFormat):
_lock_class = TransportLock
def _open(self, transport):
return SvnRemoteAccess(transport, self)
def get_format_string(self):
return 'Subversion Smart Server'
def get_format_description(self):
return 'Subversion Smart Server'
|
Python
| 0.000166
|
@@ -1066,20 +1066,20 @@
def
-find
+open
_reposit
@@ -1194,16 +1194,122 @@
repos%0A%0A
+ # Subversion has all-in-one, so a repository is always present%0A find_repository = open_repository%0A%0A
def
|
dca5582062c10f22281f37960823c3d936cf75af
|
Use more specific error for wrong ticket type
|
mama_cas/mixins.py
|
mama_cas/mixins.py
|
import logging
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import logout
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext_lazy as _
from django.views.decorators.cache import never_cache
from mama_cas.models import ServiceTicket
from mama_cas.models import ProxyTicket
from mama_cas.models import ProxyGrantingTicket
from mama_cas.exceptions import InvalidTicket
from mama_cas.exceptions import ValidationError
from mama_cas.utils import get_callable
logger = logging.getLogger(__name__)
class NeverCacheMixin(object):
"""
View mixin for disabling caching.
"""
@method_decorator(never_cache)
def dispatch(self, request, *args, **kwargs):
return super(NeverCacheMixin, self).dispatch(request, *args, **kwargs)
class LoginRequiredMixin(object):
"""
View mixin to require a logged in user.
"""
@method_decorator(login_required(login_url=reverse_lazy('cas_login'),
redirect_field_name=None))
def dispatch(self, request, *args, **kwargs):
return super(LoginRequiredMixin, self).dispatch(request, *args, **kwargs)
class CasResponseMixin(object):
"""
View mixin for building CAS XML responses. Expects the view to
implement get_context_data() and define response_class.
"""
content_type = 'text/xml'
def get(self, request, *args, **kwargs):
context = self.get_context_data(**kwargs)
return self.render_to_response(context)
def render_to_response(self, context):
return self.response_class(context, content_type=self.content_type)
class ValidateTicketMixin(object):
"""
View mixin providing ticket validation methods.
"""
def validate_service_ticket(self, service, ticket, pgturl, renew):
"""
Validate a service ticket string. Return a triplet containing
a ``ServiceTicket`` and an optional ``ProxyGrantingTicket``,
or a ``ValidationError`` subclass if ticket validation failed.
"""
logger.debug("Service validation request received for %s" % ticket)
# Check for proxy tickets passed to /serviceValidate
if ticket and ticket.startswith(ProxyTicket.TICKET_PREFIX):
e = InvalidTicket('Proxy tickets cannot be validated'
' with /serviceValidate')
logger.warning("%s %s" % (e.code, e))
return None, None, e
try:
st = ServiceTicket.objects.validate_ticket(ticket, service,
renew=renew)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, None, e
else:
if pgturl:
logger.debug("Proxy-granting ticket request received for %s" %
pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(pgturl,
user=st.user,
granted_by_st=st)
else:
pgt = None
return st, pgt, None
def validate_proxy_ticket(self, service, ticket, pgturl):
"""
Validate a proxy ticket string. Return a 4-tuple containing a
``ProxyTicket``, an optional ``ProxyGrantingTicket`` and a list
of proxies through which authentication proceeded, or a
``ValidationError`` subclass if ticket validation failed.
"""
logger.debug("Proxy validation request received for %s" % ticket)
try:
pt = ProxyTicket.objects.validate_ticket(ticket, service)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, None, None, e
else:
# Build a list of all services that proxied authentication,
# in reverse order of which they were traversed
proxies = [pt.service]
prior_pt = pt.granted_by_pgt.granted_by_pt
while prior_pt:
proxies.append(prior_pt.service)
prior_pt = prior_pt.granted_by_pgt.granted_by_pt
if pgturl:
logger.debug("Proxy-granting ticket request received for %s" %
pgturl)
pgt = ProxyGrantingTicket.objects.create_ticket(pgturl,
user=pt.user,
granted_by_pt=pt)
else:
pgt = None
return pt, pgt, proxies, None
def validate_proxy_granting_ticket(self, pgt, target_service):
"""
Validate a proxy granting ticket string. Return an ordered
pair containing a ``ProxyTicket``, or a ``ValidationError``
subclass if ticket validation failed.
"""
logger.debug("Proxy ticket request received for %s using %s" %
(target_service, pgt))
try:
pgt = ProxyGrantingTicket.objects.validate_ticket(pgt,
target_service)
except ValidationError as e:
logger.warning("%s %s" % (e.code, e))
return None, e
else:
pt = ProxyTicket.objects.create_ticket(service=target_service,
user=pgt.user,
granted_by_pgt=pgt)
return pt, None
class CustomAttributesMixin(object):
"""
View mixin for including user attributes in a validation response.
"""
def get_attributes(self, user, service):
"""
Build a dictionary of user attributes from a set of custom
callbacks specified with ``MAMA_CAS_ATTRIBUTE_CALLBACKS``.
"""
attributes = {}
callbacks = getattr(settings, 'MAMA_CAS_ATTRIBUTE_CALLBACKS', ())
for path in callbacks:
callback = get_callable(path)
attributes.update(callback(user, service))
return attributes
class LogoutUserMixin(object):
"""
View mixin for logging a user out of a single sign-on session.
"""
def logout_user(self, request):
"""
End a single sign-on session for the current user. This process
occurs in three steps:
1. Consume all valid tickets created for the user.
2. (Optional) Send single sign-out requests to services.
3. Call logout() to end the session and purge all session data.
"""
logger.debug("Logout request received for %s" % request.user)
if request.user.is_authenticated():
ServiceTicket.objects.consume_tickets(request.user)
ProxyTicket.objects.consume_tickets(request.user)
ProxyGrantingTicket.objects.consume_tickets(request.user)
if getattr(settings, 'MAMA_CAS_ENABLE_SINGLE_SIGN_OUT', False):
ServiceTicket.objects.request_sign_out(request.user)
logger.info("Single sign-on session ended for %s" % request.user)
logout(request)
msg = _("You have been successfully logged out")
messages.success(request, msg)
|
Python
| 0
|
@@ -560,24 +560,28 @@
nvalidTicket
+Spec
%0Afrom mama_c
@@ -2428,16 +2428,20 @@
idTicket
+Spec
('Proxy
@@ -2469,16 +2469,20 @@
idated'%0A
+
|
798e51e880374b43c405ce7e4314b3d1a3311c5c
|
Make exceptions for bad behavior (#220)
|
wikilabels/database/db.py
|
wikilabels/database/db.py
|
import logging
from contextlib import contextmanager
from psycopg2.extras import RealDictCursor
from psycopg2.pool import ThreadedConnectionPool
from .campaigns import Campaigns
from .labels import Labels
from .tasks import Tasks
from .worksets import Worksets
logger = logging.getLogger(__name__)
class DB:
def __init__(self, *args, **kwargs):
self.pool_params = (args, kwargs)
self.pool = None
self.campaigns = Campaigns(self)
self.worksets = Worksets(self)
self.tasks = Tasks(self)
self.labels = Labels(self)
self.logger = logging.getLogger(__name__)
def _initialize_pool(self):
if self.pool is None:
logger.info("Initializing connection pool.")
args, kwargs = self.pool_params
self.pool = ThreadedConnectionPool(
*args, cursor_factory=RealDictCursor, **kwargs)
def execute(self, sql):
with self.transaction() as transactor:
cursor = transactor.cursor()
cursor.execute(sql)
return cursor
@contextmanager
def transaction(self):
"""Provides a transactional scope around a series of operations."""
self._initialize_pool()
conn = self.pool.getconn()
try:
yield conn
conn.commit()
except:
conn.rollback()
raise
finally:
self.pool.putconn(conn)
@classmethod
def from_config(cls, config):
# Copy config as kwargs
params = {k: v for k, v in config['database'].items()}
params['minconn'] = params.get('minconn', 1)
params['maxconn'] = params.get('maxconn', 5)
return cls(**params)
|
Python
| 0.000003
|
@@ -1332,16 +1332,102 @@
except:
+ # noqa: E722%0A # We're fine with the bare except cos we raise in any case.
%0A
|
1d863035c1d422811ea92b7532bf3991d3c88ae7
|
Use session+cache only when not debugging
|
dash2012/settings.py
|
dash2012/settings.py
|
# Django settings for dash2012 project.
import os
import sys
DEBUG = os.environ.get('DEBUG', False)
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
ROOT = os.path.abspath(os.path.dirname(__file__))
EXTRA_LIBS_DIR = os.path.join(ROOT, 'lib')
sys.path.append(EXTRA_LIBS_DIR)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': os.environ.get('SQLITE_PATH', os.path.join(ROOT, 'cloudfish.sqlite')), # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
LOGIN_URL = '/auth/login'
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
if DEBUG:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'cloudfish'
}
}
else:
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
ANALYTICS = os.environ.get('ANALYTICS', '')
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = '/tmp/static/'
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = os.environ.get('SECRET_KEY', 'sample_key')
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'dash2012.urls'
TEMPLATE_DIRS = (
os.path.join(ROOT, 'templates'),
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'cloudfish',
'auth',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
Python
| 0
|
@@ -1012,16 +1012,35 @@
login'%0D%0A
+if not DEBUG:%0D%0A
SESSION_
@@ -1094,190 +1094,8 @@
e'%0D%0A
-if DEBUG:%0D%0A CACHES = %7B%0D%0A 'default': %7B%0D%0A 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',%0D%0A 'LOCATION': 'cloudfish'%0D%0A %7D%0D%0A%7D%0D%0A%0D%0Aelse:%0D%0A
@@ -1255,27 +1255,51 @@
',%0D%0A
-%7D%0D%0A
+ %7D%0D%0A
%7D%0D%0A%0D%0AANALYTI
|
a12910798763d418f2f14d159d4b79e97dad789d
|
Remove TemporaryDirectory class
|
IPython/utils/tempdir.py
|
IPython/utils/tempdir.py
|
"""TemporaryDirectory class, copied from Python 3.2.
This is copied from the stdlib and will be standard in Python 3.2 and onwards.
"""
import os as _os
import warnings as _warnings
import sys as _sys
# This code should only be used in Python versions < 3.2, since after that we
# can rely on the stdlib itself.
try:
from tempfile import TemporaryDirectory
except ImportError:
from tempfile import mkdtemp, template
class TemporaryDirectory(object):
"""Create and return a temporary directory. This has the same
behavior as mkdtemp but can be used as a context manager. For
example:
with TemporaryDirectory() as tmpdir:
...
Upon exiting the context, the directory and everthing contained
in it are removed.
"""
def __init__(self, suffix="", prefix=template, dir=None):
self.name = mkdtemp(suffix, prefix, dir)
self._closed = False
def __enter__(self):
return self.name
def cleanup(self, _warn=False):
if self.name and not self._closed:
try:
self._rmtree(self.name)
except (TypeError, AttributeError) as ex:
# Issue #10188: Emit a warning on stderr
# if the directory could not be cleaned
# up due to missing globals
if "None" not in str(ex):
raise
print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
file=_sys.stderr)
return
self._closed = True
if _warn:
self._warn("Implicitly cleaning up {!r}".format(self),
Warning)
def __exit__(self, exc, value, tb):
self.cleanup()
def __del__(self):
# Issue a ResourceWarning if implicit cleanup needed
self.cleanup(_warn=True)
# XXX (ncoghlan): The following code attempts to make
# this class tolerant of the module nulling out process
# that happens during CPython interpreter shutdown
# Alas, it doesn't actually manage it. See issue #10188
_listdir = staticmethod(_os.listdir)
_path_join = staticmethod(_os.path.join)
_isdir = staticmethod(_os.path.isdir)
_remove = staticmethod(_os.remove)
_rmdir = staticmethod(_os.rmdir)
_os_error = _os.error
_warn = _warnings.warn
def _rmtree(self, path):
# Essentially a stripped down version of shutil.rmtree. We can't
# use globals because they may be None'ed out at shutdown.
for name in self._listdir(path):
fullname = self._path_join(path, name)
try:
isdir = self._isdir(fullname)
except self._os_error:
isdir = False
if isdir:
self._rmtree(fullname)
else:
try:
self._remove(fullname)
except self._os_error:
pass
try:
self._rmdir(path)
except self._os_error:
pass
class NamedFileInTemporaryDirectory(object):
def __init__(self, filename, mode='w+b', bufsize=-1, **kwds):
"""
Open a file named `filename` in a temporary directory.
This context manager is preferred over `NamedTemporaryFile` in
stdlib `tempfile` when one needs to reopen the file.
Arguments `mode` and `bufsize` are passed to `open`.
Rest of the arguments are passed to `TemporaryDirectory`.
"""
self._tmpdir = TemporaryDirectory(**kwds)
path = _os.path.join(self._tmpdir.name, filename)
self.file = open(path, mode, bufsize)
def cleanup(self):
self.file.close()
self._tmpdir.cleanup()
__del__ = cleanup
def __enter__(self):
return self.file
def __exit__(self, type, value, traceback):
self.cleanup()
class TemporaryWorkingDirectory(TemporaryDirectory):
"""
Creates a temporary directory and sets the cwd to that directory.
Automatically reverts to previous cwd upon cleanup.
Usage example:
with TemporaryWorkingDirectory() as tmpdir:
...
"""
def __enter__(self):
self.old_wd = _os.getcwd()
_os.chdir(self.name)
return super(TemporaryWorkingDirectory, self).__enter__()
def __exit__(self, exc, value, tb):
_os.chdir(self.old_wd)
return super(TemporaryWorkingDirectory, self).__exit__(exc, value, tb)
|
Python
| 0
|
@@ -1,3317 +1,109 @@
-%22%22%22TemporaryDirectory class, copied from Python 3.2.%0A%0AThis is copied from the stdlib and will be standard in Python 3.2 and onwards.%0A%22%22%22%0A%0Aimport os as _os%0Aimport warnings as _warnings%0Aimport sys as _sys%0A%0A# This code should only be used in Python versions %3C 3.2, since after that we%0A# can rely on the stdlib itself.%0Atry:%0A from tempfile import TemporaryDirectory%0A%0Aexcept ImportError:%0A from tempfile import mkdtemp, template%0A%0A class TemporaryDirectory(object):%0A %22%22%22Create and return a temporary directory. This has the same%0A behavior as mkdtemp but can be used as a context manager. For%0A example:%0A%0A with TemporaryDirectory() as tmpdir:%0A ...%0A%0A Upon exiting the context, the directory and everthing contained%0A in it are removed.%0A %22%22%22%0A%0A def __init__(self, suffix=%22%22, prefix=template, dir=None):%0A self.name = mkdtemp(suffix, prefix, dir)%0A self._closed = False%0A%0A def __enter__(self):%0A return self.name%0A%0A def cleanup(self, _warn=False):%0A if self.name and not self._closed:%0A try:%0A self._rmtree(self.name)%0A except (TypeError, AttributeError) as ex:%0A # Issue #10188: Emit a warning on stderr%0A # if the directory could not be cleaned%0A # up due to missing globals%0A if %22None%22 not in str(ex):%0A raise%0A print(%22ERROR: %7B!r%7D while cleaning up %7B!r%7D%22.format(ex, self,),%0A file=_sys.stderr)%0A return%0A self._closed = True%0A if _warn:%0A self._warn(%22Implicitly cleaning up %7B!r%7D%22.format(self),%0A Warning)%0A%0A def __exit__(self, exc, value, tb):%0A self.cleanup()%0A%0A def __del__(self):%0A # Issue a ResourceWarning if implicit cleanup needed%0A self.cleanup(_warn=True)%0A%0A%0A # XXX (ncoghlan): The following code attempts to make%0A # this class tolerant of the module nulling out process%0A # that happens during CPython interpreter shutdown%0A # Alas, it doesn't actually manage it. See issue #10188%0A _listdir = staticmethod(_os.listdir)%0A _path_join = staticmethod(_os.path.join)%0A _isdir = staticmethod(_os.path.isdir)%0A _remove = staticmethod(_os.remove)%0A _rmdir = staticmethod(_os.rmdir)%0A _os_error = _os.error%0A _warn = _warnings.warn%0A%0A def _rmtree(self, path):%0A # Essentially a stripped down version of shutil.rmtree. We can't%0A # use globals because they may be None'ed out at shutdown.%0A for name in self._listdir(path):%0A fullname = self._path_join(path, name)%0A try:%0A isdir = self._isdir(fullname)%0A except self._os_error:%0A isdir = False%0A if isdir:%0A self._rmtree(fullname)%0A else:%0A try:%0A self._remove(fullname)%0A except self._os_error:%0A pass%0A try:%0A self._rmdir(path)%0A except self._os_error:%0A pass%0A
+import os as _os%0Aimport warnings as _warnings%0Aimport sys as _sys%0A%0Afrom tempfile import TemporaryDirectory
%0A%0Acl
|
7d3778ed0d596653fa15a8f28ac1063154ea496e
|
Comment about 1MB for bucket size
|
fqueue.py
|
fqueue.py
|
from __future__ import absolute_import, unicode_literals, print_function
import os
import base64
import hashlib
import fcntl
import errno
import signal
from contextlib import contextmanager
from posix_ipc import Semaphore, O_CREAT, BusyError
import Queue
import marshal
import logging
logger = logging.getLogger(__name__)
class TimeoutError(Exception):
pass
@contextmanager
def _timeout(seconds):
if seconds and seconds > 0:
def timeout_handler(signum, frame):
pass
original_handler = signal.signal(signal.SIGALRM, timeout_handler)
try:
signal.alarm(seconds)
yield
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, original_handler)
else:
yield
def _acquire(fd, timeout):
with _timeout(timeout):
try:
fcntl.flock(fd, fcntl.LOCK_EX)
except IOError as e:
if e.errno != errno.EINTR:
raise e
raise TimeoutError
def _release(fd):
fcntl.flock(fd, fcntl.LOCK_UN)
@contextmanager
def lock(fd, timeout=None):
_acquire(fd, timeout)
try:
yield fd
finally:
_release(fd)
class FileQueue(object):
STOPPED = False
bucket_size = 20 # 1MB
def __init__(self, name=None, log=None):
self.name = name
self.logger = log or logger
semname = b'/' + base64.urlsafe_b64encode(hashlib.md5(self.name.encode('ascii')).digest())
self.sem = Semaphore(semname, O_CREAT, initial_value=1)
fnamepos = "%s.pos" % self.name
if not os.path.exists(fnamepos):
self.sem.unlink()
self.sem.close()
self.sem = Semaphore(semname, O_CREAT, initial_value=1)
open(fnamepos, 'wb').close() # touch file
self.fpos = open(fnamepos, 'r+b')
self.fread = None
self.frnum = None
self.fwrite = None
self.fwnum = None
with lock(self.fpos, 3) as fpos:
fpos.seek(0)
try:
frnum, _ = marshal.load(fpos)
except (EOFError, ValueError, TypeError):
frnum = 0 # New, perhaps empty or corrupt pos file
self._open_write(frnum)
def _cleanup(self, fnum):
"""
Deletes all files for the queue up to, and including, fnum.
"""
while os.path.exists('%s.%s' % (self.name, fnum)):
try:
fname = '%s.%s' % (self.name, fnum)
os.unlink(fname)
# print('cleaned up file:', fname, file=sys.stderr)
except:
pass
fnum -= 1
def _open_read(self, frnum):
if self.frnum == frnum:
return
if self.fread:
self.fread.close()
fname = '%s.%s' % (self.name, frnum)
if not os.path.exists(fname):
open(fname, 'wb').close() # touch file
self.fread = open(fname, 'rb')
self.frnum = frnum
# print('new read bucket:', self.frnum, file=sys.stderr)
def _open_write(self, fwnum):
_fwnum = fwnum
while os.path.exists('%s.%s' % (self.name, _fwnum)):
fwnum = _fwnum
_fwnum += 1
if self.fwnum == fwnum:
return
if self.fwrite:
self.fwrite.close()
self.fwrite = open('%s.%s' % (self.name, fwnum), 'ab')
self.fwnum = fwnum
# print('new write bucket:', self.fwnum, file=sys.stderr)
def __del__(self):
self.fpos.close()
self.sem.close()
if self.fwrite:
self.fwrite.close()
if self.fread:
self.fread.close()
def get(self, block=True, timeout=None):
while True:
try:
# Try acquiring the semaphore (in case there's something to read)
self.sem.acquire(block and timeout or None)
except BusyError:
raise Queue.Empty
try:
with lock(self.fpos, 3) as fpos:
fpos.seek(0)
try:
frnum, offset = marshal.load(fpos)
except (EOFError, ValueError, TypeError):
frnum = offset = 0 # New, perhaps empty or corrupt pos file
# print('@', (frnum, offset), file=sys.stderr)
self._open_read(frnum)
self.fread.seek(offset)
try:
value = marshal.load(self.fread)
offset = self.fread.tell()
if offset > self.bucket_size:
self._cleanup(frnum - 1)
self._open_read(frnum + 1)
offset = 0
# peek = self.fread.read(1)
# if len(peek):
# # If there's something further in the file, release
# # the semaphore. FIXME: There are two releases, which is wrong!
# self.sem.release()
return value
except (EOFError, ValueError, TypeError):
pass # The file could not be read, ignore
finally:
fpos.seek(0)
marshal.dump((self.frnum, offset), fpos)
fpos.flush()
except TimeoutError:
raise Queue.Empty
def put(self, value, block=True, timeout=None):
try:
with lock(self.fwrite, 3) as fwrite:
marshal.dump(value, fwrite)
fwrite.flush()
offset = fwrite.tell()
if offset > self.bucket_size:
self._open_write(self.fwnum + 1)
self.sem.release()
except TimeoutError:
raise Queue.Full
# def main(argv):
# queue = FileQueue('/tmp/testing')
# queue.put('TEST')
# queue.get()
# if __name__ == '__main__':
# main(sys.argv)
|
Python
| 0
|
@@ -1258,16 +1258,30 @@
= 20 #
+ 1024 * 1024 =
1MB%0A%0A
|
3145e6e26cda7e4659cac0f44406d49213648049
|
add timeout to Redis lock
|
echo_listener.py
|
echo_listener.py
|
from multiprocessing import Pool
import echo_listener_settings as settings
from boto import sqs
from boto.sqs.message import RawMessage, Message
from boto.s3.connection import S3Connection
from boto.s3.key import Key
import json
import os.path
import sys
import redis
import time
import random
import string
import datetime
class AgnosticMessage(RawMessage):
"""
A message might originate from SNS or SQS. If from SNS then it will have a wrapper on it.
"""
def get_effective_message(self):
b = json.loads(str(self.get_body()))
if 'Type' in b and b['Type'] == "Notification":
return json.loads(b['Message'])
return b
def main():
if len(sys.argv) < 7:
showUsage()
return
redisHost = sys.argv[1]
redisPort = int(sys.argv[2])
redisDB = int(sys.argv[3])
region = sys.argv[4]
inputQueueName = sys.argv[5]
errorQueueName = sys.argv[6]
input_queue = get_queue(region, inputQueueName)
input_queue.set_message_class(AgnosticMessage)
num_pool_workers = settings.NUM_POOL_WORKERS
messages_per_fetch = settings.MESSAGES_PER_FETCH
pool = Pool(num_pool_workers, initializer=workerSetup, initargs=(redisHost, redisPort, redisDB, region, errorQueueName))
while True:
messages = input_queue.get_messages(num_messages=messages_per_fetch, visibility_timeout=120, wait_time_seconds=20)
if len(messages) > 0:
pool.map(process_message, messages)
def workerSetup(redisHost, redisPort, redisDB, region, errorQueueName):
global s3Connection
s3Connection = S3Connection()
global redisClient
redisClient = redis.Redis(host=redisHost, port=redisPort, db=redisDB)
global errorQueue
errorQueue = get_queue(region, errorQueueName)
def showUsage():
print "Usage: echo_listener.py <Redis IP> <Redis Port> <Redis DB> <AWS region> <AWS input queue name> <AWS error queue name>"
print "Example: echo_listener.py 172.17.0.2 6379 0 eu-west-1 echo-eu-west-1a echo-eu-west-1a-errors"
def process_message(message):
# console_log("process_message called")
message_body = message.get_effective_message()
# console_log("message type=" + message_body['_type'])
try:
if '_type' in message_body and 'message' in message_body and 'params' in message_body:
if message_body['message'] == "echo::cache-item":
cache_item(message_body['params'])
elif message_body['message'] == "echo::item-access":
item_access(message_body['params'])
except Exception, e:
handle_error(e, message)
pass
message.delete()
def handle_error(e, message):
console_log("exception: %s" % str(e))
m = RawMessage()
message_body = message.get_effective_message()
message_body['exception'] = str(e)
m.set_body(str(json.dumps(message_body)))
errorQueue.write(m)
def item_access(payload):
# console_log("item_access: " + payload['target'])
record_access(payload['target'])
def cache_item(payload):
# "source": "s3://my-bucket/key"
# "target": "/my-path/key.maybe-extension-too
# "bucket": "my-bucket"
# "key": "key"
console_log("cache_item: s3://" + payload['bucket'] + '/' + payload['key'] + ' -> ' + payload['target'])
target = settings.CACHE_ROOT + payload['target'].decode('utf-8')
targetPath = '/'.join(target.split('/')[0:-1])
try:
if not os.path.isdir(targetPath):
os.makedirs(targetPath)
except:
pass
if os.path.exists(target):
console_log("already exists in cache")
else:
#console_log("synchronisation lock")
timeout_start = time.time()
timeout = settings.LOCK_TIMEOUT
timeout_occurred = True
# if the flag exists, then loop until timeout for the flag to disappear
if redisClient.exists(payload['target']):
while time.time() < timeout_start + timeout:
if redisClient.exists(payload['target']):
# currently an operation happening for this file
time.sleep(0.01)
else:
timeout_occurred = False
break
if timeout_occurred:
raise Exception("lock timeout")
if not os.path.exists(target):
redisClient.set(payload['target'], payload['target'])
bucket = s3Connection.get_bucket(payload['bucket'])
k = Key(bucket)
k.key = payload['key']
k.get_contents_to_filename(target + ".moving")
console_log("downloaded " + payload['key'] + " -> " + target + ".moving")
os.rename(target + ".moving", target)
console_log("renamed to " + target)
record_access(payload['target'])
redisClient.delete(payload['target'])
def record_access(item):
#print "record_access for " + item
accessTime = int(time.time())
redisClient.zadd('access', item, accessTime)
def get_queue(region, queue):
conn = sqs.connect_to_region(region)
return conn.get_queue(queue)
def console_log(message):
print('{:%Y%m%d %H:%M:%S} '.format(datetime.datetime.now()) + message)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -3942,16 +3942,18 @@
ient.set
+ex
(payload
@@ -3977,24 +3977,51 @@
ad%5B'target'%5D
+, settings.LOCK_TIMEOUT * 2
)%0A%09%0A%09%09%09bucke
|
5952f8cc4e46e9a1e76666b2c23c5db091500cac
|
Factor out the check for whether a date should be represented by Julian or Gregorian.
|
calexicon/calendars/historical.py
|
calexicon/calendars/historical.py
|
from datetime import date
from base import Calendar
from calexicon.dates import InvalidDate, DateWithCalendar
from main import JulianCalendar, ProlepticGregorianCalendar
class SwitchDateWithCalendar(DateWithCalendar):
def __str__(self):
return "%s (%s - %s)" % (
self.calendar.date_display_string(self._date),
self.calendar.display_name,
self.calendar.period_string(self._date)
)
class JulianToGregorianCalendar(Calendar):
def date(self, year, month, day):
gregorian_date = date(year, month, day)
if gregorian_date < self.first_gregorian_day:
julian_date = JulianCalendar().date(year, month, day)
if not julian_date < self.first_gregorian_day:
raise InvalidDate("This is a 'missing day' when the calendars changed.")
return self.from_date(julian_date._date)
return self.from_date(gregorian_date)
@classmethod
def date_display_string(cls, d):
if d >= cls.first_gregorian_day:
return ProlepticGregorianCalendar.date_display_string(d)
return JulianCalendar.date_display_string(d)
@classmethod
def representation(cls, d):
if d >= cls.first_gregorian_day:
return ProlepticGregorianCalendar.representation(d)
return JulianCalendar.representation(d)
@classmethod
def period_string(cls, d):
if d >= cls.first_gregorian_day:
return 'Gregorian'
else:
return 'Julian'
def from_date(self, d):
return SwitchDateWithCalendar(self.__class__, d)
class EnglishHistoricalCalendar(JulianToGregorianCalendar):
display_name = "English Historical Calendar"
first_gregorian_day = date(1752, 9, 14)
class SpanishHistoricalCalendar(JulianToGregorianCalendar):
display_name = "Spanish Historical Calendar"
first_gregorian_day = date(1582, 10, 15)
class FrenchHistoricalCalendar(JulianToGregorianCalendar):
display_name = "French Historical Calendar"
first_gregorian_day = date(1582, 12, 20)
|
Python
| 0.999989
|
@@ -929,16 +929,113 @@
_date)%0A%0A
+ @classmethod%0A def is_gregorian_date(cls, d):%0A return d %3E= cls.first_gregorian_day%0A%0A
@cla
@@ -1087,38 +1087,30 @@
%0A if
-d %3E= cls.first
+cls.is
_gregorian_d
@@ -1102,33 +1102,37 @@
.is_gregorian_da
-y
+te(d)
:%0A re
@@ -1296,38 +1296,30 @@
%0A if
-d %3E= cls.first
+cls.is
_gregorian_d
@@ -1311,33 +1311,37 @@
.is_gregorian_da
-y
+te(d)
:%0A re
|
3f0932f8fc1277fc5354476470c2931d48f62977
|
bump version
|
callisto_core/utils/version.py
|
callisto_core/utils/version.py
|
__version__ = '0.10.10'
|
Python
| 0
|
@@ -14,11 +14,11 @@
'0.10.1
-0
+1
'%0A
|
26c518ddfeb73e2a8bc64039f481a680e80ced6d
|
fix path for config and template
|
tsstats.py
|
tsstats.py
|
import re
import sys
import configparser
from time import mktime
from datetime import datetime, timedelta
from jinja2 import Environment, FileSystemLoader
# parse config
config = configparser.ConfigParser()
config.read('config.ini')
if 'General' not in config or not ('logfile' in config['General'] and 'outputfile' in config['General']):
print('Invalid configuration!')
import sys
sys.exit()
log_path = config['General']['logfile']
output_path = config['General']['outputfile']
generation_start = datetime.now()
clients = {} # clid: {'nick': ..., 'onlinetime': ..., 'kicks': ..., 'pkicks': ..., 'bans': ..., 'last_connect': ..., 'connected': ...}
cldata = re.compile(r"'(.*)'\(id:(\d*)\)")
cldata_ban = re.compile(r"by\ client\ '(.*)'\(id:(\d*)\)")
cldata_invoker = re.compile(r"invokerid=(\d*)\ invokername=(.*)\ invokeruid")
def add_connect(clid, nick, logdatetime):
check_client(clid, nick)
clients[clid]['last_connect'] = mktime(logdatetime.timetuple())
clients[clid]['connected'] = True
def add_disconnect(clid, nick, logdatetime, set_connected=True):
check_client(clid, nick)
connect = datetime.fromtimestamp(clients[clid]['last_connect'])
delta = logdatetime - connect
minutes = delta.seconds // 60
increase_onlinetime(clid, minutes)
if set_connected:
clients[clid]['connected'] = False
def add_ban(clid, nick):
check_client(clid, nick)
if 'bans' in clients[clid]:
clients[clid]['bans'] += 1
else:
clients[clid]['bans'] = 1
def add_kick(clid, nick):
check_client(clid, nick)
if 'kicks' in clients[clid]:
clients[clid]['kicks'] += 1
else:
clients[clid]['kicks'] = 1
def add_pkick(clid, nick):
check_client(clid, nick)
if 'pkicks' in clients[clid]:
clients[clid]['pkicks'] += 1
else:
clients[clid]['pkicks'] = 1
def increase_onlinetime(clid, onlinetime):
if 'onlinetime' in clients[clid]:
clients[clid]['onlinetime'] += onlinetime
else:
clients[clid]['onlinetime'] = onlinetime
def check_client(clid, nick):
if clid not in clients:
clients[clid] = {}
clients[clid]['nick'] = nick
with open(log_path, 'r') as f:
today = datetime.utcnow()
for line in f:
parts = line.split('|')
logdatetime = datetime.strptime(parts[0], '%Y-%m-%d %H:%M:%S.%f')
sid = int(parts[3].strip())
data = '|'.join(parts[4:]).strip()
if data.startswith('client'):
r = cldata.findall(data)[0]
nick = r[0]
id = r[1]
if data.startswith('client connected'):
add_connect(id, nick, logdatetime)
elif data.startswith('client disconnected'):
add_disconnect(id, nick, logdatetime)
if 'invokerid' in data:
add_pkick(id, nick)
r = cldata_invoker.findall(data)[0]
nick = r[1]
id = r[0]
add_kick(id, nick)
elif data.startswith('ban added') and 'cluid' in data:
r = cldata_ban.findall(data)[0]
nick = r[0]
id = r[1]
add_ban(id, nick)
for clid in clients:
if 'connected' not in clients[clid]:
clients[clid]['connected'] = False
if clients[clid]['connected']:
add_disconnect(clid, clients[clid]['nick'], today, set_connected=False)
generation_end = datetime.now()
generation_delta = generation_end - generation_start
# helper functions
def desc(key):
r = []
values = {}
for clid in clients:
if key in clients[clid]:
values[clid] = clients[clid][key]
for clid in sorted(values, key=values.get, reverse=True):
value = values[clid]
r.append((clid, clients[clid]['nick'], value))
return r
def render_template():
arg = sys.argv[0]
arg_find = arg.rfind('/')
if arg_find == -1:
path = '.'
else:
path = arg[:arg_find] + '/'
env = Environment(loader=FileSystemLoader(path))
template = env.get_template('template.html')
# format onlinetime
onlinetime_desc = desc('onlinetime')
for idx, (clid, nick, onlinetime) in enumerate(onlinetime_desc):
if onlinetime > 60:
onlinetime_str = str(onlinetime // 60) + 'h'
m = onlinetime % 60
if m > 0:
onlinetime_str += ' ' + str(m) + 'm'
else:
onlinetime_str = str(onlinetime) + 'm'
onlinetime_desc[idx] = (clid, nick, onlinetime_str, clients[clid]['connected'])
with open(output_path, 'w+') as f:
f.write(template.render(onlinetime=onlinetime_desc, kicks=desc('kicks'), pkicks=desc('pkicks'), bans=desc('bans'), seconds='{}.{}'.format(generation_delta.seconds, generation_delta.microseconds), date=generation_end.strftime('%d.%m.%Y um %H:%M')))
if len(clients) < 1:
print('Not enough data!')
else:
render_template()
|
Python
| 0
|
@@ -156,16 +156,161 @@
ader%0D%0A%0D%0A
+# get path%0D%0Aarg = sys.argv%5B0%5D%0D%0Aarg_find = arg.rfind('/')%0D%0Aif arg_find == -1:%0D%0A path = '.'%0D%0Aelse:%0D%0A path = arg%5B:arg_find%5D%0D%0Apath += '/'%0D%0A%0D%0A%0D%0A
# parse
@@ -367,16 +367,23 @@
ig.read(
+path +
'config.
@@ -4147,156 +4147,8 @@
):%0D%0A
- arg = sys.argv%5B0%5D%0D%0A arg_find = arg.rfind('/')%0D%0A if arg_find == -1:%0D%0A path = '.'%0D%0A else:%0D%0A path = arg%5B:arg_find%5D + '/'%0D%0A%0D%0A
|
7220621fcdba6de2e0fabb69e2d51dd382e739ba
|
Fix Windows freeze error
|
freeze.py
|
freeze.py
|
#!/usr/bin/env python3
import os
import re
from cx_Freeze import setup, Executable
with open(os.path.join("sacad", "__init__.py"), "rt") as f:
version = re.search("__version__ = \"([^\"]+)\"", f.read()).group(1)
build_exe_options = {"includes": ["lxml._elementpath"],
"packages": ["asyncio"],
"optimize": 0}
setup(name="sacad",
version=version,
author="desbma",
packages=["sacad"],
options={"build_exe": build_exe_options},
executables=[Executable(os.path.join("sacad", "__main__.py"),
targetName="sacad.exe"),
Executable(os.path.join("sacad", "recurse.py"),
targetName="sacad_r.exe")])
|
Python
| 0.000001
|
@@ -311,16 +311,24 @@
asyncio%22
+, %22idna%22
%5D,%0A
|
364ef672f8fc73c2a42521e7fe3096b826fb08b6
|
Fix gitdir -> git
|
project_management/pmtools/controller/project.py
|
project_management/pmtools/controller/project.py
|
"""
Pm Project module
Provide functionality for project management.
Commands:
ls list contents
init initialize a project folder
add add boilerplate code
compress compress files
clean remove files
du calculate disk usage
Synopsis:
The following command creates a directory in the project root
named j_doe_00_00. The '-g' flag adds a git directory to the
project repos, and initializes the project subdirectory j_doe_00_00_git
for use with git.
pm project init j_doe_00_00 -g
FIXME: Boilerplate code can be added to the project by running
pm project add j_doe_00_00
The boilerplate code includes makefiles, sbatch templates, and documentation
templates.
"""
import os
import sys
import re
from cement.core import controller
from pmtools import AbstractBaseController
## Main project controller
class ProjectController(AbstractBaseController):
"""
Functionality for project management.
"""
class Meta:
label = 'project'
description = 'Manage projects'
arguments = [
(['projectid'], dict(help="Scilife project id (e.g. j_doe_00_00)", default="", action="store", nargs="?")),
(['--pbzip2'], dict(help="Use pbzip2 as compressing device", default=False, action="store_true")),
(['--pigz'], dict(help="Use pigz as compressing device", default=False, action="store_true")),
(['-s', '--sbatch'], dict(help="Submit jobs to slurm", default=False, action="store_true")),
(['-f', '--fastq'], dict(help="Workon fastq files", default=False, action="store_true")),
(['-p', '--pileup'], dict(help="Workon pileup files", default=False, action="store_true")),
(['-A', '--uppmax-project'], dict(help="uppmax project id for use with sbatch", action="store")),
(['-t', '--sbatch-time'], dict(help="sbatch time limit", default="00:10:00", action="store")),
(['-N', '--node'], dict(help="run node job", default=False, action="store_true")),
(['-g', '--git'], dict(help="Initialize git directory in repos and project gitdir", default=False, action="store_true")),
]
@controller.expose(hide=True)
def default(self):
print __doc__
@controller.expose(help="List project folder")
def ls(self):
assert os.path.exists(os.path.join(self.config.get("project", "root"), self.pargs.projectid)), "no project directory %s" % self.pargs.projectid
if self.pargs.projectid=="":
out = self.sh(["ls", self.config.get("project", "root")])
else:
self._not_implemented("list projectid contents: only use intermediate and data directories by default" )
if out:
print "\n".join(self._filtered_ls(out.splitlines()))
@controller.expose(help="Initalize project folder")
def init(self):
if self.pargs.projectid=="":
return
self.log.info("Initalizing project %s" % self.pargs.projectid)
## Create directory structure
dirs = ["%s_git" % self.pargs.projectid, "data", "intermediate"]
gitdirs = ["config", "sbatch", "doc", "lib"]
[self.safe_makedir(os.path.join(self.config.get("project", "root"), self.pargs.projectid, x)) for x in dirs]
[self.safe_makedir(os.path.join(self.config.get("project", "root"), self.pargs.projectid, dirs[0], x)) for x in gitdirs]
## Initialize git if repos defined and flag set
if self.config.get("project", "repos") and self.pargs.gitdir:
dirs = {
'repos':os.path.join(self.config.get("project", "repos"), "current", self.pargs.projectid),
'gitdir':os.path.join(self.config.get("project", "root"), self.pargs.projectid, "%s_git" % self.pargs.projectid)
}
self.safe_makedir(dirs['repos'])
self.sh(["cd", dirs['repos'], "&& git init --bare"])
self.sh(["cd", dirs['gitdir'], "&& git init && git remote add origin", dirs['repos']])
@controller.expose(help="Add boilerplate code")
def add(self):
self._not_implemented()
@controller.expose(help="Remove files")
def clean(self):
self._not_implemented()
@controller.expose(help="Compress files")
def compress(self):
assert os.path.exists(os.path.join(self.config.get("project", "root"), self.pargs.projectid)), "no project directory %s" % self.pargs.projectid
if self.pargs.projectid=="":
self.log.warn("Not running compress function on project root directory")
sys.exit()
## Set pattern for compress operations
plist = []
if self.pargs.fastq:
plist += [".fastq$", ".fastq.txt$", ".fq$"]
if self.pargs.pileup:
plist += [".pileup$"]
pattern = "|".join(plist)
def compress_filter(f):
return re.search(pattern, f) != None
flist = []
for root, dirs, files in os.walk(os.path.join(self.config.get("project", "root"), self.pargs.projectid)):
flist = flist + [os.path.join(root, x) for x in filter(compress_filter, files)]
##self.log.info("Going to compress %s files. Are you sure you want to continue?" % len(flist))
if not self.query_yes_no("Going to compress %s files. Are you sure you want to continue?" % len(flist)):
sys.exit()
for f in flist:
self.drmaa(["gzip", "-v", "%s" % f], "compress")
@controller.expose(help="Calculate disk usage in intermediate and data directories")
def du(self):
self._not_implemented()
|
Python
| 0.000036
|
@@ -3564,19 +3564,16 @@
args.git
-dir
:%0A
|
ec51bcd1803a2f576f6a325b9b950d86c5d0b2a9
|
Cut 0.9.1
|
invocations/_version.py
|
invocations/_version.py
|
__version_info__ = (0, 9, 0)
__version__ = '.'.join(map(str, __version_info__))
|
Python
| 0.000001
|
@@ -19,17 +19,17 @@
(0, 9,
-0
+1
)%0A__vers
|
38b281793a52a22b4325814f9389d7d07ed95cbc
|
replace couchforms/by_user with reports_forms view
|
custom/_legacy/pact/reports/chw_list.py
|
custom/_legacy/pact/reports/chw_list.py
|
from django.core.urlresolvers import NoReverseMatch
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.generic import GenericTabularReport
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin
from django.utils import html
from couchforms.models import XFormInstance
from pact.reports.chw import PactCHWProfileReport
class PactCHWDashboard(GenericTabularReport, ProjectReportParametersMixin, CustomProjectReport):
name = "CHW Management"
slug = "chws"
hide_filters = True
fields = ['corehq.apps.reports.fields.FilterUsersField', ]
# asynchronous = False
@property
def headers(self):
headers = DataTablesHeader(
DataTablesColumn("Username"),
DataTablesColumn("Last Submit"),
DataTablesColumn("Total Submits"),
DataTablesColumn("", sortable=False),
)
return headers
def _chw_profile_link(self, user_id):
try:
return html.mark_safe("<a class='ajax_dialog' href='%s'>%s</a>" % (
html.escape(PactCHWProfileReport.get_url(*[self.domain]) + "?chw_id=%s" % user_id),
"View Profile",
))
except NoReverseMatch:
return "Unknown User ID"
@property
def rows(self):
rows = []
def form_count(user_id):
result = XFormInstance.view('couchforms/by_user',
startkey=[user_id],
endkey=[user_id, {}],
group_level=0
).one()
if result:
return result['value']
else:
return 0
def last_submit_time(user_id):
#need to call it directly due to reversed not liking the keys set the regular way
v = XFormInstance.view('couchforms/by_user',
endkey=[user_id],
startkey=[user_id, {}],
reduce=False,
include_docs=True,
descending=True, limit=1)
res = v.one()
if res is None:
return None
else:
return res.received_on.strftime("%m/%d/%Y")
for user in self.users:
rows.append([
user['raw_username'],
last_submit_time(user['user_id']),
form_count(user['user_id']),
self._chw_profile_link(user['user_id'])
])
return rows
|
Python
| 0.000003
|
@@ -305,16 +305,73 @@
ort html
+%0Afrom corehq.apps.reports.util import make_form_couch_key
%0A%0Afrom c
@@ -458,16 +458,46 @@
eReport%0A
+from datetime import datetime%0A
%0A%0Aclass
@@ -1474,32 +1474,100 @@
count(user_id):%0A
+ key = make_form_couch_key(self.domain, user_id=user_id)%0A
resu
@@ -1591,34 +1591,39 @@
e.view('
-couchforms/by_user
+reports_forms/all_forms
',%0A
@@ -1662,33 +1662,27 @@
startkey=
-%5Buser_id%5D
+key
,%0A
@@ -1714,34 +1714,31 @@
endkey=
-%5Buser_id,
+key + %5B
%7B%7D%5D,%0A
@@ -2044,16 +2044,84 @@
lar way%0A
+ key = make_form_couch_key(self.domain, user_id=user_id)%0A
@@ -2152,26 +2152,31 @@
ew('
-couchforms/by_user
+reports_forms/all_forms
',%0A
@@ -2220,17 +2220,11 @@
key=
-%5Buser_id%5D
+key
,%0A
@@ -2269,18 +2269,15 @@
key=
-%5Buser_id,
+key + %5B
%7B%7D%5D,
@@ -2374,19 +2374,20 @@
de_docs=
-Tru
+Fals
e,%0A
@@ -2569,23 +2569,62 @@
urn
-res.received_on
+datetime.strftime(res%5B'key'%5D%5B3%5D, %22%25Y-%25m-%25dT%25H:%25M:%25SZ%22)
.str
|
9f63ee0c05eb71ef612972867fe38791eaf2d86a
|
Fix #dnu implementation to not include self in the arg array
|
src/som/vmobjects/abstract_object.py
|
src/som/vmobjects/abstract_object.py
|
class AbstractObject(object):
def __init__(self):
pass
def send(self, frame, selector_string, arguments, universe, interpreter):
# Turn the selector string into a selector
selector = universe.symbol_for(selector_string)
# Push the receiver onto the stack
frame.push(self)
# Push the arguments onto the stack
for arg in arguments:
frame.push(arg)
# Lookup the invokable
invokable = self.get_class(universe).lookup_invokable(selector)
# Invoke the invokable
invokable.invoke(frame, interpreter)
def send_does_not_understand(self, frame, selector, interpreter):
universe = interpreter.get_universe()
# Compute the number of arguments
number_of_arguments = selector.get_number_of_signature_arguments()
# Allocate an array with enough room to hold all arguments
arguments_array = universe.new_array_with_length(number_of_arguments)
# Remove all arguments and put them in the freshly allocated array
i = number_of_arguments - 1
while i >= 0:
arguments_array.set_indexable_field(i, frame.pop())
i -= 1
args = [selector, arguments_array]
self.send(frame, "doesNotUnderstand:arguments:", args, universe, interpreter)
def send_unknown_global(self, frame, global_name, universe, interpreter):
arguments = [global_name]
self.send(frame, "unknownGlobal:", arguments, universe, interpreter)
def send_escaped_block(self, frame, block, universe, interpreter):
arguments = [block]
self.send(frame, "escapedBlock:", arguments, universe, interpreter)
def get_class(self, universe):
raise NotImplementedError("Subclasses need to implement get_class(universe).")
def quick_add(self, from_method, frame, interpreter, bytecode_index):
interpreter._send(from_method, frame, interpreter._add_symbol,
self.get_class(interpreter.get_universe()),
bytecode_index)
def quick_multiply(self, from_method, frame, interpreter, bytecode_index):
interpreter._send(from_method, frame, interpreter._multiply_symbol,
self.get_class(interpreter.get_universe()),
bytecode_index)
def quick_subtract(self, from_method, frame, interpreter, bytecode_index):
interpreter._send(from_method, frame, interpreter._subtract_symbol,
self.get_class(interpreter.get_universe()),
bytecode_index)
@staticmethod
def is_invokable():
return False
def __str__(self):
from som.vm.universe import get_current
return "a " + self.get_class(get_current()).get_name().get_string()
|
Python
| 0
|
@@ -846,16 +846,39 @@
uments()
+ - 1 ## do ignore self
%0A%0A
@@ -1242,24 +1242,25 @@
i -= 1%0A
+%0A
%0A
@@ -1247,28 +1247,58 @@
1%0A%0A
-
+frame.pop() # pop self from stack
%0A arg
|
7dc660b0a270c0d43f603d02895dec285df85ef6
|
fix telegram restart
|
rasa_core/channels/telegram.py
|
rasa_core/channels/telegram.py
|
import logging
from flask import Blueprint, request, jsonify
from telegram import (
Bot, InlineKeyboardButton, Update, InlineKeyboardMarkup,
KeyboardButton, ReplyKeyboardMarkup)
from rasa_core import constants
from rasa_core.channels import InputChannel
from rasa_core.channels.channel import UserMessage, OutputChannel
from rasa_core.constants import INTENT_MESSAGE_PREFIX, USER_INTENT_RESTART
logger = logging.getLogger(__name__)
class TelegramOutput(Bot, OutputChannel):
"""Output channel for Telegram"""
@classmethod
def name(cls):
return "telegram"
def __init__(self, access_token):
super(TelegramOutput, self).__init__(access_token)
def send_text_message(self, recipient_id, message):
for message_part in message.split("\n\n"):
self.send_message(recipient_id, message_part)
def send_image_url(self, recipient_id, image_url):
self.send_photo(recipient_id, image_url)
def send_text_with_buttons(self, recipient_id, text,
buttons, button_type="inline", **kwargs):
"""Sends a message with keyboard.
For more information: https://core.telegram.org/bots#keyboards
:button_type inline: horizontal inline keyboard
:button_type vertical: vertical inline keyboard
:button_type custom: custom keyboard
"""
if button_type == "inline":
button_list = [[InlineKeyboardButton(s["title"],
callback_data=s["payload"])
for s in buttons]]
reply_markup = InlineKeyboardMarkup(button_list)
elif button_type == "vertical":
button_list = [[InlineKeyboardButton(s["title"],
callback_data=s["payload"])]
for s in buttons]
reply_markup = InlineKeyboardMarkup(button_list)
elif button_type == "custom":
button_list = []
for bttn in buttons:
if isinstance(bttn, list):
button_list.append([KeyboardButton(s['title'])
for s in bttn])
else:
button_list.append([KeyboardButton(bttn["title"])])
reply_markup = ReplyKeyboardMarkup(button_list,
resize_keyboard=True,
one_time_keyboard=True)
else:
logger.error('Trying to send text with buttons for unknown '
'button type {}'.format(button_type))
return
self.send_message(recipient_id, text, reply_markup=reply_markup)
class TelegramInput(InputChannel):
"""Telegram input channel"""
@classmethod
def name(cls):
return "telegram"
@classmethod
def from_credentials(cls, credentials):
if not credentials:
cls.raise_missing_credentials_exception()
return cls(credentials.get("access_token"),
credentials.get("verify"),
credentials.get("webhook_url"))
def __init__(self, access_token, verify, webhook_url, debug_mode=True):
self.access_token = access_token
self.verify = verify
self.webhook_url = webhook_url
self.debug_mode = debug_mode
@staticmethod
def _is_location(message):
return message.location
@staticmethod
def _is_user_message(message):
return message.text
@staticmethod
def _is_button(update):
return update.callback_query
def blueprint(self, on_new_message):
telegram_webhook = Blueprint('telegram_webhook', __name__)
out_channel = TelegramOutput(self.access_token)
@telegram_webhook.route("/", methods=['GET'])
def health():
return jsonify({"status": "ok"})
@telegram_webhook.route("/set_webhook", methods=['GET', 'POST'])
def set_webhook():
s = out_channel.setWebhook(self.webhook_url)
if s:
logger.info("Webhook Setup Successful")
return "Webhook setup successful"
else:
logger.warning("Webhook Setup Failed")
return "Invalid webhook"
@telegram_webhook.route("/webhook", methods=['GET', 'POST'])
def message():
if request.method == 'POST':
if not out_channel.get_me()['username'] == self.verify:
logger.debug("Invalid access token, check it "
"matches Telegram")
return "failed"
update = Update.de_json(request.get_json(force=True),
out_channel)
if self._is_button(update):
msg = update.callback_query.message
text = update.callback_query.data
else:
msg = update.message
if self._is_user_message(msg):
text = msg.text.replace('/bot', '')
elif self._is_location(msg):
text = ('{{"lng":{0}, "lat":{1}}}'
''.format(msg.location.longitude,
msg.location.latitude))
else:
return "success"
sender_id = msg.chat.id
try:
if (text == (INTENT_MESSAGE_PREFIX + USER_INTENT_RESTART) or
text == constants.USER_INTENT_RESTART):
on_new_message(UserMessage(
text, out_channel, sender_id,
input_channel=self.name()))
on_new_message(UserMessage(
'/start', out_channel, sender_id,
input_channel=self.name()))
else:
on_new_message(UserMessage(
text, out_channel, sender_id,
input_channel=self.name()))
except Exception as e:
logger.error("Exception when trying to handle "
"message.{0}".format(e))
logger.debug(e, exc_info=True)
if self.debug_mode:
raise
pass
return "success"
set_webhook()
return telegram_webhook
|
Python
| 0.000003
|
@@ -5526,17 +5526,16 @@
if
-(
text ==
@@ -5583,78 +5583,8 @@
ART)
- or%0A text == constants.USER_INTENT_RESTART)
:%0A%0A
|
4be292c5c38b4eec08c56a872f6cd4f390bc607a
|
make compiler's py3k warning a full deprecation warning #6837
|
Lib/compiler/__init__.py
|
Lib/compiler/__init__.py
|
"""Package for parsing and compiling Python source code
There are several functions defined at the top level that are imported
from modules contained in the package.
parse(buf, mode="exec") -> AST
Converts a string containing Python source code to an abstract
syntax tree (AST). The AST is defined in compiler.ast.
parseFile(path) -> AST
The same as parse(open(path))
walk(ast, visitor, verbose=None)
Does a pre-order walk over the ast using the visitor instance.
See compiler.visitor for details.
compile(source, filename, mode, flags=None, dont_inherit=None)
Returns a code object. A replacement for the builtin compile() function.
compileFile(filename)
Generates a .pyc file by compiling filename.
"""
from warnings import warnpy3k
warnpy3k("the compiler package has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from compiler.transformer import parse, parseFile
from compiler.visitor import walk
from compiler.pycodegen import compile, compileFile
|
Python
| 0
|
@@ -734,20 +734,23 @@
me.%0A%22%22%22%0A
-from
+%0Aimport
warning
@@ -754,36 +754,26 @@
ings
- import warnpy3k%0Awarnpy3k(%22t
+%0A%0Awarnings.warn(%22T
he c
@@ -792,16 +792,25 @@
age
-has been
+is deprecated and
rem
@@ -830,10 +830,45 @@
n 3.
-0%22
+x.%22,%0A DeprecationWarning
, st
@@ -882,21 +882,8 @@
l=2)
-%0Adel warnpy3k
%0A%0Afr
|
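
A minimal sketch of the import-time deprecation pattern the diff switches to, for a hypothetical package name; note that DeprecationWarning is filtered out by default in most contexts, so run Python with -W default to see it:

import warnings

# Emitted once, when the deprecated module is first imported.
warnings.warn("The mypkg package is deprecated and removed in 2.0.",
              DeprecationWarning, stacklevel=2)

stacklevel=2 attributes the warning to the importing code rather than to the deprecated module itself.
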
c05b179675afd326ca80540cd55cfd1900e2970f
|
Fix wrong import.
|
sugar/graphics/popup.py
|
sugar/graphics/popup.py
|
# Copyright (C) 2007, One Laptop Per Child
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import sys
import logging
import gobject
import gtk
import hippo
from sugar.graphics import units
from sugar.graphics.roundbox import RoundBox
from sugar.graphics import button
from sugar.graphics import color
from sugar.graphics import font
from sugar.graphics.canvasicon import CanvasIcon
class Popup(hippo.CanvasBox, hippo.CanvasItem):
__gtype_name__ = 'SugarPopup'
__gsignals__ = {
'action-completed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, ([]))
}
def __init__(self, title):
hippo.CanvasBox.__init__(self)
self.props.background_color = color.MENU_BACKGROUND.get_int()
self.props.border_color = color.MENU_BORDER.get_int()
self.props.border = units.points_to_pixels(1)
self._window = None
def add_item(self, action_id, label, icon_name=None, icon_color=None):
box = hippo.CanvasBox(orientation=hippo.ORIENTATION_HORIZONTAL)
box.props.padding = 5
box.props.spacing = 5
if icon_name:
icon = CanvasIcon(icon_name=icon_name,
scale=units.SMALL_ICON_SCALE)
if icon_color:
icon.props.color = icon_color
box.append(icon)
canvas_text = hippo.CanvasText()
canvas_text.props.text = label
canvas_text.props.color = color.LABEL_TEXT.get_int()
canvas_text.props.font_desc = font.DEFAULT.get_pango_desc()
box.append(canvas_text)
box.connect('button-press-event', self._item_button_press_event_cb)
self.append(box)
def add_separator(self):
box = hippo.CanvasBox()
box.props.background_color = color.MENU_SEPARATOR.get_int()
box.props.box_height = units.points_to_pixels(1)
self.append(box)
def popup(self, x, y):
if not self._window:
self._window = hippo.CanvasWindow(gtk.WINDOW_POPUP)
self._window.move(x, y)
self._window.set_root(self)
self._window.show()
def popdown(self):
if self._window:
self._window.destroy()
self._window = None
def _item_button_press_event_cb(self, item, event):
self.emit('action-completed')
|
Python
| 0.000002
|
@@ -911,42 +911,8 @@
Box%0A
-from sugar.graphics import button%0A
from
|
52fa3230a29e10b49b7e6decb68cf5dbbf7a208f
|
Handle Unknown locale errors properly
|
zou/app/__init__.py
|
zou/app/__init__.py
|
import os
import flask_fs
import traceback
from flask import Flask, jsonify
from flasgger import Swagger
from flask_restful import current_app
from flask_jwt_extended import JWTManager
from flask_principal import Principal, identity_changed, Identity
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from jwt import ExpiredSignatureError
from . import config, swagger
from .stores import auth_tokens_store
from .services.exception import (
ModelWithRelationsDeletionException,
PersonNotFoundException,
WrongIdFormatException,
WrongParameterException,
WrongTaskTypeForEntityException,
)
from .utils import fs, logs
from zou.app.utils import cache
app = Flask(__name__)
app.config.from_object(config)
logs.configure_logs(app)
if not app.config["FILE_TREE_FOLDER"]:
# Default file_trees are included in Python package: use root_path
app.config["FILE_TREE_FOLDER"] = os.path.join(app.root_path, "file_trees")
db = SQLAlchemy(app)
migrate = Migrate(app, db) # DB schema migration features
app.secret_key = app.config["SECRET_KEY"]
jwt = JWTManager(app) # JWT auth tokens
Principal(app) # Permissions
cache.cache.init_app(app) # Function caching
flask_fs.init_app(app) # To save files in object storage
mail = Mail()
mail.init_app(app) # To send emails
swagger = Swagger(
app, template=swagger.swagger_template, config=swagger.swagger_config
)
@app.teardown_appcontext
def shutdown_session(exception=None):
db.session.remove()
@app.errorhandler(404)
def page_not_found(error):
return jsonify(error=True, message=str(error)), 404
@app.errorhandler(WrongIdFormatException)
def id_parameter_format_error(error):
return (
jsonify(
error=True,
message="One of the ID sent in parameter is not properly formatted.",
),
400,
)
@app.errorhandler(WrongParameterException)
def wrong_parameter(error):
return jsonify(error=True, message=str(error)), 400
@app.errorhandler(ExpiredSignatureError)
def wrong_token_signature(error):
return jsonify(error=True, message=str(error)), 401
@app.errorhandler(ModelWithRelationsDeletionException)
def try_delete_model_with_relations(error):
return jsonify(error=True, message=str(error)), 400
@app.errorhandler(WrongTaskTypeForEntityException)
def wrong_task_type_for_entity(error):
return jsonify(error=True, message=str(error)), 400
if not config.DEBUG:
@app.errorhandler(Exception)
def server_error(error):
stacktrace = traceback.format_exc()
current_app.logger.error(stacktrace)
return (
jsonify(error=True, message=str(error), stacktrace=stacktrace),
500,
)
def configure_auth():
from zou.app.services import persons_service
@jwt.token_in_blacklist_loader
def check_if_token_is_revoked(decrypted_token):
return auth_tokens_store.is_revoked(decrypted_token)
@jwt.user_loader_callback_loader
def add_permissions(callback):
try:
user = persons_service.get_current_user()
if user is not None:
identity_changed.send(
current_app._get_current_object(),
identity=Identity(user["id"]),
)
return user
except PersonNotFoundException:
return None
def load_api():
from . import api
api.configure(app)
fs.mkdir_p(app.config["TMP_DIR"])
configure_auth()
load_api()
|
Python
| 0
|
@@ -384,16 +384,58 @@
ureError
+%0Afrom babel.core import UnknownLocaleError
%0A%0Afrom .
@@ -2486,16 +2486,143 @@
, 400%0A%0A%0A
+@app.errorhandler(UnknownLocaleError)%0Adef wrong_locale_label(error):%0A return jsonify(error=True, message=str(error)), 400%0A%0A%0A
if not c
|
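
The added handler in isolation — a sketch assuming any Flask app object; babel.core.UnknownLocaleError is what Babel raises for an unrecognized locale identifier:

from flask import Flask, jsonify
from babel.core import UnknownLocaleError

app = Flask(__name__)

@app.errorhandler(UnknownLocaleError)
def wrong_locale_label(error):
    # Surface the bad locale as a 400 client error instead of a generic 500.
    return jsonify(error=True, message=str(error)), 400
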
61a187e6064794cb82b2f003c0eb52d96f8038a2
|
Revert "try ruamel.yaml - third iteration"
|
cellpy/parameters/prmreader.py
|
cellpy/parameters/prmreader.py
|
# -*- coding: utf-8 -*-
import glob
import os
import sys
from collections import OrderedDict
import logging
import warnings
from pathlib import Path
import box
from ruamel.yaml import YAML
from cellpy.parameters import prms
from cellpy.exceptions import ConfigFileNotRead, ConfigFileNotWritten
logger = logging.getLogger(__name__)
using_ruamel = True
def _write_prm_file(file_name=None):
logger.debug("saving configuration to %s" % file_name)
config_dict = _pack_prms()
if using_ruamel:
yaml = YAML(typ='safe')
yaml.default_flow_style = False
config_file = Path(file_name)
yaml.dump(config_dict, config_file)
else:
try:
with open(file_name, "w") as config_file:
yaml.dump(config_dict, config_file, default_flow_style=False,
explicit_start=True, explicit_end=True)
except yaml.YAMLError:
raise ConfigFileNotWritten
def _update_prms(config_dict):
logger.debug("updating parameters")
logger.debug("new prms:" + str(config_dict))
for key in config_dict:
if hasattr(prms, key):
_config_attr = getattr(prms, key)
for k in config_dict[key]:
_config_attr[k] = config_dict[key][k]
else:
logger.info("\n not-supported prm: %s" % key)
def _pack_prms():
"""if you introduce new 'save-able' parameter dictionaries, then you have
to include them here"""
config_dict = {
"Paths": prms.Paths.to_dict(),
"FileNames": prms.FileNames.to_dict(),
"Db": prms.Db.to_dict(),
"DbCols": prms.DbCols.to_dict(),
"DataSet": prms.DataSet.to_dict(),
"Reader": prms.Reader.to_dict(),
"Instruments": prms.Instruments.to_dict(),
# "excel_db_cols": prms.excel_db_cols.to_dict(),
# "excel_db_filename_cols": prms.excel_db_filename_cols.to_dict(),
"Batch": prms.Batch.to_dict(),
}
return config_dict
def _read_prm_file(prm_filename):
"""read the prm file"""
logger.debug("Reading config-file: %s" % prm_filename)
if using_ruamel:
yaml = YAML(typ='safe')
prm_dict = yaml.load(prm_filename)
_update_prms(prm_dict)
else:
try:
with open(prm_filename, "r") as config_file:
prm_dict = yaml.load(config_file, Loader=yaml.FullLoader)
except yaml.YAMLError as e:
raise ConfigFileNotRead from e
else:
_update_prms(prm_dict)
def __look_at(file_name):
if using_ruamel:
yaml = YAML(typ='safe')
t = yaml.load(file_name)
else:
with open(file_name, "r") as config_file:
t = yaml.load(config_file)
print(t)
def _get_prm_file(file_name=None, search_order=None):
"""returns name of the prm file"""
if file_name is not None:
if os.path.isfile(file_name):
return file_name
else:
logger.info("Could not find the prm-file")
default_name = prms._prm_default_name
prm_globtxt = prms._prm_globtxt
script_dir = os.path.abspath(os.path.dirname(__file__))
search_path = dict()
search_path["curdir"] = os.path.abspath(os.path.dirname(sys.argv[0]))
search_path["filedir"] = script_dir
search_path["userdir"] = os.path.expanduser("~")
if search_order is None:
search_order = ["userdir", ] # ["curdir","filedir", "userdir",]
else:
search_order = search_order
    # The default name for the prm file is at the moment in the script-dir,
# while default searching is in the userdir (yes, I know):
prm_default = os.path.join(script_dir, default_name)
# -searching-----------------------
search_dict = OrderedDict()
for key in search_order:
search_dict[key] = [None, None]
prm_directory = search_path[key]
default_file = os.path.join(prm_directory, default_name)
if os.path.isfile(default_file):
# noinspection PyTypeChecker
search_dict[key][0] = default_file
prm_globtxt_full = os.path.join(prm_directory, prm_globtxt)
user_files = glob.glob(prm_globtxt_full)
for f in user_files:
if os.path.basename(f) != os.path.basename(default_file):
search_dict[key][1] = f
break
# -selecting----------------------
prm_file = None
for key, file_list in search_dict.items():
if file_list[-1]:
prm_file = file_list[-1]
break
else:
if not prm_file:
prm_file = file_list[0]
if prm_file:
prm_filename = prm_file
else:
prm_filename = prm_default
return prm_filename
def _save_current_prms_to_user_dir():
# This should be put into the cellpy setup script
file_name = os.path.join(prms.user_dir, prms._prm_default_name)
_write_prm_file(file_name)
def info():
"""this function will show only the 'box'-type
attributes and their content in the cellpy.prms module"""
print("convenience function for listing prms")
print(type(prms))
print(prms.__name__)
print(f"prm file: {_get_prm_file()}")
for key in prms.__dict__:
if isinstance(prms.__dict__[key], box.Box):
print()
print(80 * "=")
print(f"prms.{key}:")
print(80 * "-")
for subkey in prms.__dict__[key]:
print(
f"prms.{key}.{subkey} = ",
f"{prms.__dict__[key][subkey]}"
)
print(80 * "=")
def main():
print(" Testing ")
f = _get_prm_file()
print(f"reading {f}")
print("writing parameters")
_write_prm_file(f)
print("reading parameters")
_read_prm_file(f)
print(prms.Reader)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -155,16 +155,30 @@
ort box%0A
+# import yaml%0A
from rua
@@ -5690,53 +5690,8 @@
nt(f
-%22reading %7Bf%7D%22)%0A print(%22writing parameters%22
)%0A
@@ -5707,24 +5707,25 @@
prm_file(f)%0A
+%0A
print(%22r
@@ -5726,29 +5726,11 @@
int(
-%22reading parameters%22)
+f)%0A
%0A
|
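
For reference, a small sketch of the ruamel.yaml usage being reverted here: a YAML(typ='safe') instance accepts pathlib.Path objects directly for both load and dump, which is what the `using_ruamel` branches relied on:

from pathlib import Path
from ruamel.yaml import YAML

yaml = YAML(typ='safe')
yaml.default_flow_style = False

def save_config(config_dict, file_name):
    yaml.dump(config_dict, Path(file_name))   # Path, not an open handle

def load_config(file_name):
    return yaml.load(Path(file_name))

Note the diff itself only reinstates a commented-out `# import yaml` and trims main(), leaving the ruamel branches in place.
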
7f9c9c25f5786bf96ff3d89cc8fd840e3e6a4a6d
|
Allow passing of a tuple of three integers to get a datetime.
|
pelican/plugins/jinja_filters/jinja_filters.py
|
pelican/plugins/jinja_filters/jinja_filters.py
|
"""Various filters for Jinja."""
from datetime import datetime as _datetime
from titlecase import titlecase as _titlecase
__all__ = [
"article_date",
"breaking_spaces",
"datetime",
"titlecase",
]
def datetime(value, format_str="%Y/%m/%d %H:%M"):
"""
Convert a datetime to a different format.
The default format looks like --> 2016/11/25 12:34
Args
----
value (datetime.datetime): input date and time
format_str (str): The datetime format string to apply to value
Returns
-------
str: value, after the format_str has been applied
"""
return value.strftime(format_str)
def article_date(value):
"""
Convert a date to the format we want it displayed on the article template.
Format looks like --> Friday, November 4, 2020
Args
----
value (datetime.datetime): input date
Returns
-------
str: value, formatted nicely for displaying the date.
"""
return value.strftime("%A, %B %-d, %Y")
def datetime_from_period(value):
"""
Converts "period" into a datetime object.
On yearly/monthly/daily archive pages, a "period" object is supplied so you
know what timeperiod the particular archive page is for. This converts it
to a datetime.datetime object, so it can be further processed.
If a month is not provided (i.e. the period is for a yearly archive),
January is assumed. If a day is not provided (i.e. the period is for a
yearly or monthly archive), the 1st is assumed.
Args
----
value (tuple): input period
Returns
-------
datetime.datetime: value converted
"""
JANUARY = _datetime(2021, 1, 1).strftime("%B")
new_value = " ".join(
value[0],
value[1] if len(value) > 1 else JANUARY,
value[2] if len(value) > 2 else 1,
)
new_datetime = _datetime.strptime(*new_value, "%Y %B %-d")
return new_datetime
def breaking_spaces(value):
"""
Convert non-breaking spaces to regular spaces.
Args
----
value (str): input value
Returns
-------
str: the input string, now with regular spaces
"""
return value.replace("\u00A0", " ")
def titlecase(value):
"""
Returns the titlecased version of the supplied text.
Args
----
value (str): input value
Returns
-------
str: value, titlecase formatted
"""
return _titlecase(value)
|
Python
| 0.000005
|
@@ -1530,24 +1530,177 @@
s assumed.%0A%0A
+ You can also generate a tuple of (up to three) integers to get a datetime%0A out, using the integer representation for the month (1=January, etc).%0A%0A
Args%0A
@@ -1822,23 +1822,91 @@
%22%22%22%0A
-JANUARY
+if len(value) %3E= 2 and isinstance(value%5B2%5D, int):%0A placeholder_month
= _date
@@ -1916,17 +1916,24 @@
e(2021,
-1
+value%5B2%5D
, 1).str
@@ -1952,104 +1952,205 @@
-new_value = %22 %22.join(%0A value%5B0%5D,%0A value%5B1%5D if len(value) %3E 1 else JANUARY,%0A
+elif len(value) == 1:%0A placeholder_month = _datetime(2021, 1, 1).strftime(%22%25B%22)%0A else:%0A placeholder_month = value%5B2%5D%0A%0A new_value = %22 %22.join(%0A value%5B0%5D, placeholder_month,
val
@@ -2170,18 +2170,19 @@
value) %3E
- 2
+= 3
else 1,
|
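
A standalone sketch of the conversion the patched filter works toward — a (year[, month[, day]]) tuple becomes a datetime, defaulting to January and the 1st, with the month accepted either as a name ("November") or as an integer (11). This is illustration only, not the patched function itself:

from datetime import datetime

def period_to_datetime(period):
    year = int(period[0])
    month = period[1] if len(period) > 1 else 1
    if isinstance(month, str):
        month = datetime.strptime(month, "%B").month   # name -> number
    day = int(period[2]) if len(period) > 2 else 1
    return datetime(year, int(month), day)

# period_to_datetime((2020, "November", 4)) -> datetime(2020, 11, 4)
# period_to_datetime((2020, 11))            -> datetime(2020, 11, 1)
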
addb75bfb13ffa3670090e750eb65026fd55f4f2
|
version bump 3.0.6
|
ella/__init__.py
|
ella/__init__.py
|
VERSION = (3, 0, 5)
__version__ = VERSION
__versionstr__ = '.'.join(map(str, VERSION))
|
Python
| 0
|
@@ -14,9 +14,9 @@
0,
-5
+6
)%0A%0A_
|
280fdec84513c554e11b1dc93b65baed7d6d1438
|
Update refrigeration_loads.py
|
cea/demand/refrigeration_loads.py
|
cea/demand/refrigeration_loads.py
|
# -*- coding: utf-8 -*-
"""
refrigeration loads
"""
from __future__ import division
import numpy as np
import pandas as pd
from cea.technologies import heatpumps
from cea.constants import HOURS_IN_YEAR
from cea.demand.constants import T_C_REF_SUP_0, T_C_REF_RE_0
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2016, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def has_refrigeration_load(bpr):
"""
    Checks if the building has a refrigeration load
:param bpr: BuildingPropertiesRow
:type bpr: cea.demand.building_properties.BuildingPropertiesRow
:return: True or False
:rtype: bool
"""
if bpr.internal_loads['Qcre_Wm2'] > 0:
return True
else:
return False
def calc_Qcre_sys(bpr, tsd, schedules):
# calculate refrigeration loads
tsd['Qcre'] = schedules['Qcre'] * bpr.internal_loads['Qcre_Wm2'] * -1.0 # cooling loads are negative
# calculate distribution losses for refrigeration loads analogously to space cooling distribution losses
Y = bpr.building_systems['Y'][0]
Lv = bpr.building_systems['Lv']
Qcre_d_ls = ((T_C_REF_SUP_0 + T_C_REF_RE_0) / 2 - tsd['T_ext']) * (tsd['Qcre'] / np.nanmin(tsd['Qcre'])) * (Lv * Y)
# calculate system loads for data center
tsd['Qcre_sys'] = tsd['Qcre'] + Qcre_d_ls
# writing values to tsd, replacing function and np.vectorize call with simple for loop
for h in range(HOURS_IN_YEAR):
if tsd['Qcre_sys'][h] > 0:
tsd['mcpcre_sys'][h] = tsd['Qcre_sys'][h] / (T_C_REF_RE_0 - T_C_REF_SUP_0)
tsd['Tcre_sys_re'][h] = T_C_REF_RE_0
tsd['Tcre_sys_sup'][h] = T_C_REF_SUP_0
return tsd
def calc_Qref(locator, bpr, tsd):
"""
it calculates final loads
"""
# GET SYSTEMS EFFICIENCIES
data_systems = pd.read_excel(locator.get_life_cycle_inventory_supply_systems(), "COOLING").set_index('code')
type_system = bpr.supply['type_cs']
energy_source = data_systems.loc[type_system, 'source_cs']
if energy_source == "GRID":
if bpr.supply['type_cs'] in {'T2', 'T3'}:
if bpr.supply['type_cs'] == 'T2':
t_source = (tsd['T_ext'] + 273)
if bpr.supply['type_cs'] == 'T3':
t_source = (tsd['T_ext_wetbulb'] + 273)
# heat pump energy
tsd['E_cre'] = np.vectorize(heatpumps.HP_air_air)(tsd['mcpcre_sys'], (tsd['Tcre_sys_sup'] + 273),
(tsd['Tcre_sys_re'] + 273), t_source)
# final to district is zero
tsd['DC_cre'] = np.zeros(HOURS_IN_YEAR)
elif energy_source == "DC":
tsd['DC_cre'] = tsd['Qcre_sys']
tsd['E_cre'] = np.zeros(HOURS_IN_YEAR)
else:
tsd['E_cre'] = np.zeros(HOURS_IN_YEAR)
return tsd
|
Python
| 0.000001
|
@@ -405,16 +405,53 @@
Fonseca%22
+, %22Martin Mosteiro%22, %22Gabriel Happle%22
%5D%0A__lice
@@ -1337,16 +1337,18 @@
E_0) / 2
+.0
- tsd%5B'
@@ -1599,122 +1599,627 @@
-for h in range(HOURS_IN_YEAR):%0A if tsd%5B'Qcre_sys'%5D%5Bh%5D %3E 0:%0A tsd%5B'mcpcre_sys'%5D%5Bh%5D = tsd%5B'
+tsd%5B'mcpcre_sys'%5D, tsd%5B'Tcre_sys_re'%5D, tsd%5B'Tcre_sys_sup'%5D =%5C%0A np.vectorize(calc_refrigeration_temperature_and_massflow)(tsd%5B'Qcre_sys'%5D)%0A%0A return tsd%0A%0A%0Adef calc_refrigeration_temperature_and_massflow(Qcre_sys):%0A %22%22%22%0A Calculate refrigeration supply and return temperatures and massflows based on the refrigeration load%0A This function is intended to be used in np.vectorize form%0A :param Qcre_sys: refrigeration load including losses%0A :return: refrigeration massflow, refrigeration supply temperature, refrigeration return temperature%0A %22%22%22%0A%0A if Qcre_sys %3E 0.0:%0A mcpcre_sys =
Qcre_sys
'%5D%5Bh
@@ -2214,21 +2214,16 @@
Qcre_sys
-'%5D%5Bh%5D
/ (T_C_
@@ -2252,33 +2252,24 @@
_0)%0A
- tsd%5B'
Tcre_sys_re'
@@ -2267,21 +2267,16 @@
e_sys_re
-'%5D%5Bh%5D
= T_C_R
@@ -2291,25 +2291,16 @@
- tsd%5B'
Tcre_sys
@@ -2307,13 +2307,8 @@
_sup
-'%5D%5Bh%5D
= T
@@ -2320,31 +2320,162 @@
F_SUP_0%0A
-%0A return tsd
+ else:%0A mcpcre_sys = np.nan%0A Tcre_sys_re = np.nan%0A Tcre_sys_sup = np.nan%0A%0A return mcpcre_sys, Tcre_sys_re, Tcre_sys_sup
%0A%0A%0Adef c
|
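
The loop-to-np.vectorize move above follows a general numpy pattern: a scalar function returning a tuple, once vectorized, yields a tuple of arrays. A self-contained sketch with placeholder temperatures:

import numpy as np

T_SUP, T_RE = 1.0, 15.0   # placeholder supply/return temperatures

def calc_temperature_and_massflow(q):
    # Scalar logic; NaN marks hours with no load.
    if q > 0.0:
        return q / (T_RE - T_SUP), T_RE, T_SUP
    return np.nan, np.nan, np.nan

q_sys = np.array([0.0, 140.0, 70.0])
mcp, t_re, t_sup = np.vectorize(calc_temperature_and_massflow)(q_sys)
# mcp -> [nan, 10., 5.], t_re -> [nan, 15., 15.], t_sup -> [nan, 1., 1.]
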
b548092d480871e402e2d50ab96d864c5851cab2
|
fix __init__ changes
|
ffmpeg/__init__.py
|
ffmpeg/__init__.py
|
from __future__ import unicode_literals
from . import _filters, _ffmpeg, _run
from ._filters import *
from ._ffmpeg import *
from ._run import *
from ._view import *
__all__ = _filters.__all__ + _ffmpeg.__all__ + _run.__all__ + _view.__all__
|
Python
| 0.000057
|
@@ -142,29 +142,8 @@
t *%0A
-from ._view import *%0A
__al
@@ -201,21 +201,5 @@
ll__
- + _view.__all__
%0A
|
429e86ffca22d8a35881f0d9a172efaa900a6e70
|
replace False with None
|
dash/development/component_generator.py
|
dash/development/component_generator.py
|
from __future__ import print_function
from collections import OrderedDict
import json
import sys
import subprocess
import shlex
import os
import argparse
import shutil
import functools
import pkg_resources
import yaml
from ._r_components_generation import write_class_file
from ._r_components_generation import generate_exports
from ._py_components_generation import generate_class_file
from ._py_components_generation import generate_imports
from ._py_components_generation import generate_classes_files
from ._jl_components_generation import generate_struct_file
from ._jl_components_generation import generate_module
reserved_words = [
"UNDEFINED",
"REQUIRED",
"to_plotly_json",
"available_properties",
"available_wildcard_properties",
"_.*",
]
class _CombinedFormatter(
argparse.ArgumentDefaultsHelpFormatter, argparse.RawDescriptionHelpFormatter
):
pass
# pylint: disable=too-many-locals, too-many-arguments
def generate_components(
components_source,
project_shortname,
package_info_filename="package.json",
ignore="^_",
rprefix=None,
rdepends="",
rimports="",
rsuggests="",
jlprefix=None
):
project_shortname = project_shortname.replace("-", "_").rstrip("/\\")
is_windows = sys.platform == "win32"
extract_path = pkg_resources.resource_filename("dash", "extract-meta.js")
reserved_patterns = "|".join("^{}$".format(p) for p in reserved_words)
os.environ["NODE_PATH"] = "node_modules"
cmd = shlex.split(
'node {} "{}" "{}" {}'.format(
extract_path, ignore, reserved_patterns, components_source
),
posix=not is_windows,
)
shutil.copyfile(
"package.json", os.path.join(project_shortname, package_info_filename)
)
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=is_windows
)
out, err = proc.communicate()
status = proc.poll()
if err:
print(err.decode(), file=sys.stderr)
if not out:
print(
"Error generating metadata in {} (status={})".format(
project_shortname, status
),
file=sys.stderr,
)
sys.exit(1)
metadata = safe_json_loads(out.decode("utf-8"))
generator_methods = [generate_class_file]
if rprefix is not None or jlprefix is not None:
with open("package.json", "r") as f:
pkg_data = safe_json_loads(f.read())
if rprefix is not None:
if not os.path.exists("man"):
os.makedirs("man")
if not os.path.exists("R"):
os.makedirs("R")
if os.path.isfile("dash-info.yaml"):
with open("dash-info.yaml") as yamldata:
rpkg_data = yaml.safe_load(yamldata)
else:
rpkg_data = None
generator_methods.append(
functools.partial(write_class_file, prefix=rprefix, rpkg_data=rpkg_data)
)
if jlprefix is not False:
generator_methods.append(
functools.partial(generate_struct_file, prefix=jlprefix)
)
components = generate_classes_files(project_shortname, metadata, *generator_methods)
with open(os.path.join(project_shortname, "metadata.json"), "w") as f:
json.dump(metadata, f, indent=2)
generate_imports(project_shortname, components)
if rprefix is not None:
generate_exports(
project_shortname,
components,
metadata,
pkg_data,
rpkg_data,
rprefix,
rdepends,
rimports,
rsuggests,
)
if jlprefix is not False:
generate_module(
project_shortname,
components,
metadata,
pkg_data,
jlprefix
)
def safe_json_loads(s):
jsondata_unicode = json.loads(s, object_pairs_hook=OrderedDict)
if sys.version_info[0] >= 3:
return jsondata_unicode
return byteify(jsondata_unicode)
def cli():
parser = argparse.ArgumentParser(
prog="dash-generate-components",
formatter_class=_CombinedFormatter,
description="Generate dash components by extracting the metadata "
"using react-docgen. Then map the metadata to python classes.",
)
parser.add_argument("components_source", help="React components source directory.")
parser.add_argument(
"project_shortname", help="Name of the project to export the classes files."
)
parser.add_argument(
"-p",
"--package-info-filename",
default="package.json",
help="The filename of the copied `package.json` to `project_shortname`",
)
parser.add_argument(
"-i",
"--ignore",
default="^_",
help="Files/directories matching the pattern will be ignored",
)
parser.add_argument(
"--r-prefix",
help="Specify a prefix for Dash for R component names, write "
"components to R dir, create R package.",
)
parser.add_argument(
"--r-depends",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Depends field of the DESCRIPTION file.",
)
parser.add_argument(
"--r-imports",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Imports field of the DESCRIPTION file.",
)
parser.add_argument(
"--r-suggests",
default="",
help="Specify a comma-separated list of R packages to be "
"inserted into the Suggests field of the DESCRIPTION file.",
)
parser.add_argument(
"--jl-prefix",
help="Specify a prefix for Dash for R component names, write "
"components to R dir, create R package.",
)
args = parser.parse_args()
generate_components(
args.components_source,
args.project_shortname,
package_info_filename=args.package_info_filename,
ignore=args.ignore,
rprefix=args.r_prefix,
rdepends=args.r_depends,
rimports=args.r_imports,
rsuggests=args.r_suggests,
jlprefix=args.jl_prefix,
)
# pylint: disable=undefined-variable
def byteify(input_object):
if isinstance(input_object, dict):
return OrderedDict(
[(byteify(key), byteify(value)) for key, value in input_object.iteritems()]
)
if isinstance(input_object, list):
return [byteify(element) for element in input_object]
if isinstance(input_object, unicode): # noqa:F821
return input_object.encode("utf-8")
return input_object
if __name__ == "__main__":
cli()
|
Python
| 0.999999
|
@@ -2968,36 +2968,35 @@
jlprefix is not
-Fals
+Non
e:%0A gener
@@ -3658,12 +3658,11 @@
not
-Fals
+Non
e:%0A
|
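
The bug in miniature: argparse leaves --jl-prefix as None when the flag is absent, and `None is not False` evaluates to True, so the old guard always fired. A sketch:

def generate(jlprefix=None):
    if jlprefix is not False:   # buggy: None is not False -> always True
        return "julia artifacts for {}".format(jlprefix)
    return "skipped"

def generate_fixed(jlprefix=None):
    if jlprefix is not None:    # fixed: skips when the flag is absent
        return "julia artifacts for {}".format(jlprefix)
    return "skipped"

# generate()       -> 'julia artifacts for None'   (wrong)
# generate_fixed() -> 'skipped'                    (right)
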
22a10d3e0e5a0f5c11fa3ebaaa05c6e6e00f95a2
|
Add some metadata
|
cgi-bin/wepp/sm2shape.py
|
cgi-bin/wepp/sm2shape.py
|
#!/mesonet/python/bin/python
# Output by township
from pyIEM import iemdb, wellknowntext
import shapelib, dbflib, os, cgi, mx.DateTime, sys, zipfile, shutil
i = iemdb.iemdb()
mydb = i['wepp']
os.chdir('/tmp/')
# Figure out what date we want
form = cgi.FormContent()
year = int(form["year"][0])
month = int(form["month"][0])
day = int(form["day"][0])
ts = mx.DateTime.DateTime(year, month, day)
fp = "%s_sm" % (ts.strftime("%Y%m%d"), )
print "Content-type: application/octet-stream"
print "Content-Disposition: attachment; filename=%s.zip" % (fp,)
print
if (os.path.isfile(fp+".zip")):
print file(fp+".zip", 'r').read(),
sys.exit(0)
twp = {}
rs = mydb.query("SELECT astext(transform(the_geom,4326)) as tg, model_twp from iatwp").dictresult()
for i in range(len(rs)):
twp[ rs[i]["model_twp"] ] = rs[i]["tg"]
rs = mydb.query("SELECT * from waterbalance_by_twp \
WHERE valid = '%s'" % (ts.strftime("%Y-%m-%d"), ) ).dictresult()
shp = shapelib.create(fp, shapelib.SHPT_POLYGON)
dbf = dbflib.create(fp)
dbf.add_field("VALID", dbflib.FTString, 8, 0)
dbf.add_field("MODL_TWP", dbflib.FTString, 10, 0)
dbf.add_field("VSM", dbflib.FTDouble, 8, 4)
dbf.add_field("VSM_STDD", dbflib.FTDouble, 8, 4)
dbf.add_field("S10CM", dbflib.FTDouble, 8, 4)
dbf.add_field("S20CM", dbflib.FTDouble, 8, 4)
v = ts.strftime("%Y%m%d")
for i in range(len(rs)):
m = rs[i]["model_twp"]
vsm = float(rs[i]["vsm"])
vsms = float(rs[i]["vsm_stddev"])
s10 = float(rs[i]["s10cm"])
s20 = float(rs[i]["s20cm"])
f = wellknowntext.convert_well_known_text( twp[m] )
obj = shapelib.SHPObject(shapelib.SHPT_POLYGON, 1, f )
shp.write_object(-1, obj)
dbf.write_record(i, (v,m,vsm,vsms,s10,s20) )
del(dbf)
del(shp)
shutil.copyfile("/mesonet/data/gis/meta/4326.prj", fp+".prj")
z = zipfile.ZipFile(fp+".zip", 'w', zipfile.ZIP_DEFLATED)
z.write(fp+".shp")
z.write(fp+".shx")
z.write(fp+".dbf")
z.write(fp+".prj")
z.close()
print file(fp+".zip", 'r').read(),
os.remove(fp+".shp")
os.remove(fp+".shx")
os.remove(fp+".dbf")
os.remove(fp+".prj")
|
Python
| 0.000355
|
@@ -1698,16 +1698,515 @@
l(shp)%0A%0A
+o = open(fp+%22.txt%22, 'w')%0Ao.write(%22%22%22%0AIEM Modelled Soil Moisture from the Iowa Daily Erosion Project%0Ahttp://wepp.mesonet.agron.iastate.edu%0A%0ADBF Columns are:%0A MODL_TWP Model township%0A VALID Date data is valid for YYYYMMDD%0A VSM Volumetric Soil Moisture %5B%25%5D%0A VSM_STDD VSM Standard Deviation within model township%0A S10CM 0-10 cm depth soil moisture %5Bmm%5D%0A S20CM 10-20 cm depth soil moisture %5Bmm%5D%0A%0AData Contact:%0A Daryl Herzmann akrherz@iastate.edu 515.294.5978%0A%0A%22%22%22)%0Ao.close()%0A%0A
shutil.c
@@ -2393,16 +2393,35 @@
%22.prj%22)%0A
+z.write(fp+%22.txt%22)%0A
z.close(
@@ -2522,16 +2522,16 @@
%22.dbf%22)%0A
-
os.remov
@@ -2531,20 +2531,41 @@
s.remove(fp+%22.prj%22)%0A
+os.remove(fp+%22.txt%22)%0A
|
96731cebe1c729ed6eceeab916e7420ddb12f435
|
Print branch name
|
support/travis-build.py
|
support/travis-build.py
|
#!/usr/bin/env python
# Build the project on Travis CI.
from __future__ import print_function
import errno, os, re, shutil, sys, tempfile, urllib
from subprocess import call, check_call, check_output, Popen, PIPE, STDOUT
def rmtree_if_exists(dir):
try:
shutil.rmtree(dir)
except OSError as e:
if e.errno == errno.ENOENT:
pass
build = os.environ['BUILD']
if build == 'Doc':
travis = 'TRAVIS' in os.environ
# Install dependencies.
if travis:
if check_output('git rev-parse --abbrev-ref HEAD', shell=True).strip() != 'master':
exit(0) # Ignore non-master branches
check_call('curl -s https://deb.nodesource.com/gpgkey/nodesource.gpg.key | ' +
'sudo apt-key add -', shell=True)
check_call('echo "deb https://deb.nodesource.com/node_0.10 precise main" | ' +
'sudo tee /etc/apt/sources.list.d/nodesource.list', shell=True)
check_call(['sudo', 'apt-get', 'update'])
check_call(['sudo', 'apt-get', 'install', 'python-virtualenv', 'nodejs'])
check_call(['npm', 'install', '-g', 'less', 'less-plugin-clean-css'])
deb_file = 'doxygen_1.8.6-2_amd64.deb'
urllib.urlretrieve('http://mirrors.kernel.org/ubuntu/pool/main/d/doxygen/' +
deb_file, deb_file)
check_call(['sudo', 'dpkg', '-i', deb_file])
cppformat_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.insert(0, os.path.join(cppformat_dir, 'doc'))
import build
html_dir = build.build_docs()
repo = 'cppformat.github.io'
if travis and 'KEY' not in os.environ:
# Don't update the repo if building on Travis from an account that doesn't
# have push access.
print('Skipping update of ' + repo)
exit(0)
# Clone the cppformat.github.io repo.
rmtree_if_exists(repo)
git_url = 'https://github.com/' if travis else 'git@github.com:'
check_call(['git', 'clone', git_url + 'cppformat/{}.git'.format(repo)])
# Copy docs to the repo.
target_dir = os.path.join(repo, 'dev')
rmtree_if_exists(target_dir)
shutil.copytree(html_dir, target_dir, ignore=shutil.ignore_patterns('.*'))
if travis:
check_call(['git', 'config', '--global', 'user.name', 'amplbot'])
check_call(['git', 'config', '--global', 'user.email', 'viz@ampl.com'])
# Push docs to GitHub pages.
check_call(['git', 'add', '--all'], cwd=repo)
if call(['git', 'diff-index', '--quiet', 'HEAD'], cwd=repo):
check_call(['git', 'commit', '-m', 'Update documentation'], cwd=repo)
cmd = 'git push'
if travis:
cmd += ' https://$KEY@github.com/cppformat/cppformat.github.io.git master'
p = Popen(cmd, shell=True, stdout=PIPE, stderr=STDOUT, cwd=repo)
# Print the output without the key.
print(p.communicate()[0].replace(os.environ['KEY'], '$KEY'))
if p.returncode != 0:
raise CalledProcessError(p.returncode, cmd)
exit(0)
check_call(['git', 'submodule', 'update', '--init'])
check_call(['cmake', '-DCMAKE_BUILD_TYPE=' + build, '-DFMT_PEDANTIC=ON', '.'])
check_call(['make', '-j4'])
env = os.environ.copy()
env['CTEST_OUTPUT_ON_FAILURE'] = '1'
if call(['make', 'test'], env=env):
with open('Testing/Temporary/LastTest.log', 'r') as f:
print(f.read())
|
Python
| 0.000054
|
@@ -464,18 +464,24 @@
is:%0A
-if
+branch =
check_o
@@ -544,21 +544,68 @@
ip()
- != 'master':
+%0A if branch != 'master':%0A print('Branch: ' + branch)
%0A
|
f1630095ffe6c0d54e16c2466a2c1885453ec1af
|
Remove execute permission from exception.py
|
ironic_lib/exception.py
|
ironic_lib/exception.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Ironic base exception handling.
Includes decorator for re-raising Ironic-type exceptions.
SHOULD include dedicated exception logging.
"""
import logging
import six
from oslo_config import cfg
from ironic_lib.openstack.common._i18n import _
from ironic_lib.openstack.common._i18n import _LE
LOG = logging.getLogger(__name__)
exc_log_opts = [
cfg.BoolOpt('fatal_exception_format_errors',
default=False,
help='Make exception message format errors fatal.',
deprecated_group='DEFAULT'),
]
CONF = cfg.CONF
CONF.register_opts(exc_log_opts, group='ironic_lib')
class IronicException(Exception):
"""Base Ironic Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = _("An unknown exception occurred.")
code = 500
headers = {}
safe = False
def __init__(self, message=None, **kwargs):
self.kwargs = kwargs
if 'code' not in self.kwargs:
try:
self.kwargs['code'] = self.code
except AttributeError:
pass
if not message:
try:
message = self.message % kwargs
except Exception as e:
# kwargs doesn't match a variable in the message
# log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation'))
for name, value in kwargs.iteritems():
LOG.error("%s: %s" % (name, value))
if CONF.ironic_lib.fatal_exception_format_errors:
raise e
else:
# at least get the core message out if something happened
message = self.message
super(IronicException, self).__init__(message)
def format_message(self):
if self.__class__.__name__.endswith('_Remote'):
return self.args[0]
else:
return six.text_type(self)
class InstanceDeployFailure(IronicException):
message = _("Failed to deploy instance: %(reason)s")
class FileSystemNotSupported(IronicException):
message = _("Failed to create a file system. "
"File system %(fs)s is not supported.")
|
Python
| 0.000042
| |
71a84ecb772aa5560e35409219c11001ac168c6a
|
Add logging for contact form email.
|
chmvh_website/contact/forms.py
|
chmvh_website/contact/forms.py
|
from django import forms
from django.conf import settings
from django.core import mail
from django.template import loader
class ContactForm(forms.Form):
name = forms.CharField()
email = forms.EmailField()
message = forms.CharField(widget=forms.Textarea(
attrs={'rows': 5}))
template = loader.get_template('contact/email/message.txt')
def send_email(self):
subject = '[CHMVH Website] Message from {}'.format(
self.cleaned_data['name'])
context = {
'name': self.cleaned_data['name'],
'email': self.cleaned_data['email'],
'message': self.cleaned_data['message'],
}
emails_sent = mail.send_mail(
subject,
self.template.render(context),
settings.DEFAULT_FROM_EMAIL,
['info@chapelhillvet.com'],
fail_silently=True)
return emails_sent == 1
|
Python
| 0
|
@@ -1,16 +1,67 @@
+import logging%0A%0Afrom smtplib import SMTPException%0A%0A
from django impo
@@ -168,16 +168,83 @@
oader%0A%0A%0A
+logger = logging.getLogger('chmvh_website.%7B0%7D'.format(__name__))%0A%0A%0A
class Co
@@ -779,16 +779,82 @@
%7D%0A%0A
+ logger.debug(%22Preparing to send email%22)%0A%0A try:%0A
@@ -891,24 +891,28 @@
+
+
subject,%0A
@@ -908,16 +908,20 @@
ubject,%0A
+
@@ -967,16 +967,20 @@
+
+
settings
@@ -1012,16 +1012,20 @@
+
+
%5B'info@c
@@ -1042,17 +1042,18 @@
et.com'%5D
-,
+)%0A
%0A
@@ -1061,25 +1061,234 @@
-fail_silently=Tru
+logger.info(%22Succesfully sent email from %7B0%7D%22.format(%0A self.cleaned_data%5B'email'%5D))%0A except SMTPException as e:%0A emails_sent = 0%0A%0A logger.exception(%22Failed to send email.%22, exc_info=
e)%0A%0A
|
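
An isolated sketch of the pattern the diff adds — note it also drops fail_silently=True, so send failures now raise SMTPException instead of being swallowed. Here `send` stands in for django.core.mail.send_mail:

import logging
from smtplib import SMTPException

logger = logging.getLogger(__name__)

def send_with_logging(send, *args, **kwargs):
    logger.debug("Preparing to send email")
    try:
        emails_sent = send(*args, **kwargs)
        logger.info("Successfully sent email")
    except SMTPException as e:
        # Treat a transport failure as zero mails sent and keep the traceback.
        emails_sent = 0
        logger.exception("Failed to send email.", exc_info=e)
    return emails_sent == 1
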
a11c058c520581239a76d1b87920fec7f087eff3
|
Use round brackets
|
readthedocs/builds/managers.py
|
readthedocs/builds/managers.py
|
"""Build and Version class model Managers"""
from __future__ import absolute_import
import logging
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from .constants import (BRANCH, TAG, LATEST, LATEST_VERBOSE_NAME, STABLE,
STABLE_VERBOSE_NAME)
from .querysets import VersionQuerySet
from readthedocs.core.utils.extend import (SettingsOverrideObject,
get_override_class)
log = logging.getLogger(__name__)
__all__ = ['VersionManager']
class VersionManagerBase(models.Manager):
"""
Version manager for manager only queries.
For queries not suitable for the :py:class:`VersionQuerySet`, such as create
queries.
"""
@classmethod
def from_queryset(cls, queryset_class, class_name=None):
# This is overridden because :py:meth:`models.Manager.from_queryset`
# uses `inspect` to retrieve the class methods, and the proxy class has
# no direct members.
queryset_class = get_override_class(
VersionQuerySet,
VersionQuerySet._default_class # pylint: disable=protected-access
)
return super(VersionManagerBase, cls).from_queryset(queryset_class, class_name)
def create_stable(self, **kwargs):
defaults = {
'slug': STABLE,
'verbose_name': STABLE_VERBOSE_NAME,
'machine': True,
'active': True,
'identifier': STABLE,
'type': TAG,
}
defaults.update(kwargs)
return self.create(**defaults)
def create_latest(self, **kwargs):
defaults = {
'slug': LATEST,
'verbose_name': LATEST_VERBOSE_NAME,
'machine': True,
'active': True,
'identifier': LATEST,
'type': BRANCH,
}
defaults.update(kwargs)
return self.create(**defaults)
def get_object_or_log(self, **kwargs):
try:
return super(VersionManagerBase, self).get(**kwargs)
except ObjectDoesNotExist:
log.warning('Version not found for the pk = {pk}'.format(pk=kwargs.get['pk']))
class VersionManager(SettingsOverrideObject):
_default_class = VersionManagerBase
_override_setting = 'VERSION_MANAGER'
|
Python
| 0.000357
|
@@ -2167,14 +2167,14 @@
.get
-%5B
+(
'pk'
-%5D
+)
))%0A%0A
|
797ab31382a6c92eb4e9496969e36c35a23db20d
|
Bump version to 10.0.1
|
recipe_scrapers/__version__.py
|
recipe_scrapers/__version__.py
|
__version__ = "10.0.0"
|
Python
| 0
|
@@ -13,11 +13,11 @@
= %2210.0.
-0
+1
%22%0A
|
41ceb649fe9af95a6434606996794fee38b09760
|
implement __json__ method
|
channelstream/channel.py
|
channelstream/channel.py
|
import copy
import logging
import six
import uuid
from datetime import datetime
import channelstream
log = logging.getLogger(__name__)
class Channel(object):
""" Represents one of our chat channels - has some config options """
def __init__(self, name, long_name=None, channel_configs=None):
self.name = name
self.long_name = long_name
self.last_active = datetime.utcnow()
self.connections = {}
self.notify_presence = False
self.broadcast_presence_with_user_lists = False
self.salvageable = False
self.store_history = False
self.history_size = 10
self.history = []
if not channel_configs:
channel_configs = {}
self.reconfigure_from_dict(channel_configs.get(self.name))
log.info('%s created' % self)
def reconfigure_from_dict(self, config):
if config:
keys = ('notify_presence', 'store_history', 'history_size',
'broadcast_presence_with_user_lists')
for key in keys:
val = config.get(key)
if val is not None:
setattr(self, key, val)
def add_connection(self, connection):
username = connection.username
if username not in self.connections:
self.connections[username] = []
if not self.connections[username] and self.notify_presence:
self.send_notify_presence_info(username, 'joined')
if connection not in self.connections[connection.username]:
self.connections[connection.username].append(connection)
def remove_connection(self, connection):
username = connection.username
if username not in self.connections:
self.connections[username] = []
if connection in self.connections[username]:
self.connections[username].remove(connection)
self.after_parted(username)
def after_parted(self, username):
"""
        Sends a parted message if necessary and removes the username from
        connections if empty
:param username:
:return:
"""
if not self.connections[username]:
del self.connections[username]
if self.notify_presence:
self.send_notify_presence_info(username, 'parted')
def send_notify_presence_info(self, username, action):
"""
Sends a message to other connected parties about a presence change
:param username:
:param action:
:return:
"""
connected_users = []
if self.broadcast_presence_with_user_lists:
for _username in self.connections.keys():
user_inst = channelstream.USERS.get(_username)
user_data = {
'user': user_inst.username,
'state': user_inst.public_state
}
connected_users.append(user_data)
self.last_active = datetime.utcnow()
payload = {
'uuid': str(uuid.uuid4()).replace('-', ''),
'type': 'presence',
'user': username,
'users': connected_users,
'timestamp': self.last_active,
'channel': self.name,
'message': {'action': action}
}
self.add_message(payload, exclude_users=[username])
return payload
def add_message(self, message, pm_users=None, exclude_users=None):
"""
Sends the message to all connections subscribed to this channel
"""
message = copy.deepcopy(message)
pm_users = pm_users or []
exclude_users = exclude_users or []
self.last_active = datetime.utcnow()
if self.store_history and message['type'] == 'message':
self.history.append(message)
self.history = self.history[(self.history_size) * -1:]
message.update({'channel': self.name})
# message everyone subscribed except excluded
total_sent = 0
for user, conns in six.iteritems(self.connections):
if not exclude_users or user not in exclude_users:
for connection in conns:
if not pm_users or connection.user in pm_users:
connection.add_message(message)
total_sent += 1
return total_sent
def __repr__(self):
return '<Channel: %s, connections:%s>' % (
self.name, len(self.connections))
|
Python
| 0.001169
|
@@ -4479,8 +4479,1126 @@
tions))%0A
+%0A def get_info(self, include_history=True, include_connections=False,%0A include_users=False):%0A chan_info = %7B%0A 'name': self.name,%0A 'long_name': self.long_name,%0A 'history': self.history if include_history else %5B%5D,%0A 'last_active': self.last_active,%0A 'total_connections': sum(%0A %5Blen(conns) for conns in self.connections.values()%5D),%0A 'total_users': len(self.connections),%0A 'users': %5B%5D%7D%0A%0A users_to_list = %5B%5D%0A%0A for username in self.connections.keys():%0A user_inst = channelstream.USERS.get(username)%0A if include_users and user_inst.username not in users_to_list:%0A users_to_list.append(user_inst.username)%0A udata = %7B'user': user_inst.username, %22connections%22: %5B%5D%7D%0A if include_connections:%0A udata%5B'connections'%5D = %5B%0A conn.id for conn in self.connections%5Busername%5D%5D%0A chan_info%5B'users'%5D.append(udata)%0A return chan_info%0A%0A def __json__(self, request=None):%0A return self.get_info()%0A
|
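
The `__json__(self, request=None)` signature added above follows the convention Pyramid's JSON renderer uses for serializing custom objects. A stripped-down sketch, plus the plain-json equivalent of what such a renderer does:

import json

class Channel(object):
    def __init__(self, name):
        self.name = name

    def get_info(self):
        return {'name': self.name}

    def __json__(self, request=None):
        # A JSON-aware renderer calls this for otherwise unserializable types.
        return self.get_info()

def dumps(obj):
    return json.dumps(obj, default=lambda o: o.__json__())

# dumps(Channel('lobby')) -> '{"name": "lobby"}'
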
82b1e2db9c9175370d40354c2e6851bb26d58183
|
bump plugin version
|
plugins/bountyfunding_plugin_trac/src/setup.py
|
plugins/bountyfunding_plugin_trac/src/setup.py
|
#!/usr/bin/env python
from setuptools import find_packages, setup
setup(
name='BountyFunding', version='0.5',
packages=find_packages(),
entry_points = {
'trac.plugins': [
'bountyfunding = bountyfunding.bountyfunding',
],
},
package_data={'bountyfunding': ['templates/*', 'htdocs/styles/*', 'htdocs/scripts/*']},
)
|
Python
| 0
|
@@ -109,9 +109,9 @@
='0.
-5
+6
',%0A
|
8b542d9602d459a6f38dd17c48d3f9a7680fac78
|
Fixed the lower-case problem
|
malware_crawl/scan/manual_html.py
|
malware_crawl/scan/manual_html.py
|
from __future__ import print_function, unicode_literals
import requests
import lxml.html
import six
from progress.bar import Bar
_malware_keywords = {'Queen Elizabeth II',}
def check_content(url):
try:
response = requests.get(url, timeout=10.0)
response.raise_for_status()
except requests.exceptions.HTTPError:
return {}
text = response.text.lower()
return {keyword: text.count(keyword) for keyword in _malware_keywords} # will never count any values, because text is lowercase and _malware_keywords.keys() are mixed case.
def malware_scan(url):
"""
Crawls a page depth 1, returns the total count for each of the keywords in _malware_keywords for each of the pages in the crawl.
"""
response = requests.get(url, timeout=10.0)
html = lxml.html.fromstring(
response.content,
base_url=response.url
)
html.make_links_absolute(resolve_base_href=True)
keyword_counts = dict.fromkeys(_malware_keywords, 0)
for url in Bar().iter({link for element, attribute, link, pos in html.iterlinks()}):
for keyword, count in six.iteritems(check_content(url)):
keyword_counts[keyword] += count
return keyword_counts
def manual_html_malware_scan(urls):
for item in urls:
item[opinion].append({"type": "placeholder", "confidence": 1.0})
return urls
if __name__ == '__main__':
url = 'http://uk.movies.yahoo.com/person/emma-watson/'
print(malware_scan(url))
|
Python
| 0.99868
|
@@ -168,16 +168,129 @@
h II',%7D%0A
+_lower_mk = %7B%7D%0Afor k,v in _malware_keywords.items():%0A _lower_mk%5Bk.lower()%5D = v;%0A%0A_malware_keywords = _lower_mk
%0A%0Adef ch
@@ -576,110 +576,8 @@
ds%7D
- # will never count any values, because text is lowercase and _malware_keywords.keys() are mixed case.
%0A%0A%0Ad
|
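
One caveat worth flagging: `_malware_keywords` starts life as a set, and the committed loop iterates `.items()` over it, which a set does not have (it would raise AttributeError). A sketch of the normalization the fix is after, using comprehensions that work on the set directly:

_malware_keywords = {'Queen Elizabeth II'}
_malware_keywords = {k.lower() for k in _malware_keywords}

def count_keywords(text):
    # Keywords and text are both lowercased, so counts can actually match.
    text = text.lower()
    return {kw: text.count(kw) for kw in _malware_keywords}

# count_keywords("queen elizabeth ii visited") -> {'queen elizabeth ii': 1}
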
e09798d5adbdea422d31eeed6fded746c0b8e5eb
|
update reduce options
|
MOSAICpipe/reduce_ALL.py
|
MOSAICpipe/reduce_ALL.py
|
import os
from glob import glob
import sys
''' This file links the MOSAIC pipeline into each folder and then does the
complete reduction on things. It still needs to have the individual association
files created beforehand, but it does everything else.
I've updated it to also do the newfirm linking and reduction. You specify which
instrument you want to use as a command line argument: 'mosaic' or 'newfirm'
'''
script_dir = '/home/boada/Projects/planckClusters/MOSAICpipe'
def main():
dirs = [dirs for _, dirs, _ in os.walk('./')][0] # only want top level
cwd = os.getcwd()
for d in dirs:
print(d)
os.chdir(cwd)
if 'PSZ' not in d:
continue
target_dir = './{}'.format(d)
if not os.path.isdir(target_dir):
continue
relpath = os.path.relpath('{}'.format(script_dir), target_dir)
print(relpath)
print(target_dir)
try:
os.symlink('{}/combcat_PROJECTED.py'.format(script_dir),
'{}/combcat_PROJECTED.py'.format(target_dir))
except FileExistsError:
pass
# now do the pipeline
os.chdir(target_dir)
assocFile = glob('*.assoc')[0]
print(os.getcwd())
# build the command
cmd = 'python3 combcat_PROJECTED.py {} ./ ./'.format(assocFile)
cmd += ' --noPhoto --noAstro --noSEx --noBPZ'
print(cmd)
os.system(cmd)
# clean up all of the intermediate data products
cmds = ["find . -path '*/.diagnostics/*' -delete",
"find . -type d -name '.diagnostics' -empty -delete",
"find . -type f -name 'registration_*' -delete",
"find . -type f -name '*ldac*' -delete",
"find . -type f -name 'diagnostics.html' -delete",
"find . -type f -name '*.lst' -delete",
"find . -type f -name '*.xml' -delete",
"find . -type f -name 'GAIA.cat' -delete",
"find . -type f -name 'best_astrometry.dat' -delete"]
for cmd in cmds:
os.system(cmd)
if __name__ == "__main__":
main()
|
Python
| 0.000001
|
@@ -28,19 +28,8 @@
glob
-%0Aimport sys
%0A%0A''
@@ -1342,16 +1342,26 @@
cmd += '
+ --noSWarp
--noPho
@@ -1381,19 +1381,11 @@
--no
-SEx --noBPZ
+RGB
'%0A%0A
@@ -2047,16 +2047,17 @@
m(cmd)%0A%0A
+%0A
if __nam
|
48abe132e004dbffa86ad3ccd557aea739332013
|
Add test case for layer_normalization with unnormal batch_axis
|
python/test/function/test_layer_normalization.py
|
python/test/function/test_layer_normalization.py
|
# Copyright 2019,2020,2021 Sony Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
from nnabla.normalization_functions import _force_list, _get_axes_excluding
ctxs = list_context('LayerNormalization')
def ref_layer_normalization(x, beta, gamma, batch_axis, eps, output_stat):
batch_axis = _force_list(batch_axis)
axes = tuple(_get_axes_excluding(len(x.shape), batch_axis))
x_mean = x.mean(axis=axes, keepdims=True)
x_var = x.var(axis=axes, keepdims=True)
norm = (x - x_mean) / (x_var + eps) ** 0.5
if gamma is not None:
norm *= gamma
if beta is not None:
norm += beta
if output_stat:
return norm, x_mean, x_var
return norm
def create_inputs(rng, x_shape, batch_axis, no_scale, no_bias):
x = rng.randn(*x_shape).astype(np.float32)
stat_shape = list(x_shape)
for baxis in _force_list(batch_axis):
stat_shape[baxis] = 1
beta = None if no_bias else rng.randn(*stat_shape).astype(np.float32)
gamma = None if no_scale else rng.randn(*stat_shape).astype(np.float32)
return x, beta, gamma
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("x_shape , batch_axis", [((4, 3, 8, 8), 0),
((4, 16, 16, 8), 0),
((16, 1), 0),
((3, 32, 4), 0),
# time-series (T, B, C) or (B, T, C)
((10, 4, 16), [0, 1])
])
@pytest.mark.parametrize("eps", [1e-05])
@pytest.mark.parametrize("output_stat", [False, True])
@pytest.mark.parametrize("no_scale", [False, True])
@pytest.mark.parametrize("no_bias", [False, True])
def test_layer_normalization_forward(ctx, func_name, seed, x_shape, batch_axis, eps, output_stat, no_scale, no_bias):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
x, beta, gamma = create_inputs(rng, x_shape, batch_axis, no_scale, no_bias)
function_tester(rng, F.layer_normalization, ref_layer_normalization, [x, beta, gamma], [batch_axis, eps, output_stat], ctx=ctx,
func_name=func_name, dstep=1e-2, atol_b=1e-2, backward=[False, False, False], disable_half_test=True)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("x_shape , batch_axis", [((2, 3, 4, 4), 0),
((2, 4, 4, 3), 0),
((16, 1), 0),
((2, 4, 3), 0),
# time-series (T, B, C) or (B, T, C)
((3, 2, 5), [0, 1])
])
@pytest.mark.parametrize("eps", [1e-05])
@pytest.mark.parametrize("output_stat", [False, True])
@pytest.mark.parametrize("no_scale", [False, True])
@pytest.mark.parametrize("no_bias", [False, True])
def test_layer_normalization_forward_backward(ctx, func_name, seed, x_shape, batch_axis, eps, output_stat, no_scale, no_bias):
from nbla_test_utils import function_tester
rng = np.random.RandomState(seed)
x, beta, gamma = create_inputs(rng, x_shape, batch_axis, no_scale, no_bias)
function_tester(rng, F.layer_normalization, ref_layer_normalization, [x, beta, gamma], [batch_axis, eps, output_stat], ctx=ctx,
func_name=func_name, dstep=1e-2, atol_b=1e-2, backward=[True, not no_bias, not no_scale], disable_half_test=True)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
@pytest.mark.parametrize("x_shape , batch_axis", [((2, 3, 4, 4), 0),
((2, 4, 4, 3), 0),
((16, 1), 0),
((2, 4, 3), 0),
# time-series (T, B, C) or (B, T, C)
((3, 2, 5), [0, 1])
])
@pytest.mark.parametrize("eps", [1e-05])
@pytest.mark.parametrize("output_stat", [False])
@pytest.mark.parametrize("no_scale", [False, True])
@pytest.mark.parametrize("no_bias", [False, True])
def test_layer_normalization_double_backward(ctx, func_name, seed, x_shape, batch_axis, eps, output_stat, no_scale, no_bias):
from nbla_test_utils import backward_function_tester
rng = np.random.RandomState(seed)
x, beta, gamma = create_inputs(rng, x_shape, batch_axis, no_scale, no_bias)
backward = [True, not no_bias, not no_scale]
backward_function_tester(rng, F.layer_normalization,
inputs=[x, beta, gamma],
func_args=[batch_axis, eps, output_stat],
backward=backward,
atol_f=2e-4,
ctx=ctx)
|
Python
| 0.000001
|
@@ -3247,32 +3247,101 @@
, 4, 4, 3), 0),%0A
+ ((2, 4, 4, 3), 1),%0A
@@ -3602,32 +3602,33 @@
, 2, 5), %5B0, 1%5D)
+,
%0A
|
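
A numpy sketch of the reference computation being exercised: for the newly added ((2, 4, 4, 3), 1) case, statistics are taken over every axis except the batch axis:

import numpy as np

def ref_layer_norm(x, batch_axis=(0,), eps=1e-5):
    axes = tuple(a for a in range(x.ndim) if a not in batch_axis)
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    return (x - mean) / np.sqrt(var + eps)

x = np.random.randn(2, 4, 4, 3).astype(np.float32)
y = ref_layer_norm(x, batch_axis=(1,))   # the new non-default batch axis
# Each of the 4 slices along axis 1 is now ~zero-mean, ~unit-variance:
# np.allclose(y.mean(axis=(0, 2, 3)), 0, atol=1e-4) -> True
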
d7cfdbd2bde0cc876db8c1bce020d8a1cf0ea77b
|
Add search filtering for name and booleans in resource API.
|
mdot_rest/views.py
|
mdot_rest/views.py
|
from django.shortcuts import render
from .models import Resource
from .serializers import ResourceSerializer
from rest_framework import generics, permissions
class ResourceList(generics.ListCreateAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
class ResourceDetail(generics.RetrieveUpdateDestroyAPIView):
queryset = Resource.objects.all()
serializer_class = ResourceSerializer
permission_classes = (permissions.IsAuthenticatedOrReadOnly,)
|
Python
| 0
|
@@ -151,16 +151,200 @@
issions%0A
+import django_filters%0A%0A%0Aclass ResourceFilter(django_filters.FilterSet):%0A class Meta:%0A model = Resource%0A fields = ('name', 'featured', 'accessible', 'responsive_web',)%0A
%0A%0Aclass
@@ -531,16 +531,50 @@
dOnly,)%0A
+ filter_class = ResourceFilter%0A
%0A%0Aclass
|
48ba12cb0018930d0674b4124ecf581cfd8bb39f
|
Test cleanup for #9-semweb-model
|
src/test/python/dot/rural/sepake/test_csv_to_rdf.py
|
src/test/python/dot/rural/sepake/test_csv_to_rdf.py
|
'''
Created on 3 Oct 2014
@author: s05nc4
'''
import unittest
import StringIO
from dot.rural.sepake.csv_to_rdf import CSV, CsvGraph, PROV
from rdflib import RDF, RDFS
from rdflib.query import ResultRow
EXAMPLE = '''"A","B","C"
1,2,3
4,5,6
'''
ALL_CELLS_QUERY = '''
SELECT ?h ?v
WHERE {{
?cell <{rdf.type}> <{csv.Cell}> .
?cell <{csv.fieldName}> ?h .
?cell <{csv.fieldValue}> ?v .
}}
'''
STRUCTURE_QUERY = '''
SELECT *
WHERE {{
?cell <{rdf.type}> <{csv.Cell}> .
?row <{rdfs.member}> ?cell .
?row <{rdf.type}> <{csv.Row}> .
?file <{rdfs.member}> ?row .
?file <{rdf.type}> <{csv.File}> .
?import <{prov.generated}> ?file .
?import <{rdf.type}> <{csv.Import}> .
}}
'''
def _pythonify(result_row):
'''@param result_row Row from a query result, instance of rdflib.query.ResultRow
'''
return tuple([v.toPython() for v in result_row])
class Test(unittest.TestCase):
def setUp(self):
self.g = CsvGraph()
self.g.read(StringIO.StringIO(EXAMPLE))
def _query(self, template, transformation = _pythonify):
query = template.format(csv = CSV, rdf = RDF, rdfs = RDFS, prov = PROV)
return [transformation(tupl) for tupl in self.g.query(query)]
def testCells(self):
self.assertEquals(set([('A', 1), ('A', 4),
('B', 2), ('B', 5),
('C', 3), ('C', 6),
]),
set(self._query(ALL_CELLS_QUERY)))
def testStructure(self):
result = self._query(STRUCTURE_QUERY, transformation = ResultRow.asdict)
self.assertEquals(6, len(result), repr(result))
found_csv_rows = {result_row['row'] for result_row in result}
self.assertEquals(2, len(found_csv_rows), repr(result))
found_csv_files = {result_row['file'] for result_row in result}
self.assertEquals(1, len(found_csv_files), repr(result))
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
Python
| 0
|
@@ -195,16 +195,59 @@
esultRow
+%0Aimport csv%0Afrom rdflib.term import Literal
%0A%0AEXAMPL
@@ -1063,25 +1063,99 @@
AMPLE))%0A
+ self.csv = csv.DictReader(StringIO.StringIO(EXAMPLE))%0A
%0A
-
def _que
@@ -1399,200 +1399,128 @@
- self.assertEquals(set(%5B('A', 1), ('A', 4),%0A ('B', 2), ('B', 5),%0A ('C', 3), ('C', 6),%0A %5D
+for csv_row in self.csv:%0A for key, value in csv_row.items():%0A self.assertIn(Literal(int(value)
),%0A
@@ -1552,40 +1552,67 @@
+li
s
-e
t(self.
-_query(ALL_CELLS_QUERY)
+g%5BLiteral(key) : ~CSV.fieldName / CSV.fieldValue%5D
))%0A%0A
|
eddcf78349b09847c5d216ebfb5f2ce46f02d09d
|
Debug commit.
|
socbot/core.py
|
socbot/core.py
|
import logging
from twisted.words.protocols import irc
from twisted.internet import protocol, reactor
from twisted.internet.error import ReactorNotRunning
from socbot.pluginapi import API
from socbot.userdb import UserDB
# Credits to ibid for some helpful code:
# - Ping ponger
class Connection(irc.IRCClient):
nickname = "SocBot"
_ping_deferred = None
_reconnect_deferred = None
def __init__(self):
self.factory = None
self.log = None
self.shutdown = False
self.api = None
self.channels = []
def _idle_ping(self):
self.log.debug("sending idle ping")
self._ping_deferred = None
self._reconnect_deferred = reactor.callLater(
self.factory.pong_timeout, self._timeout_reconnect)
self.sendLine('PING idle-socbot')
def _timeout_reconnect(self):
self.log.info("idle timeout; reconnecting")
self.transport.loseConnection()
def dataReceived(self, data):
irc.IRCClient.dataReceived(self, data)
if self._ping_deferred is not None:
self._ping_deferred.reset(self.factory.ping_interval)
def irc_PONG(self, prefix_unused, params):
if params[-1] == 'idle-socbot' and self._reconnect_deferred:
self.log.debug("received idle pong")
self._reconnect_deferred.cancel()
self._reconnect_deferred = None
self._ping_deferred = reactor.callLater(
self.factory.ping_interval, self._idle_ping)
def doJoins(self):
if self.factory.config["channels"]:
for channel, chanconfig in self.factory.config["channels"].iteritems():
if not chanconfig["autojoin"]:
continue
if chanconfig["password"]:
self.join(channel, chanconfig["password"])
else:
self.join(channel)
def irc_ERR_NOMOTD(self, prefix, params):
self.log.info("no MOTD")
self.doJoins()
def receivedMOTD(self, motd):
self.log.info("received MOTD")
self.doJoins()
def sendLine(self, line):
self.log.debug("sending line `{0}`".format(line))
irc.IRCClient.sendLine(self, str(line))
if self._ping_deferred is not None:
self._ping_deferred.reset(self.factory.ping_interval)
def connectionMade(self):
self.log.info("connected to server")
self.factory.resetDelay()
self.factory.addBot(self)
irc.IRCClient.connectionMade(self)
self._ping_deferred = reactor.callLater(self.factory.ping_interval, self._idle_ping)
def connectionLost(self, reason):
self.log.info("lost connection: {0}".format(reason))
irc.IRCClient.connectionLost(self, reason)
if self.shutdown:
self.factory.removeBot(self)
def privmsg(self, user, channel, msg):
channel = channel.lower()
if self.api.onPrivmsg(user, channel, msg):
irc.IRCClient.privmsg(self, user, channel, message)
def handleCommand(self, command, prefix, params):
if self.api.onCommand(command, prefix, params):
irc.IRCClient.handleCommand(self, command, prefix, params)
def msg(self, target, message, length=irc.MAX_COMMAND_LENGTH):
if not message or not target:
return
irc.IRCClient.msg(self, target, message, length)
def quit(self, message):
self.shutdown = True
irc.IRCClient.quit(self, message)
self.factory.shutdown()
def restart(self, message="Restarting..."):
self.factory.sharedstate['exitcode'] = 3
self.factory.shutdownAll(message)
def joined(self, channel):
self.log.info("joined " + channel)
if not channel.lower() in self.channels:
self.channels.append(channel.lower())
def left(self, channel):
self.log.info("left " + channel)
if channel.lower() in self.channels:
self.channels.remove(channel.lower())
class BotFactory(protocol.ReconnectingClientFactory):
protocol = Connection
log = logging.getLogger("socbot")
ping_interval = 60.0
pong_timeout = 120.0
def __init__(self, name, config, sharedstate, main):
self.name = name
self.sharedstate = sharedstate
self.core = main
self.config = config
self.shuttingdown = False
self.users = UserDB('conf/%s-users.db' % name.lower())
def clientConnectionLost(self, connector, unused_reason):
self.log.info("connection lost")
if not self.shuttingdown:
protocol.ReconnectingClientFactory.clientConnectionLost(
self, connector, unused_reason)
if not self.sharedstate["connections"]:
try:
reactor.stop()
except ReactorNotRunning:
pass
def buildProtocol(self, addr):
self.log.debug("creating new connection")
p = protocol.ReconnectingClientFactory.buildProtocol(self, addr)
p.nickname = self.config['nickname']
p.log = logging.getLogger("socbot.connection."+self.name)
p.api = API(p, self.users, self.sharedstate['pluginmanager'])
p.api.log = logging.getLogger("socbot.connection."+self.name)
return p
def addBot(self, bot):
self.sharedstate["connections"][self.name].append(bot)
def removeBot(self, bot):
self.sharedstate["connections"][self.name].remove(bot)
if not self.sharedstate["connections"][self.name]:
del self.sharedstate["connections"][self.name]
if not self.sharedstate["connections"]:
try:
reactor.stop()
except ReactorNotRunning:
pass
def shutdownAll(self, msg="Shutdown requested."):
self.core.shutdown(msg)
def shutdown(self):
self.shuttingdown = True
|
Python
| 0
|
@@ -2735,16 +2735,112 @@
eason))%0A
+ %0A self._ping_deferred.cancel()%0A self._reconnect_deferred.cancel()%0A
%0A
@@ -2955,24 +2955,76 @@
veBot(self)%0A
+ else:%0A self._timeout_reconnect()%0A
%0A def
@@ -4405,10 +4405,9 @@
t =
-12
+6
0.0%0A
|
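"Debug commit." undersells the change. Decoded, the hunks cancel both idle-ping timers on disconnect, reconnect explicitly when the loss was not a requested shutdown, and drop pong_timeout from 120.0 to 60.0. The patched connectionLost, as a sketch:

    def connectionLost(self, reason):
        self.log.info("lost connection: {0}".format(reason))

        # Cancel the idle-ping machinery so stale callLater timers
        # cannot fire against a dead connection.
        self._ping_deferred.cancel()
        self._reconnect_deferred.cancel()

        irc.IRCClient.connectionLost(self, reason)

        if self.shutdown:
            self.factory.removeBot(self)
        else:
            self._timeout_reconnect()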
ac70763faa0798b2e78a9f2b898570e3b08df5cb
|
Fix method call
|
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
|
cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py
|
import sys
from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta
from legalaid.models import Case
from cla_eventlog.models import Log
from cla_butler.tasks import DeleteOldData
class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
def get_eligible_cases(self):
two_years = self.now - relativedelta(years=2)
return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)
def get_digital_justice_user_logs(self):
return Log.objects.filter(created_by__email__endswith="digital.justice.gov.uk")
class Command(BaseCommand):
help = """
Use cases:
1. Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed
2. Delete logs created by users with a @digital.justice.gov.uk email
"""
def get_user_input(self, qs_type, qs):
return raw_input(
"Number of {0} that will be deleted: {1}\nAre you sure about this? (Yes/No) ".format(
qs_type, qs.count()
)
)
def handle_test_command(self, args, cases):
if args[0] == "delete":
self.instance.run()
elif args[0] == "delete-logs":
digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
self.instance._delete_objects(digital_justice_user_logs)
def handle_terminal_command(self, args, cases):
if args[0] == "delete":
if len(args) > 1 and args[1] == "no-input":
self.instance.run()
else:
answer = get_user_input("cases", cases)
if answer == "Yes":
self.instance.run()
elif args[0] == "delete-logs":
digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
answer = get_user_input("digital justice user logs", digital_justice_user_logs)
if answer == "Yes":
self.instance._delete_objects(digital_justice_user_logs)
def handle(self, *args, **kwargs):
self.instance = FindAndDeleteCasesUsingCreationTime()
cases = self.instance.get_eligible_cases()
django_command = sys.argv[1]
if django_command == "test": # If command is run in test
if args:
self.handle_test_command(args, cases)
else:
return cases
else: # If command is run in terminal
if args:
self.handle_terminal_command(args, cases)
else:
print("Number of cases to be deleted: " + str(cases.count()))
|
Python
| 0.000008
|
@@ -1618,32 +1618,37 @@
answer =
+self.
get_user_input(%22
@@ -1884,16 +1884,21 @@
nswer =
+self.
get_user
|
b31b36253053f5e2739f514c8ae2017b4bc66011
|
Enable assigning file_list, topmodule, and config file as arguments
|
pyverilog_toolbox/verify_tool/dataflow_facade.py
|
pyverilog_toolbox/verify_tool/dataflow_facade.py
|
#-------------------------------------------------------------------------------
# get_dataflow_facade.py
#
# interface of register map analyzer
#
#
# Copyright (C) 2015, Ryosuke Fukatani
# License: Apache 2.0
#-------------------------------------------------------------------------------
import sys
import os
import pyverilog
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) )
import pyverilog.controlflow.controlflow_analyzer as controlflow_analyzer
from optparse import OptionParser
import pyverilog.utils.util as util
from pyverilog.dataflow.dataflow_analyzer import VerilogDataflowAnalyzer
from pyverilog.dataflow.optimizer import VerilogDataflowOptimizer
from bindlibrary import BindLibrary
from pyverilog.controlflow.controlflow_analyzer import VerilogControlflowAnalyzer
class dataflow_facade(VerilogControlflowAnalyzer):
""" [CLASSES]
Facade pattern for getting dataflow.
You can get dataflow by dataflow_facade(Verilog file name).
If commandline option exists, first argument is regard as verilog file name.
"""
def __init__(self, code_file_name):
topmodule, terms, binddict, resolved_terms, resolved_binddict, constlist = self.get_dataflow(code_file_name)
VerilogControlflowAnalyzer.__init__(self, topmodule, terms, binddict,
resolved_terms, resolved_binddict,constlist)
self.binds = BindLibrary(binddict, terms)
def get_dataflow(self, code_file_name):
optparser = OptionParser()
optparser.add_option("-t","--top",dest="topmodule",
default="TOP",help="Top module, Default=TOP")
optparser.add_option("-I","--include",dest="include",action="append",
default=[],help="Include path")
optparser.add_option("-D",dest="define",action="append",
default=[],help="Macro Definition")
optparser.add_option("-S",dest="config_file",default=[],help="config_file")
(options, args) = optparser.parse_args()
if args:
filelist = args
else:
filelist = (code_file_name,)
for f in filelist:
if not os.path.exists(f): raise IOError("file not found: " + f)
analyzer = VerilogDataflowAnalyzer(filelist, options.topmodule,
preprocess_include=options.include,
preprocess_define=options.define)
analyzer.generate()
directives = analyzer.get_directives()
terms = analyzer.getTerms()
binddict = analyzer.getBinddict()
optimizer = VerilogDataflowOptimizer(terms, binddict)
optimizer.resolveConstant()
resolved_terms = optimizer.getResolvedTerms()
resolved_binddict = optimizer.getResolvedBinddict()
constlist = optimizer.getConstlist()
if options.config_file:
self.config_file = options.config_file
return options.topmodule, terms, binddict, resolved_terms, resolved_binddict, constlist
def make_term_ref_dict(self):
self.term_ref_dict ={}
for tv,tk,bvi,bit,term_lsb in self.binds.walk_reg_each_bit():
if 'Rename' in tv.termtype: continue
target_tree = self.makeTree(tk)
tree_list = self.binds.extract_all_dfxxx(target_tree, set([]), bit - term_lsb, pyverilog.dataflow.dataflow.DFTerminal)
for tree, bit in tree_list:
if str(tree) not in self.term_ref_dict.keys():
self.term_ref_dict[str(tree)] = set([])
self.term_ref_dict[str(tree)].add(str(tk))
def print_bind_info(self):
return_str = ''
binds = BindLibrary(self.resolved_binddict, self.resolved_terms)
for tv,tk,bvi,bit,term_lsb in binds.walk_reg_each_bit():
tree = self.makeTree(tk)
trees = binds.extract_all_dfxxx(tree, set([]), bit - term_lsb, pyverilog.dataflow.dataflow.DFTerminal)
print str(tk) + '[' + str(bit) + ']: ' + str(trees)
return_str += str(tk) + '[' + str(bit) + ']: ' + str(trees)
return return_str
if __name__ == '__main__':
#df = dataflow_facade("../testcode/complex_partselect.v")
df = dataflow_facade("../testcode/regmap2.v")
df.print_bind_info()
|
Python
| 0
|
@@ -1130,32 +1130,64 @@
, code_file_name
+, topmodule='', config_file=None
):%0A topmo
@@ -1513,16 +1513,48 @@
ile_name
+, topmodule='', config_file=None
):%0A
@@ -2182,51 +2182,257 @@
el
-se:%0A filelist = (code_file_name,
+if code_file_name:%0A if hasattr(code_file_name, %22__iter__%22):%0A filelist = code_file_name%0A else:%0A filelist = (code_file_name,)%0A else:%0A raise Exception(%22Verilog file is not assigned.%22
)%0A%0A
@@ -2534,16 +2534,85 @@
%22 + f)%0A%0A
+ if not topmodule:%0A topmodule = options.topmodule%0A%0A
@@ -2656,24 +2656,16 @@
lelist,
-options.
topmodul
@@ -3247,16 +3247,85 @@
if
+config_file:%0A self.config_file = config_file%0A elif
options.
|
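Decoded, get_dataflow gains explicit topmodule and config_file parameters and accepts either a single file name or an iterable of them; the command-line options act only as fallbacks. A sketch of the changed portion, with the unchanged analyzer/optimizer steps elided:

    def get_dataflow(self, code_file_name, topmodule='', config_file=None):
        ...
        (options, args) = optparser.parse_args()

        if args:
            filelist = args
        elif code_file_name:
            # Accept a single file name or any iterable of file names.
            if hasattr(code_file_name, "__iter__"):
                filelist = code_file_name
            else:
                filelist = (code_file_name,)
        else:
            raise Exception("Verilog file is not assigned.")

        for f in filelist:
            if not os.path.exists(f): raise IOError("file not found: " + f)

        # The -t command-line option is only a fallback now.
        if not topmodule:
            topmodule = options.topmodule

        analyzer = VerilogDataflowAnalyzer(filelist, topmodule,
                                           preprocess_include=options.include,
                                           preprocess_define=options.define)
        ...
        if config_file:
            self.config_file = config_file
        elif options.config_file:
            self.config_file = options.config_file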
08d2ade71e6fb69512cb6d39cb7ef8712a44172a
|
update mediumRegex
|
mediumRegexUTF8.py
|
mediumRegexUTF8.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# FIX PARA WINDOWS CONSOLE ----------------------
import codecs,sys
sys.stdout = codecs.getwriter("utf8")(sys.stdout)
# -----------------------------------------------
import re
cadena = u"""—¡Joven «emponzoñado» con el whisky, qué fin… te aguarda exhibir!
El pingüino Wenceslao hizo kilómetros bajo exhaustiva
lluvia y frío, añoraba a su querido cachorro."""
patrones = [
(ur"(\w)", u"Busca todos los caracteres de palabra"),
(ur"(\W)", u"Busca todos los caracteres que no son de palabra"),
(ur"(\s)", u"Busca todos los caracteres de espaciado"),
(ur"(\S)", u"Busca todos los caracteres que no son de espaciado"),
(ur"(\w+)", u"Busca todas las palabras"),
(ur"(\w+)\s+(\w+)", u"Busca pares de palabras separadas por un espacio"),
(ur"([^\s]+)\s+([^\s]+)", u"Busca dos grupos de caracteres que no sean espacios seguidos, separados por un espacio"),
(ur"(\w+)[^\w\s]?\s+[^\w\s]?(\w+)", u"Busca dos palabras separadas por un espacio que pueden o no tener un caractes no de palabra a los lados"),
(ur"(\w+)\s+(?=(\w+))","Busca todos los pares de palabras (separadas por espacio) con lookahead"),
(ur"(\w+)(?=(?:\s+(\W*)(\w+))|([^\w\r\n]+))","Busca pares de palabra/palabra o palabra/otro, puede incluir caracteres que anteceden la segunda palabra"),
]
print u"\nCadena:",cadena
for i,patron in enumerate(patrones):
if (len(sys.argv)>1 and sys.argv[1]!=str(i)):
continue
paco = re.compile(patron[0], re.UNICODE)
match = paco.findall(cadena)
print "\n",patron[1]
print "\t",patron[0],"\n"
if len(match)>0:
for ii,m in enumerate(match):
if isinstance(match[0], tuple):
m = filter(None, m) # Elimina los valores vacíos
print ii,"\t", "\t".join(m)
else:
print ii,"\t", m
else:
print "\tNo hubo coincidencias"
print
|
Python
| 0
|
@@ -90,16 +90,34 @@
-------%0A
+# Usar: chcp 1252%0A
import c
@@ -122,16 +122,23 @@
codecs,
+locale,
sys%0Asys.
@@ -167,14 +167,37 @@
ter(
-%22utf8%22
+locale.getpreferredencoding()
)(sy
|
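The change swaps the hard-coded utf8 writer for the console's own preferred encoding, so the accented Spanish output survives a Windows cmd session (hence the new "Usar: chcp 1252" hint). Decoded header, as a sketch:

#!/usr/bin/env python
# -*- coding: utf-8 -*-

# FIX PARA WINDOWS CONSOLE ----------------------
# Usar: chcp 1252
import codecs,locale,sys
# Wrap stdout in whatever encoding the local console actually uses.
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
# -----------------------------------------------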
76dcc6cd050172af50c0721b312ea499f0bb7b71
|
modify build option
|
build/config.py
|
build/config.py
|
# -*- coding: utf-8 -*-
cflags = [
'-std=c99',
'-Wall',
'-g',
'-O2',
# '-fno-strict-aliasing',
'-D_GNU_SOURCE',
]
libs = [
'pthread',
'ev',
'json',
]
includes = [
'ext',
]
headers = [
'stdint.h',
'stdbool.h',
'unistd.h',
'sys/stat.h',
'sys/types.h',
'sys/socket.h',
'sys/un.h',
'sys/ioctl.h',
'arpa/inet.h',
'netinet/in.h',
'netdb.h',
'signal.h',
'errno.h',
'pthread.h',
'ev.h',
]
funcs = [
'sigaction',
'sigignore',
]
|
Python
| 0.000001
|
@@ -65,16 +65,17 @@
%0A '-g
+0
',%0A '
@@ -80,9 +80,9 @@
'-O
-2
+3
',%0A#
|
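Decoded, the two one-character hunks turn debugging down and optimization up:

cflags = [
    '-std=c99',
    '-Wall',
    '-g0',   # was '-g': emit no debug information
    '-O3',   # was '-O2': optimize more aggressively
#    '-fno-strict-aliasing',
    '-D_GNU_SOURCE',
]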
583560040fc16cb7171d04d297d84d228a7f34f2
|
Add ability to exclude files when using copy/move
|
synthtool/transforms.py
|
synthtool/transforms.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import shutil
from typing import Iterable, Union
import os
import re
import sys
from synthtool import _tracked_paths
from synthtool import log
PathOrStr = Union[str, Path]
ListOfPathsOrStrs = Iterable[Union[str, Path]]
def _expand_paths(
paths: ListOfPathsOrStrs, root: PathOrStr = None) -> Iterable[Path]:
"""Given a list of globs/paths, expands them into a flat sequence,
expanding globs as necessary."""
if isinstance(paths, (str, Path)):
paths = [paths]
if root is None:
root = Path('.')
# ensure root is a path
root = Path(root)
# record name of synth script so we don't try to do transforms on it
synth_script_name = sys.argv[0]
for path in paths:
if isinstance(path, Path):
if path.is_absolute():
anchor = Path(path.anchor)
remainder = str(path.relative_to(path.anchor))
yield from anchor.glob(remainder)
else:
yield path
else:
yield from (p for p in root.glob(path)
if p.absolute() != Path(synth_script_name).absolute())
def _filter_files(paths: Iterable[Path]) -> Iterable[Path]:
"""Returns only the paths that are files (no directories)."""
return (path for path in paths if path.is_file())
def _copy_dir_to_existing_dir(source: Path, destination: Path):
"""
copies files over existing files to an existing directory
this function does not copy empty directories
"""
for root, _, files in os.walk(source):
for name in files:
rel_path = str(Path(root).relative_to(source)).lstrip('.')
dest_dir = os.path.join(str(destination), rel_path)
os.makedirs(dest_dir, exist_ok=True)
dest_path = os.path.join(dest_dir, name)
shutil.copyfile(os.path.join(root, name), dest_path)
def move(sources: ListOfPathsOrStrs, destination: PathOrStr = None):
"""
copy file(s) at source to current directory
"""
for source in _expand_paths(sources):
if destination is None:
canonical_destination = _tracked_paths.relativize(source)
else:
canonical_destination = Path(destination)
if source.is_dir():
_copy_dir_to_existing_dir(source, canonical_destination)
else:
# copy individual file
shutil.copy2(source, canonical_destination)
def _replace_in_file(path, expr, replacement):
with path.open('r+') as fh:
content = fh.read()
content, count = expr.subn(replacement, content)
# Don't bother writing the file if we didn't change
# anything.
if not count:
return False
fh.seek(0)
fh.write(content)
fh.truncate()
return True
def replace(
sources: ListOfPathsOrStrs,
before: str,
after: str,
flags: int = re.MULTILINE):
"""Replaces occurrences of before with after in all the given sources."""
expr = re.compile(before, flags=flags or 0)
paths = _filter_files(_expand_paths(sources, '.'))
for path in paths:
replaced = _replace_in_file(path, expr, after)
if replaced:
log.info(f"Replaced {before!r} in {path}.")
|
Python
| 0.000005
|
@@ -1972,24 +1972,90 @@
nation: Path
+,%0A excludes: ListOfPathsOrStrs = None
):%0A %22%22%22%0A
@@ -2382,57 +2382,8 @@
th)%0A
- os.makedirs(dest_dir, exist_ok=True)%0A
@@ -2431,16 +2431,269 @@
, name)%0A
+%0A exclude = %5Bexcluded for excluded in excludes%0A if excluded.samefile(dest_path) or%0A excluded.samefile(dest_dir)%5D%0A if not exclude:%0A os.makedirs(dest_dir, exist_ok=True)%0A
@@ -2808,24 +2808,69 @@
PathOrStr =
+ None,%0A excludes: ListOfPathsOrStrs =
None):%0A
@@ -2921,32 +2921,87 @@
rectory%0A %22%22%22%0A
+ expanded_excludes = list(_expand_paths(excludes))%0A%0A
for source i
@@ -3292,30 +3292,128 @@
tination
-)%0A else
+,%0A excludes=expanded_excludes)%0A elif source not in expanded_excludes
:%0A
|
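Decoded, move() grows an excludes parameter that is expanded once and threaded into the directory copy. Note that, as the hunk is written, only the makedirs call ends up inside the "if not exclude" guard; shutil.copyfile keeps its original indentation. A sketch:

def _copy_dir_to_existing_dir(source: Path, destination: Path,
                              excludes: ListOfPathsOrStrs = None):
    for root, _, files in os.walk(source):
        for name in files:
            rel_path = str(Path(root).relative_to(source)).lstrip('.')
            dest_dir = os.path.join(str(destination), rel_path)
            dest_path = os.path.join(dest_dir, name)

            # Collect excluded paths matching this destination.
            exclude = [excluded for excluded in excludes
                       if excluded.samefile(dest_path) or
                       excluded.samefile(dest_dir)]
            if not exclude:
                os.makedirs(dest_dir, exist_ok=True)
            shutil.copyfile(os.path.join(root, name), dest_path)


def move(sources: ListOfPathsOrStrs, destination: PathOrStr = None,
         excludes: ListOfPathsOrStrs = None):
    expanded_excludes = list(_expand_paths(excludes))

    for source in _expand_paths(sources):
        if destination is None:
            canonical_destination = _tracked_paths.relativize(source)
        else:
            canonical_destination = Path(destination)

        if source.is_dir():
            _copy_dir_to_existing_dir(source, canonical_destination,
                                      excludes=expanded_excludes)
        elif source not in expanded_excludes:
            # copy individual file
            shutil.copy2(source, canonical_destination)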
eb4ad794a9213c20c800fa59199a29f353ca7994
|
Add a docstring to testing default gate domain constant (#3047)
|
cirq/testing/random_circuit.py
|
cirq/testing/random_circuit.py
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Union, Sequence, Dict, Optional, TYPE_CHECKING
from cirq import ops, value
from cirq.circuits import Circuit
if TYPE_CHECKING:
import cirq
DEFAULT_GATE_DOMAIN: Dict[ops.Gate, int] = {
ops.CNOT: 2,
ops.CZ: 2,
ops.H: 1,
ops.ISWAP: 2,
ops.CZPowGate(): 2,
ops.S: 1,
ops.SWAP: 2,
ops.T: 1,
ops.X: 1,
ops.Y: 1,
ops.Z: 1
}
def random_circuit(qubits: Union[Sequence[ops.Qid], int],
n_moments: int,
op_density: float,
gate_domain: Optional[Dict[ops.Gate, int]] = None,
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None
) -> Circuit:
"""Generates a random circuit.
Args:
qubits: If a sequence of qubits, then these are the qubits that
the circuit should act on. Because the qubits on which an
operation acts are chosen randomly, not all given qubits
may be acted upon. If an int, then this number of qubits will
be automatically generated, and the qubits will be
`cirq.NamedQubits` with names given by the integers in
`range(qubits)`.
n_moments: The number of moments in the generated circuit.
op_density: The probability that a gate is selected to operate on
randomly selected qubits. Note that this is not the expected number
of qubits that are acted on, since there are cases where the
number of qubits that a gate acts on does not evenly divide the
total number of qubits.
gate_domain: The set of gates to choose from, specified as a dictionary
where each key is a gate and the value of the key is the number of
qubits the gate acts on. If not provided, the default gate domain is
{X, Y, Z, H, S, T, CNOT, CZ, SWAP, ISWAP, CZPowGate()}. Only gates
which act on a number of qubits less than len(qubits) (or qubits if
provided as an int) are selected from the gate domain.
random_state: Random state or random state seed.
Raises:
ValueError:
* op_density is not in (0, 1].
* gate_domain is empty.
* qubits is an int less than 1 or an empty sequence.
Returns:
The randomly generated Circuit.
"""
if not 0 < op_density <= 1:
raise ValueError(f'op_density must be in (0, 1] but was {op_density}.')
if gate_domain is None:
gate_domain = DEFAULT_GATE_DOMAIN
if not gate_domain:
raise ValueError('gate_domain must be non-empty.')
if isinstance(qubits, int):
qubits = tuple(ops.NamedQubit(str(i)) for i in range(qubits))
n_qubits = len(qubits)
if n_qubits < 1:
raise ValueError('At least one qubit must be specified.')
gate_domain = {k: v for k, v in gate_domain.items() if v <= n_qubits}
if not gate_domain:
raise ValueError(f'After removing gates that act on less that '
'{n_qubits}, gate_domain had no gates.')
max_arity = max(gate_domain.values())
prng = value.parse_random_state(random_state)
moments: List[ops.Moment] = []
gate_arity_pairs = sorted(gate_domain.items(), key=repr)
num_gates = len(gate_domain)
for _ in range(n_moments):
operations = []
free_qubits = set(qubits)
while len(free_qubits) >= max_arity:
gate, arity = gate_arity_pairs[prng.randint(num_gates)]
op_qubits = prng.choice(sorted(free_qubits),
size=arity,
replace=False)
free_qubits.difference_update(op_qubits)
if prng.rand() <= op_density:
operations.append(gate(*op_qubits))
moments.append(ops.Moment(operations))
return Circuit(moments)
|
Python
| 0
|
@@ -712,16 +712,47 @@
Circuit
+%0Afrom cirq._doc import document
%0A%0Aif TYP
@@ -1001,16 +1001,202 @@
.Z: 1%0A%7D%0A
+document(%0A DEFAULT_GATE_DOMAIN,%0A %22%22%22The default gate domain for %60cirq.testing.random_circuit%60.%0A%0AThis includes the gates CNOT, CZ, H, ISWAP, CZ, S, SWAP, T, X, Y,%0Aand Z gates.%0A%22%22%22)%0A
%0A%0Adef ra
|
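Decoded, the patch imports cirq's internal document() helper and attaches a docstring to the module-level constant (the doubled mention of CZ is in the patch as written):

from cirq._doc import document

document(
    DEFAULT_GATE_DOMAIN,
    """The default gate domain for `cirq.testing.random_circuit`.

This includes the gates CNOT, CZ, H, ISWAP, CZ, S, SWAP, T, X, Y,
and Z gates.
""")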
cd9dc7ff1069e80543f6af32d625bdefaf19f178
|
Fix notifications
|
autostew_back/plugins/chat_notifications.py
|
autostew_back/plugins/chat_notifications.py
|
"""
Show a message when a player logs in (and other messages, too)
"""
from autostew_back.gameserver.event import EventType, BaseEvent, LapEvent, MemberEvent
from autostew_back.gameserver.server import Server
from autostew_back.gameserver.session import SessionStage, SessionState, SessionFlags
from autostew_web_users.models import SteamUser
name = 'chat_notifications'
welcome_message = [
"",
"Welcome {player_name}, current setup is {setup_name}",
"{safety_class_message}",
"{elo_rating_message}",
"See more information at autostew.selfhost.eu",
"",
]
new_session_starts = [
"",
"This server is connected to autostew.selfhost.eu"
"",
]
race_starts = [
"",
"",
" ### RACE IS STARTING ###",
"Keep the race safe and fair! Good luck!",
"Be EXTRA CAREFUL on the first turn.",
"Remind that players who crash too much will be kicked.",
]
leader_in_last_lap = [
"",
"The leader {leader_name} just entered their last lap!"
]
first_player_finished = [
"",
"",
"Congratulations to {winner_name} for winning this race!",
"See this race results and more at autostew.selfhost.eu"
]
def event(server: Server, event: BaseEvent):
if event.type == EventType.authenticated:
send_welcome_message(event, server)
if (
event.type == EventType.lap and
event.lap == server.session.race1_length.get() - 1 and
event.race_position == 1 and
server.session.session_stage.get_nice() == SessionStage.race1 and
SessionFlags.timed_race not in server.session.flags.get_flags()
):
send_winner_message(event, server)
if (
event.type == EventType.lap and
event.lap == server.session.race1_length.get() - 2 and
event.race_position == 1 and
server.session.session_stage.get_nice() == SessionStage.race1 and
SessionFlags.timed_race not in server.session.flags.get_flags()
):
send_leader_in_last_lap_message(event, server)
if event.type == EventType.state_changed and event.new_state == SessionState.lobby:
send_new_session_message(server)
if event.type == EventType.stage_changed and event.new_stage == SessionStage.race1:
send_race_start_message(server)
def send_race_start_message(server: Server):
for message in race_starts:
server.api.send_chat(message)
def send_new_session_message(server: Server):
for message in new_session_starts:
server.api.send_chat(message)
def send_leader_in_last_lap_message(event: LapEvent, server: Server):
for message in first_player_finished:
server.api.send_chat(message.format(winner_name=event.participant.name.get()))
def send_winner_message(event: LapEvent, server: Server):
for message in leader_in_last_lap:
server.api.send_chat(message.format(leader_name=event.participant.name.get()))
def send_welcome_message(event: MemberEvent, server: Server):
if not event.member:
return
try:
steam_user = SteamUser.objects.get(steam_id=event.member.steam_id.get())
if not steam_user.safety_class:
safety_class_message = "You will be assigned a safety class"
elif steam_user.safety_class.kick_on_impact_threshold:
safety_class_message = "Your current safety class is {}. Drive carefully or you will be kicked!".format(
steam_user.safety_class.name
)
else:
safety_class_message = "Your current safety class is {}.".format(
steam_user.safety_class.name
)
if not steam_user.elo_rating:
rating_message = "You are currently unrated"
else:
rating_message = "Your current rating is {}".format(steam_user.elo_rating)
for message in welcome_message:
event.member.send_chat(
message.format(
setup_name=server.get_current_setup_name(),
player_name=event.member.name.get(),
safety_class_message=safety_class_message,
elo_rating_message=rating_message
)
)
except SteamUser.DoesNotExist:
pass
|
Python
| 0.001748
|
@@ -1396,12 +1396,8 @@
et()
- - 1
and
@@ -1741,9 +1741,9 @@
) -
-2
+1
and
@@ -2580,37 +2580,34 @@
sage in
-first_play
+lead
er_
-f
in
-ished
+_last_lap
:%0A
@@ -2766,34 +2766,37 @@
sage in
-lead
+first_play
er_
+f
in
-_last_lap
+ished
:%0A
|
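Decoded, the fix is two off-by-one lap checks (the winner message now fires when event.lap equals race1_length rather than length - 1, and the leader-in-last-lap message at length - 1 rather than length - 2) plus un-swapping the two helpers, each of which had been iterating the other's message list. The hunks swap only the list names, not the format keywords, so the {leader_name}/{winner_name} placeholders still disagree with the kwargs as written:

def send_leader_in_last_lap_message(event: LapEvent, server: Server):
    for message in leader_in_last_lap:
        server.api.send_chat(message.format(winner_name=event.participant.name.get()))


def send_winner_message(event: LapEvent, server: Server):
    for message in first_player_finished:
        server.api.send_chat(message.format(leader_name=event.participant.name.get()))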
946fdcc8305d7277f0de4a6ee27a3ecbecd5d02f
|
Change case of navbar names
|
source/conf.py
|
source/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
#import sphinx_bootstrap_theme
# http://www.sphinx-doc.org/en/stable/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = u' '
copyright = u'2016-2020 by Effective Quadratures'
author = u'Pranay Seshadri, Nicholas Wong, James Gross, Irene Vidris, Joe Zhou, Ashley Scillitoe'
# -- Bryn: Main Vuepress website location ------------------------------------
land_page = "http://localhost:8080"
outdir = sys.argv[-1] #Bryn: Assuming last system argument is output directory
#Compile command: "sphinx-build -b html source/ ../effective-quadratures.github.io/"
# The short X.Y version
version = u'9.0'
# The full version, including alpha/beta/rc tags
release = u'v9.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_docstemplates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.txt'
# The master toctree document.
master_doc = '_documentation/index'
#html_theme = "sphinx_rtd_theme"
#html_theme_path = ["_themes", ]
html_theme = 'eq_press'
html_logo = 'logo-5-black-text-lowres.png'
html_favicon = 'eq-logo-favicon.png'
def setup(app):
app.add_stylesheet('styles.css')
#html_theme_options = {
# 'style_nav_header_background': 'grey',
#}
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = [u'_docsbuild', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'EffectiveQuadraturesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '12pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'EffectiveQuadratures.tex', u'Effective Quadratures Documentation',
u'Pranay Seshadri, Nicholas Wong, Henry Yuchi, Irene Virdis', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'effectivequadratures', u'Effective Quadratures Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'EffectiveQuadratures', u' ',
author, 'EffectiveQuadratures', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
#todo_include_todos = True
html_theme_options = {
# A list of tuples containing pages or urls to link to.
# Valid tuples should be in the following forms:
# (name, page) # a link to a page
# (name, "/aa/bb", 1) # a link to an arbitrary relative url
# (name, "http://example.com", True) # arbitrary absolute url
# Note the "1" or "True" value above as the third argument to indicate
# an arbitrary url.
'landing_page': land_page,
'custom_links': [
('EQuadratures', '/equadratures/'),
('EIntegrator', '/eintegrator/'),
('Documentation', '/docs/_documentation/'),
('Workshops', '/workshops/'),
('Motivation', '/motivation/'),
('Team', '/team/'),
],
'external_links': [
('Discourse', 'https://discourse.effective-quadratures.org/'),
('Github', 'https://github.com/Effective-Quadratures')
],
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "true",
}
|
Python
| 0.000001
|
@@ -6668,17 +6668,17 @@
('E
-Q
+q
uadratur
@@ -6732,17 +6732,17 @@
('E
-I
+i
ntegrato
|
40688da356ef5e086d7482824c31c90d193b39f9
|
Update conf.py
|
source/conf.py
|
source/conf.py
|
# -*- coding: utf-8 -*-
#
# Hydrology Thematic Exploitation Platform documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 10 15:34:34 2015.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo', 'sphinx.ext.pngmath', 'sphinx.ext.jsmath', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Hydrology Thematic Exploitation Platform'
copyright = u'2015, HEP consortium'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1'
# The full version, including alpha/beta/rc tags.
release = '1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'HydrologyThematicExploitationPlatformdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'HydrologyThematicExploitationPlatform.tex', u'Hydrology Thematic Exploitation Platform Documentation',
u'HEP consortium', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
Python
| 0.000001
|
@@ -1002,51 +1002,8 @@
do',
- 'sphinx.ext.pngmath', 'sphinx.ext.jsmath',
'sp
|
f9d911091f01d91485f21c01850798892ed28dd0
|
add right arrow
|
scottsright/manual_readline.py
|
scottsright/manual_readline.py
|
char_sequences = {}
def on(seq):
def add_to_char_sequences(func):
char_sequences[seq] = func
return func
return add_to_char_sequences
@on('\x1b[D')
@on('\x02')
@on('\x02')
def left_arrow(cursor_offset, line):
return max(0, cursor_offset - 1), line
if __name__ == '__main__':
print repr(char_sequences)
|
Python
| 0.001032
|
@@ -178,27 +178,117 @@
%02')%0A
-@on('%5Cx02
+def left_arrow(cursor_offset, line):%0A return max(0, cursor_offset - 1), line%0A%0A@on('%1B%5BC')%0A@on('%06
')%0Adef
-lef
+righ
t_ar
@@ -317,36 +317,44 @@
e):%0A return m
-ax(0
+in(len(line)
, cursor_offset
@@ -345,33 +345,33 @@
, cursor_offset
--
++
1), line%0A%0Aif __
|
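Decoded (%1B is a raw ESC byte and %06 is Ctrl-F, shown here in \x escape notation), the patch adds a right_arrow mirror of left_arrow, clamped to the end of the line:

@on('\x1b[C')
@on('\x06')
def right_arrow(cursor_offset, line):
    return min(len(line), cursor_offset + 1), line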
42fcc098fe2b89f50b28b5d158ae6eead46dd382
|
Add command to help description of timez
|
hamper/plugins/timez.py
|
hamper/plugins/timez.py
|
import requests
import json
from hamper.interfaces import ChatCommandPlugin, Command
class Timez(ChatCommandPlugin):
name = 'timez'
priority = 2
def setup(self, loader):
try:
self.api_key = loader.config['timez']['api-key']
except (KeyError, TypeError):
self.api_key = None
api_url = "http://api.worldweatheronline.com/free/v1/tz.ashx"
self.api_url = "%s?key=%s&q=%%s&format=json" % (api_url, self.api_key)
super(Timez, self).setup(loader)
class Timez(Command):
''' '''
name = 'timez'
regex = '^timez (.*)'
long_desc = short_desc = (
"Look up time for [ZIP code | City, State (US Only) | "
"City Name, State, Country | City Name, Country | "
"Airport Code | IP "
)
def command(self, bot, comm, groups):
if not self.plugin.api_key:
bot.reply(
comm, "This plugin is missconfigured. Its missing an API "
"key. Go register one at "
"http://developer.worldweatheronline.com/apps/register"
)
return
query = comm['message'].strip('timez ')
resp = requests.get(self.plugin.api_url % query)
if resp.status_code != 200:
bot.reply(comm, "Error: A non 200 status code was returned")
jresp = json.loads(resp.text)
try:
tz = jresp['data']['time_zone'][0]
bot.reply(
comm,
"For %s, local time is %s at UTC offset %s" % (
query, tz['localtime'], tz['utcOffset']
)
)
except KeyError:
bot.reply(
comm, "Sorry, the internet didn't understand your request."
)
# Always let the other plugins run
return False
|
Python
| 0.000001
|
@@ -659,16 +659,24 @@
%22
+timez -
Look up
|
3555b002aae386220bc02d662a9b188426afc08f
|
Create a specific group for the Facebook plugins - makes it a bit neater in the list of plugins.
|
cmsplugin_facebook/cms_plugins.py
|
cmsplugin_facebook/cms_plugins.py
|
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from cmsplugin_facebook import models
class BasePlugin(CMSPluginBase):
name = None
def render(self, context, instance, placeholder):
context.update({'instance': instance,
'name': self.name,
'url': instance.pageurl or \
context['request'].build_absolute_uri()})
return context
class FacebookLikeBoxPlugin(BasePlugin):
model = models.FacebookLikeBox
name = 'Facebook Like Box'
render_template = 'cmsplugin_facebook/likebox.html'
change_form_template = 'cmsplugin_facebook/likebox_change_form.html'
class FacebookLikeButtonPlugin(BasePlugin):
model = models.FacebookLikeButton
name = 'Facebook Like Button'
render_template = 'cmsplugin_facebook/likebutton.html'
change_form_template = 'cmsplugin_facebook/likebutton_change_form.html'
plugin_pool.register_plugin(FacebookLikeBoxPlugin)
plugin_pool.register_plugin(FacebookLikeButtonPlugin)
|
Python
| 0
|
@@ -564,16 +564,40 @@
ke Box'%0A
+ module = 'Facebook'%0A
rend
@@ -834,16 +834,40 @@
Button'%0A
+ module = 'Facebook'%0A
rend
|