| commit (stringlengths 40-40) | subject (stringlengths 1-3.25k) | old_file (stringlengths 4-311) | new_file (stringlengths 4-311) | old_contents (stringlengths 0-26.3k) | lang (stringclasses 3 values) | proba (float64 0-1) | diff (stringlengths 0-7.82k) |
|---|---|---|---|---|---|---|---|
6ab7c268d21ada1c30414551bdbb03190560ae55
|
Fix typo in runserver.py breaking debug mode.
|
runserver.py
|
runserver.py
|
#!/usr/bin/env python
import argparse
from wake import app
parser = argparse.ArgumentParser()
parser.add_argument(
'--host',
default='127.0.0.1',
help='hostname to listen on',
)
parser.add_argument(
'--port',
type=int,
default=5000,
help='port to listen on',
)
parser.add_argument(
'--debug',
type=bool,
default=True,
help='toggle tracebacks and debugger',
)
args = parser.parse_args()
app.run(host=args.host, port=args.port, debug=app.debug)
|
Python
| 0
|
@@ -474,18 +474,19 @@
debug=a
-pp
+rgs
.debug)%0A
|
c929ea6b201a5cfed6b53a81ba6dbe8dc6c3241f
|
rename password keyword to passwd
|
tests.py
|
tests.py
|
from __future__ import (absolute_import, division,
print_function, unicode_literals)
from datetime import date
from tornado.ioloop import IOLoop
from tornado.testing import AsyncTestCase, gen_test
import tornado.platform.twisted
import mysql.connector
import MySQLdb
tornado.platform.twisted.install()
from toradbapi import ConnectionPool
class MysqlConnectorConnectionPoolTestCase(AsyncTestCase):
DB_CONFIG = dict(
user='root', password='', host='127.0.0.1', port=3306,
db='test_toradbapi')
DB_DRIVER = 'mysql.connector'
DATABASE_ERROR = mysql.connector.errors.DatabaseError
PROGRAMMING_ERROR = mysql.connector.errors.ProgrammingError
def get_new_ioloop(self):
# use singleton ioloop and reactor across all tests
return IOLoop.instance()
def setUp(self):
super(MysqlConnectorConnectionPoolTestCase, self).setUp()
# create test database and test table
self.cnx = mysql.connector.connect(**self.DB_CONFIG)
self.cursor = self.cnx.cursor()
self.cursor.execute('CREATE TABLE IF NOT EXISTS `person` ('
' `name` varchar(255) NOT NULL,'
' `dob` date DEFAULT NULL,'
' UNIQUE KEY (`name`))')
# create connection pool
self.pool = ConnectionPool(self.DB_DRIVER, **self.DB_CONFIG)
def tearDown(self):
self.cursor.execute('DROP TABLE `person`')
self.cursor.close()
self.cnx.close()
self.pool.close()
super(MysqlConnectorConnectionPoolTestCase, self).tearDown()
@gen_test
def test_run_query_empty(self):
result = yield self.pool.run_query('SELECT * FROM `person`')
self.assertEqual(result, [])
@gen_test
def test_insert_select(self):
yield self.pool.run_operation(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname', date(1000, 10, 10)))
result = yield self.pool.run_query('SELECT * FROM `person`')
self.assertEqual(result, [('testname', date(1000, 10, 10))])
@gen_test
def test_transaction_error(self):
def _interaction(txn):
txn.execute(
'INSERT INTO `person` (`dob`) VALUES (%s)')
txn.execute(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname', date(1000, 10, 10)))
try:
yield self.pool.run_interaction(_interaction)
except self.PROGRAMMING_ERROR:
pass
else:
self.fail()
result = yield self.pool.run_query('SELECT * FROM `person`')
self.assertEqual(result, [])
@gen_test
def test_transaction_rollback(self):
def _interaction(txn):
txn.execute(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname', date(1000, 10, 10)))
txn.execute('SELECT * FROM `person`')
self.assertEqual(txn.fetchall(), [('testname', date(1000, 10, 10))])
txn.execute(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname', date(1000, 10, 10)))
try:
yield self.pool.run_interaction(_interaction)
except self.DATABASE_ERROR:
pass
else:
self.fail()
result = yield self.pool.run_query('SELECT * FROM `person`')
self.assertEqual(result, [])
@gen_test
def test_transaction_success(self):
def _interaction(txn):
txn.execute(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname0', date(1000, 10, 10)))
txn.execute(
'INSERT INTO `person` (`name`, `dob`) VALUES (%s, %s)',
('testname1', date(1111, 11, 11)))
yield self.pool.run_interaction(_interaction)
result = yield self.pool.run_query('SELECT * FROM `person`')
self.assertEqual(result, [
('testname0', date(1000, 10, 10)),
('testname1', date(1111, 11, 11))])
class MySQLdbConnectionPoolTestCase(MysqlConnectorConnectionPoolTestCase):
DB_DRIVER = 'MySQLdb'
DATABASE_ERROR = MySQLdb.DatabaseError
PROGRAMMING_ERROR = MySQLdb.ProgrammingError
|
Python
| 0.000157
|
@@ -468,18 +468,16 @@
', passw
-or
d='', ho
|
e69deed27176406c499cb332c76d3c9ffcecd786
|
Add test for get_dates().
|
tests.py
|
tests.py
|
#!/usr/bin/env python
# encoding: utf-8
import datetime
import unittest
import mock
import pandas as pd
import pandas_finance
from pandas.util.testing import assert_frame_equal
from nose.tools import assert_equal
class GetStockTestCase(unittest.TestCase):
@mock.patch('pandas_finance.web.DataReader')
def test_get_stock_called_correctly(self, mock_datareader):
start = datetime.datetime(1999, 4, 3, 0, 0)
end = datetime.datetime(2005, 2, 5, 0, 0)
pandas_finance.get_stock('AAPL', start, end)
mock_datareader.assert_called_with('AAPL', 'yahoo', start, end)
def test_get_required_tickers_parses_tickers_with_newline(self):
m = mock.mock_open(read_data='TWTR,FB,AAPL,MSFT\n')
textfile = None # Only used to provide valid argument.
with mock.patch('pandas_finance.open', m, create=True):
result = pandas_finance.get_required_tickers(textfile)
assert_equal('TWTR,FB,AAPL,MSFT', result)
class ScrapeStockTestCase(unittest.TestCase):
def setUp(self):
"""Run once before each test in this test class."""
self.start = datetime.datetime(2014, 04, 29).date()
self.end = self.start
input_values = {'Volume': [12033400],
'Adj Close': [592.33],
'High': [595.98],
'Low': [589.51],
'Close': [592.33],
'Open': [593.74]}
index_label = [self.start]
input_columns = ['Open', 'High', 'Low', 'Close', 'Volume', 'Adj Close']
self.input_frame = pd.DataFrame(input_values,
columns=input_columns,
index=index_label)
self.input_frame.index.name = 'Date'
output_values = input_values
# get_stock converts datetime to isoformat string.
output_values['Date'] = '2014-04-29'
output_values['Stock'] = 'AAPL'
output_columns = ['Date'] + input_columns + ['Stock']
self.output_frame = pd.DataFrame(output_values, columns=output_columns)
@mock.patch('pandas_finance.write_frame_to_sql')
@mock.patch('pandas_finance.get_stock')
def test_scrape_stock_gives_a_valid_frame(self, mock_get_stock,
mock_write_frame):
mock_get_stock.return_value = self.input_frame
pandas_finance.scrape_stock('AAPL', self.start, self.end)
# Hacky workaround:
# Can't seem to use mock.assert_called_with; problem when comparing
# dataframes, grab argument directly and compare it to expected frame.
frame_called_with = mock_write_frame.call_args_list[0][0][0]
assert_frame_equal(self.output_frame, frame_called_with)
class InstallCrontabTestCase(unittest.TestCase):
pass
|
Python
| 0
|
@@ -208,16 +208,50 @@
t_equal%0A
+from freezegun import freeze_time%0A
%0A%0Aclass
@@ -2872,8 +2872,289 @@
pass%0A
+%0A%0Aclass GetDatesTestCase(unittest.TestCase):%0A @freeze_time('2014-04-10 15:05:05')%0A def test_get_dates(self):%0A start, end = pandas_finance.get_dates()%0A assert_equal(datetime.datetime(1900, 1, 1, 0, 0), start)%0A assert_equal(datetime.datetime.today(), end)%0A
|
64605573382f1c9fb2170d0cdbcd007f5ddae8d6
|
Fix to pass all test
|
tests/players/sample/console_player_test.py
|
tests/players/sample/console_player_test.py
|
from tests.base_unittest import BaseUnitTest
from pypokerengine.players.sample.console_player import PokerPlayer as ConsolePlayer
class ConsolePlayerTest(BaseUnitTest):
def setUp(self):
self.valid_actions = [\
{'action': 'fold', 'amount': 0},\
{'action': 'call', 'amount': 10},\
{'action': 'raise', 'amount': {'max': 105, 'min': 15}}\
]
def test_declare_fold(self):
mock_input = self.__gen_raw_input_mock(['f'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('fold', action)
self.eq(0, amount)
def test_declare_call(self):
mock_input = self.__gen_raw_input_mock(['c'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('call', action)
self.eq(10, amount)
def test_declare_valid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '15'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('raise', action)
self.eq(15, amount)
def test_correct_invalid_raise(self):
mock_input = self.__gen_raw_input_mock(['r', '14', '105'])
player = ConsolePlayer(mock_input)
action, amount = player.declare_action(None, self.valid_actions, None, None)
self.eq('raise', action)
self.eq(105, amount)
def __gen_raw_input_mock(self, mock_returns):
counter = []
def raw_input_wrapper(self):
mock_return = mock_returns[len(counter)]
counter.append(0)
return mock_return
return raw_input_wrapper
|
Python
| 0.000002
|
@@ -365,16 +365,685 @@
%7D%5C%0A %5D
+%0A self.round_state = %7B%0A 'dealer_btn': 1,%0A 'street': 'preflop',%0A 'seats': %5B%0A %7B'stack': 85, 'state': 'participating', 'name': u'player1', 'uuid': 'ciglbcevkvoqzguqvnyhcb'%7D,%0A %7B'stack': 100, 'state': 'participating', 'name': u'player2', 'uuid': 'zjttlanhlvpqzebrwmieho'%7D%0A %5D,%0A 'next_player': 1,%0A 'community_card': %5B%5D,%0A 'pot': %7B%0A 'main': %7B'amount': 15%7D,%0A 'side': %5B%5D%0A %7D%0A %7D%0A self.action_histories = %7B%0A 'action_histories': %5B%0A %7B'action': 'SMALLBLIND', 'amount': 5, 'add_amount': 5%7D,%0A %7B'action': 'BIGBLIND', 'amount': 10, 'add_amount': 5%7D%0A %5D%0A %7D
%0A%0A def
@@ -1148,32 +1148,61 @@
yer(mock_input)%0A
+ player.set_uuid(%22dummy%22)%0A
action, amou
@@ -1250,34 +1250,63 @@
id_actions,
-None, None
+self.round_state, self.action_histories
)%0A self.e
@@ -1459,32 +1459,61 @@
yer(mock_input)%0A
+ player.set_uuid(%22dummy%22)%0A
action, amou
@@ -1561,34 +1561,63 @@
id_actions,
-None, None
+self.round_state, self.action_histories
)%0A self.e
@@ -1784,32 +1784,61 @@
yer(mock_input)%0A
+ player.set_uuid(%22dummy%22)%0A
action, amou
@@ -1886,34 +1886,63 @@
id_actions,
-None, None
+self.round_state, self.action_histories
)%0A self.e
@@ -2123,24 +2123,53 @@
mock_input)%0A
+ player.set_uuid(%22dummy%22)%0A
action,
@@ -2229,18 +2229,47 @@
ns,
-None, None
+self.round_state, self.action_histories
)%0A
@@ -2545,8 +2545,9 @@
wrapper%0A
+%0A
|
c9cd0ed7b8d9d43c4143074489fcd5e14137b45a
|
implement list method main loop and quit action
|
queue.py
|
queue.py
|
#!/usr/bin/python3.4
# -*-coding:Utf-8 -*
'''module that contains the queue class'''
from renderingTask import renderingTask
class queue:
'''class that contains the list of all the rendering tasks to manage'''
def __init__(self,xml=False):
'''initialize queue object with an empty queue, filled with values extracted from an xml object if one is passed to the function'''
self.tasks = []
if xml != False:
self.fromXml(xml)
def fromXml(self,xml):
'''extract rendering task parameters from an xml object and add them to the queue'''
if xml.tag == 'queue':
for t in xml.findall('task'):
self.add(renderingTask(xml = t))
def toXmlStr(self,head=False):
'''export rendering task queue to an xml syntax string '''
txt =''
if head:
txt+= '<?xml version="1.0" encoding="UTF-8"?>\n'
txt += '<queue>\n'
for r in self.tasks:
txt += r.toXmlStr()
txt += '</queue>\n'
return txt
def add(self,added):
'''add rendering task to the queue'''
if type(added) == renderingTask:
self.tasks.append(added)
def list(self, log, scriptSetting):
'''list task and access editing functions'''
|
Python
| 0
|
@@ -112,16 +112,26 @@
ringTask
+%0Aimport os
%0A%0Aclass
@@ -1151,16 +1151,354 @@
ns'''%0A%09%09
+os.system('clear')%0A%09%09log.menuIn('Rendering Queue')%0A%09%09%0A%09%09while True:%0A%09%09%09choice = input(%22action?('q' to quit)%22).strip().lower()%0A%09%09%09%0A%09%09%09try:%0A%09%09%09%09if choice in %5B'q', 'quit', 'cancel'%5D:%0A%09%09%09%09%09choice = -1%0A%09%09%09%09else:%0A%09%09%09%09%09choice = int(choice)%0A%09%09%09except ValueError:%0A%09%09%09%09choice = -9999%0A%09%09%09%0A%09%09%09if choice == -1:%0A%09%09%09%09log.menuOut()%0A%09%09%09%09return%0A%09%09%09%0A%09%09%09%0A%09%09%09
%0A%09%0A%09%0A%09%0A%09
|
e37ef9a477c081d1dca11e99ab8d831dd4f9102a
|
fix some bug
|
radio.py
|
radio.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import getopt
import sys
from core import *
from player import Player
class radio():
def __init__(self, _player):
self.player = _player
def play(self):
pass
def forward(self):
self.player.forward()
def backward(self):
self.player.backward()
def volume_dec(self):
self.player.volume_dec()
def volume_inc(self):
self.player.volume_inc()
def pause(self):
self.player.pause()
class online_radio(radio):
def __init__(self, _player, channel_txt):
radio.__init__(self, _player)
self.channel_txt = channel_txt
def play(self):
print "open online radio channel list"
self.player.set_callback(None)
self.player.open_list(self.channel_txt)
class xiami_player(radio):
def __init__(self, _player, list_id_txt):
radio.__init__(self, _player)
# load list_id from list_id_txt
self.list_ids = [ [ int(i) for i in j.split()] for j in open(list_id_txt).readlines()]
self.list_counts = len(self.list_ids)
# bind player
logger.debug("%s"%self.list_ids)
self._list = None
self.index = 0
def play(self):
self.player.set_callback(self)
try:
if self.list_counts > 0:
if self._list and self.index >= self._list.size():
# end of album or classic jump to next
self.list_ids.append(self.list_ids.pop(0))
self._list = None
logger.debug("%s"%self.list_ids)
if not self._list:
self.index = 0
cur_id = self.list_ids[0]
if (cur_id[0] == 1):
# album
self._list = album_list(cur_id[1])
elif (cur_id[0] == 3):
# classic
self._list = classic_list(cur_id[1])
self._list.load_songs()
if self._list.size() > 0:
url = self._list.songs[self.index].url
self.index += 1
logger.info("play %s"%url)
self.player.open(url)
else:
logger.debug("list id is empty");
except:
logger.error("load song fail, list_id=%d"%self.list_id)
def forward(self):
self.play()
def backward(self):
if self.index-1 > 0:
self.index -= 2
self.play()
def next_list(self):
if self._list:
self.index = self._list.size()
self.play()
def pause(self):
# stop thread callback
if self.player.get_callback():
self.player.set_callback(None)
else:
self.player.set_callback(self)
self.player.pause()
def usage(name):
print "Usage:"
print " %s -c channel_file -l list_id_file"%name
if __name__ == "__main__":
channel_file = None
list_id_file = None
try:
opts, args = getopt.getopt(sys.argv[1:], "c:l:")
except getopt.GetoptError as err:
print err
usage(sys.argv[0])
sys.exit(1)
for o, a in opts:
if o == "-c":
channel_file = a
elif o == "-l":
list_id_file = a
else:
assert False, "unhandled option"
if not channel_file or not list_id_file:
print "not specific channel file or list id file"
usage(sys.argv[0])
sys.exit(1)
myplayer = Player()
myplayer.player_start()
radios = [online_radio(myplayer, channel_file), xiami_player(myplayer, list_id_file)]
my_radio = radios[0]
my_radio.play()
while True:
input_char = raw_input("")
if input_char == "q":
break
elif input_char == "p":
my_radio.pause()
elif input_char == ">":
my_radio.forward()
elif input_char == "<":
my_radio.backward()
elif input_char == "z":
my_radio.next_list()
elif input_char == "9":
my_radio.volume_dec()
elif input_char == "0":
my_radio.volume_inc()
elif input_char == "x":
# must restart player, player may be stuck on radio mode
myplayer.player_restart()
radios.append(radios.pop(0))
my_radio = radios[0]
my_radio.play()
myplayer.player_close()
myplayer.destroy()
|
Python
| 0.000004
|
@@ -65,16 +65,26 @@
ort sys%0A
+import os%0A
from cor
@@ -514,16 +514,55 @@
ause()%0A%0A
+ def next_list(self):%0A pass%0A%0A
class on
@@ -2442,34 +2442,9 @@
fail
-, list_id=%25d%22%25self.list_id
+%22
)%0A%0A
@@ -3605,16 +3605,172 @@
xit(1)%0A%0A
+ if not os.path.isfile(channel_file) or not os.path.isfile(list_id_file):%0A print %22file not exist%22%0A usage(sys.argv%5B0%5D)%0A sys.exit(1)%0A%0A
mypl
|
33ed99891ba95b1299000b90d03868d5652459b7
|
The RHS of epsilon rules can now be empty
|
purplex/grammar.py
|
purplex/grammar.py
|
import collections
EPSILON = '<empty>'
END_OF_INPUT = '<$>'
class Production(object):
"""Represents a grammar production rule."""
def __init__(self, rule, func):
items = rule.split(':', 1)
self.lhs = items[0].strip()
self.rhs = items[1].strip().split()
self.func = func
def __str__(self):
return '{} : {}'.format(self.lhs, ' '.join(self.rhs))
def __hash__(self):
return hash(str(self))
def __len__(self):
return len(self.rhs) - int(EPSILON in self.rhs)
class DottedRule(object):
"""Represents a "dotted rule" during closure construction."""
def __init__(self, production, pos, lookahead):
self.production = production
self.pos = pos
self.lookahead = lookahead
def __hash__(self):
return hash(repr(self))
def __repr__(self):
return '[{} : {}, {}]'.format(
self.production.lhs,
' '.join(self.production.rhs[:self.pos] + ['·']
+ self.production.rhs[self.pos:]),
self.lookahead,
)
def __eq__(self, other):
return repr(self) == repr(other)
def __len__(self):
return len(self.production)
@property
def lhs(self):
return self.production.lhs
@property
def rhs(self):
return self.production.rhs
@property
def at_end(self):
return self.pos == len(self.production.rhs)
def move_dot(self):
"""Returns the DottedRule that results from moving the dot."""
return self.__class__(self.production, self.pos + 1, self.lookahead)
class Grammar(object):
"""Represents a context-free grammar."""
def __init__(self, terminals, productions, start):
self.terminals = set(terminals)
self.nonterminals = collections.defaultdict(set)
for production in productions:
self.nonterminals[production.lhs].add(production)
# Augment the grammar to have a definite start symbol
self.start_symbol = 'START_{}'.format(start)
self.start = Production("{} : {}".format(self.start_symbol, start),
lambda a: a)
self.nonterminals[self.start_symbol].add(self.start)
self._first = collections.defaultdict(set)
self._compute_first()
self._follow = collections.defaultdict(set)
self._compute_follow()
def first(self, symbols):
"""Computes the intermediate FIRST set using symbols."""
ret = set()
if EPSILON in symbols:
return set([EPSILON])
for symbol in symbols:
ret |= self._first[symbol] - set([EPSILON])
if EPSILON not in self._first[symbol]:
break
else:
ret.add(EPSILON)
return ret
def _compute_first(self):
"""Computes the FIRST set for every symbol in the grammar.
Tentatively based on _compute_first in PLY.
"""
for terminal in self.terminals:
self._first[terminal].add(terminal)
self._first[END_OF_INPUT].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
new_first = self.first(production.rhs)
if new_first - self._first[nonterminal]:
self._first[nonterminal] |= new_first
changed = True
if not changed:
break
def _compute_follow(self):
"""Computes the FOLLOW set for every non-terminal in the grammar.
Tentatively based on _compute_follow in PLY.
"""
self._follow[self.start_symbol].add(END_OF_INPUT)
while True:
changed = False
for nonterminal, productions in self.nonterminals.items():
for production in productions:
for i, symbol in enumerate(production.rhs):
if symbol not in self.nonterminals:
continue
first = self.first(production.rhs[i + 1:])
new_follow = first - set([EPSILON])
if EPSILON in first or i == (len(production.rhs) - 1):
new_follow |= self._follow[nonterminal]
if new_follow - self._follow[symbol]:
self._follow[symbol] |= new_follow
changed = True
if not changed:
break
def initial_closure(self):
"""Computes the initial closure using the START_foo production."""
first_rule = DottedRule(self.start, 0, END_OF_INPUT)
return self.closure([first_rule])
def goto(self, rules, symbol):
"""Computes the next closure for rules based on the symbol we got.
Args:
rules - an iterable of DottedRules
symbol - a string denoting the symbol we've just seen
Returns: frozenset of DottedRules
"""
return self.closure(
{rule.move_dot() for rule in rules
if not rule.at_end and rule.rhs[rule.pos] == symbol},
)
def closure(self, rules):
"""Fills out the entire closure based on some initial dotted rules.
Args:
rules - an iterable of DottedRules
Returns: frozenset of DottedRules
"""
closure = set(rules)
while True:
changed = False
for rule in closure.copy():
# If the dot is at the end, there's no need to process it.
if rule.at_end:
continue
symbol = rule.rhs[rule.pos]
for production in self.nonterminals[symbol]:
rest = rule.production.rhs[rule.pos + 1:]
rest.append(rule.lookahead)
for first in self.first(rest):
if EPSILON in production.rhs:
# Move immediately to the end if the production
# goes to epsilon
new_rule = DottedRule(production, 1, first)
else:
new_rule = DottedRule(production, 0, first)
if new_rule not in closure:
closure.add(new_rule)
changed = True
if not changed:
break
return frozenset(closure)
def closures(self):
"""Computes all LR(1) closure sets for the grammar."""
initial = self.initial_closure()
closures = collections.OrderedDict()
goto = collections.defaultdict(dict)
todo = set([initial])
while todo:
closure = todo.pop()
closures[closure] = closure
symbols = {rule.rhs[rule.pos] for rule in closure
if not rule.at_end}
for symbol in symbols:
next_closure = self.goto(closure, symbol)
if next_closure in closures or next_closure in todo:
next_closure = (closures.get(next_closure)
or todo.get(next_closure))
else:
closures[next_closure] = next_closure
todo.add(next_closure)
goto[closure][symbol] = next_closure
return initial, closures, goto
|
Python
| 0.999251
|
@@ -281,16 +281,29 @@
.split()
+ or %5BEPSILON%5D
%0A
|
1bafd352110d186d1371d14714abd8de7e6e590f
|
Update prompt
|
pwndbg/__init__.py
|
pwndbg/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import gdb
import pwndbg.arch
import pwndbg.arguments
import pwndbg.disasm
import pwndbg.disasm.arm
import pwndbg.disasm.jump
import pwndbg.disasm.mips
import pwndbg.disasm.ppc
import pwndbg.disasm.sparc
import pwndbg.disasm.x86
import pwndbg.vmmap
import pwndbg.dt
import pwndbg.memory
import pwndbg.inthook
import pwndbg.elf
import pwndbg.proc
import pwndbg.regs
import pwndbg.stack
import pwndbg.stdio
import pwndbg.color
import pwndbg.typeinfo
import pwndbg.constants
import pwndbg.argv
import pwndbg.commands
import pwndbg.commands.hexdump
import pwndbg.commands.context
import pwndbg.commands.telescope
import pwndbg.commands.vmmap
import pwndbg.commands.dt
import pwndbg.commands.search
import pwndbg.commands.start
import pwndbg.commands.procinfo
import pwndbg.commands.auxv
import pwndbg.commands.windbg
import pwndbg.commands.ida
import pwndbg.commands.reload
import pwndbg.commands.rop
import pwndbg.commands.shell
import pwndbg.commands.aslr
import pwndbg.commands.misc
import pwndbg.commands.next
import pwndbg.commands.dumpargs
import pwndbg.commands.cpsr
import pwndbg.commands.argv
import pwndbg.commands.heap
__all__ = [
'arch',
'auxv',
'chain',
'color',
'compat',
'disasm',
'dt',
'elf',
'enhance',
'events',
'file',
'function',
'hexdump',
'ida',
'info',
'linkmap',
'malloc',
'memoize',
'memory',
'proc',
'regs',
'remote',
'search',
'stack',
'strings',
'symbol',
'typeinfo',
'ui',
'vmmap'
]
prompt = "pwn> "
prompt = "\x01" + prompt + "\x02" # SOH + prompt + STX
prompt = pwndbg.color.red(prompt)
prompt = pwndbg.color.bold(prompt)
pre_commands = """
set confirm off
set verbose off
set output-radix 0x10
set prompt %s
set height 0
set history expansion on
set history save on
set disassembly-flavor intel
set follow-fork-mode child
set backtrace past-main on
set step-mode on
set print pretty on
set width 0
set print elements 15
set input-radix 16
handle SIGALRM nostop print nopass
handle SIGSEGV stop print nopass
""".strip() % prompt
for line in pre_commands.strip().splitlines():
gdb.execute(line)
msg = "Loaded %i commands. Type pwndbg for a list." % len(pwndbg.commands._Command.commands)
print(pwndbg.color.red(msg))
@pwndbg.memoize.reset_on_stop
def prompt_hook(*a):
with pwndbg.stdio.stdio:
pwndbg.commands.context.context()
gdb.prompt_hook = prompt_hook
|
Python
| 0.000001
|
@@ -1464,16 +1464,19 @@
t = %22pwn
+dbg
%3E %22%0Aprom
|
ea2faeb88d2b6ddc98a9a10c760574dca993673c
|
change func1 to accept args
|
py/decorator_ex.py
|
py/decorator_ex.py
|
def entryExitFunc(f):
def newFunc():
print "inside decorator function"
print "entering", f.__name__
f()
print "exited", f.__name__
return newFunc
class entryExit(object):
def __init__(self, f):
'''
If there are no decorator arguments, the function to be decorated is passed
to the constructor.
'''
self.f = f
'''
The major constraint on the result of a decorator is that it be callable.
The __call__ method here achieves that.
'''
'''
The __call__ method is not called until the decorated function is called.
'''
def __call__(self):
print "entering", self.f.__name__
self.f()
print "exited", self.f.__name__
@entryExit
def func1():
print "inside function 1"
@entryExit
def func2():
print "inside function 2"
@entryExitFunc
def func3():
print "inside function 3"
if __name__ == "__main__":
func1()
print '\n'
func2()
print '\n'
func3()
|
Python
| 0.000001
|
@@ -212,35 +212,8 @@
):%0A%0A
- def __init__(self, f):%0A
@@ -336,22 +336,84 @@
- self.f = f
+def __init__(self, f):%0A self.f = f%0A print %22entryExit.__init__%22
%0A%0A
@@ -417,24 +417,30 @@
%0A '''%0A
+ Note:
The major c
@@ -669,18 +669,60 @@
l__(self
-):
+, *args):%0A print %22entryExit.__call__%22
%0A
@@ -771,16 +771,21 @@
self.f(
+*args
)%0A
@@ -840,16 +840,26 @@
f func1(
+a1, a2, a3
):%0A p
@@ -882,16 +882,53 @@
ction 1%22
+%0A print %22spell args: %22, a1, a2, a3
%0A%0A@entry
@@ -974,16 +974,36 @@
ction 2%22
+%0A print %22no args%22
%0A%0A@entry
@@ -1094,16 +1094,98 @@
func1(
+%22test%22, %22multiple%22, %22args%22)%0A print '%5Cn'%0A func1(%22another%22, %22round%22, %22of args%22
)%0A pr
@@ -1232,8 +1232,51 @@
func3()%0A
+%0A print '%5Cn'%0A print %22end of example%22%0A
|
e40205bb8a3267396bc8d40a7e83779344866d15
|
Add support for Pages.
|
pybossa/hateoas.py
|
pybossa/hateoas.py
|
# -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
"""Hateoas module for PYBOSSA."""
from flask import url_for
class Hateoas(object):
"""Hateoas class."""
def link(self, rel, title, href):
"""Return hateoas link."""
return "<link rel='%s' title='%s' href='%s'/>" % (rel, title, href)
def create_link(self, item_id, title, rel='self'):
"""Create hateoas link."""
# title = item.__class__.__name__.lower()
method = ".api_%s" % title
href = url_for(method, oid=item_id, _external=True)
return self.link(rel, title, href)
def create_links(self, item):
"""Create Hateoas links."""
cls = item.__class__.__name__.lower()
links = []
if cls == 'result':
link = self.create_link(item.id, title='result')
if item.project_id is not None:
links.append(self.create_link(item.project_id, title='project',
rel='parent'))
if item.task_id is not None:
links.append(self.create_link(item.task_id, title='task',
rel='parent'))
return links, link
elif cls == 'taskrun':
link = self.create_link(item.id, title='taskrun')
if item.project_id is not None:
links.append(self.create_link(item.project_id,
title='project', rel='parent'))
if item.task_id is not None:
links.append(self.create_link(item.task_id,
title='task', rel='parent'))
return links, link
elif cls == 'task':
link = self.create_link(item.id, title='task')
if item.project_id is not None:
links = [self.create_link(item.project_id,
title='project', rel='parent')]
return links, link
elif cls == 'category':
return None, self.create_link(item.id, title='category')
elif cls == 'project':
link = self.create_link(item.id, title='project')
if item.category_id is not None:
links.append(self.create_link(item.category_id,
title='category', rel='category'))
return links, link
elif cls == 'user':
link = self.create_link(item.id, title='user')
# TODO: add the projects created by the user as the
# links with rel=? (maybe 'project'??)
return None, link
elif cls == 'blogpost':
link = self.create_link(item.id, title='blogpost')
if item.project_id is not None:
links = [self.create_link(item.project_id,
title='project', rel='parent')]
return links, link
elif cls == 'announcement':
return None, self.create_link(item.id, title='announcement')
elif cls == 'helpingmaterial':
link = self.create_link(item.id, title='helpingmaterial')
if item.project_id is not None:
links = [self.create_link(item.project_id,
title='project', rel='parent')]
return links, link
elif cls == 'projectstats':
link = self.create_link(item.id, title='projectstats')
if item.project_id is not None:
links = [self.create_link(item.project_id,
title='project', rel='parent')]
return links, link
else: # pragma: no cover
return False
def remove_links(self, item):
"""Remove HATEOAS link and links from item."""
if item.get('link'):
item.pop('link')
if item.get('links'):
item.pop('links')
return item
|
Python
| 0
|
@@ -4003,32 +4003,296 @@
rel='parent')%5D%0A
+ elif cls == 'page':%0A link = self.create_link(item.id, title='page')%0A if item.project_id is not None:%0A links = %5Bself.create_link(item.project_id,%0A title='project', rel='parent')%5D%0A
retu
|
90e87e8d06393e8049b26299d3295a2ff46da521
|
switch homepage to dashboard
|
seqr/urls.py
|
seqr/urls.py
|
"""seqr URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
"""
from seqr.views.apis.dataset_api import add_dataset_handler
from settings import ENABLE_DJANGO_DEBUG_TOOLBAR
from django.conf.urls import url, include
from seqr.views.apis.family_api import \
update_family_field_handler, \
edit_families_handler, \
delete_families_handler
from seqr.views.apis.individual_api import \
update_individual_field_handler, \
edit_individuals_handler, \
delete_individuals_handler, \
receive_individuals_table_handler, \
save_individuals_table_handler
from seqr.views.apis.phenotips_api import \
proxy_to_phenotips_handler, \
phenotips_pdf_handler, \
phenotips_edit_handler
from seqr.views.apis.case_review_api import \
save_case_review_status, \
save_internal_case_review_notes, \
save_internal_case_review_summary
from seqr.views.pages.case_review_page import \
case_review_page, \
case_review_page_data, \
export_case_review_families_handler, \
export_case_review_individuals_handler
from seqr.views.pages.dashboard_page import \
dashboard_page, \
dashboard_page_data, \
export_projects_table_handler
from seqr.views.pages.project_page import \
project_page, \
project_page_data, \
export_project_families_handler, \
export_project_individuals_handler
from seqr.views.pages.staff.staff_pages import \
staff_dashboard, \
users_page, \
discovery_sheet
from seqr.views.pages.variant_search_page import \
variant_search_page, \
variant_search_page_data
from seqr.views.apis.awesomebar_api import awesomebar_autocomplete_handler
from seqr.views.apis.auth_api import login_required_error, API_LOGIN_REQUIRED_URL
from seqr.views.apis.project_api import create_project_handler, update_project_handler, delete_project_handler
from seqr.views.apis.project_categories_api import update_project_categories_handler
from seqr.views.apis.variant_search_api import query_variants_handler
page_endpoints = {
'dashboard': {
'html': dashboard_page,
'initial_json': dashboard_page_data,
},
'project/(?P<project_guid>[^/]+)/project_page': {
'html': project_page,
'initial_json': project_page_data,
},
'project/(?P<project_guid>[^/]+)/case_review': {
'html': case_review_page,
'initial_json': case_review_page_data,
},
'(project/(?P<project_guid>[^/]+)/)?(family/(?P<family_guid>[^/]+)/)?variant_search': {
'html': variant_search_page,
'initial_json': variant_search_page_data,
},
}
# NOTE: the actual url will be this with an '/api' prefix
api_endpoints = {
'individuals/save_case_review_status': save_case_review_status,
'individual/(?P<individual_guid>[\w.|-]+)/update/(?P<field_name>[\w.|-]+)': update_individual_field_handler,
'family/(?P<family_guid>[\w.|-]+)/save_internal_case_review_notes': save_internal_case_review_notes,
'family/(?P<family_guid>[\w.|-]+)/save_internal_case_review_summary': save_internal_case_review_summary,
'family/(?P<family_guid>[\w.|-]+)/update/(?P<field_name>[\w.|-]+)': update_family_field_handler,
'dashboard/export_projects_table': export_projects_table_handler,
'project/(?P<project_guid>[^/]+)/export_case_review_families': export_case_review_families_handler,
'project/(?P<project_guid>[^/]+)/export_case_review_individuals': export_case_review_individuals_handler,
'project/(?P<project_guid>[^/]+)/export_project_families': export_project_families_handler,
'project/(?P<project_guid>[^/]+)/export_project_individuals': export_project_individuals_handler,
'project/create_project': create_project_handler,
'project/(?P<project_guid>[^/]+)/update_project': update_project_handler,
'project/(?P<project_guid>[^/]+)/delete_project': delete_project_handler,
'project/(?P<project_guid>[^/]+)/update_project_categories': update_project_categories_handler,
'project/(?P<project_guid>[^/]+)/query_variants': query_variants_handler,
'project/(?P<project_guid>[^/]+)/edit_families': edit_families_handler,
'project/(?P<project_guid>[^/]+)/delete_families': delete_families_handler,
'project/(?P<project_guid>[^/]+)/edit_individuals': edit_individuals_handler,
'project/(?P<project_guid>[^/]+)/delete_individuals': delete_individuals_handler,
'project/(?P<project_guid>[^/]+)/upload_individuals_table': receive_individuals_table_handler,
'project/(?P<project_guid>[^/]+)/save_individuals_table/(?P<upload_file_id>[^/]+)': save_individuals_table_handler,
'project/(?P<project_guid>[^/]+)/add_dataset': add_dataset_handler,
'awesomebar': awesomebar_autocomplete_handler,
}
# page templates
urlpatterns = []
for url_endpoint, handler_functions in page_endpoints.items():
urlpatterns.append( url("^%(url_endpoint)s$" % locals() , handler_functions['html']) )
urlpatterns.append( url("^api/%(url_endpoint)s$" % locals() , handler_functions['initial_json']) )
# api
for url_endpoint, handler_function in api_endpoints.items():
urlpatterns.append( url("^api/%(url_endpoint)s$" % locals(), handler_function) )
# login redirect for ajax calls
urlpatterns += [
url(API_LOGIN_REQUIRED_URL.lstrip('/'), login_required_error)
]
# phenotips urls
phenotips_urls = '^(?:%s)' % ('|'.join([
'ssx', 'skin', 'skins', 'get', 'lock', 'preview', 'download', 'export',
'XWiki', 'cancel', 'resources', 'rollback', 'rest', 'webjars', 'bin', 'jsx'
]))
urlpatterns += [
url(phenotips_urls, proxy_to_phenotips_handler, name='proxy_to_phenotips'),
]
urlpatterns += [
url('project/(?P<project_guid>[^/]+)/patient/(?P<patient_id>[^/]+)/phenotips_pdf', phenotips_pdf_handler),
url('project/(?P<project_guid>[^/]+)/patient/(?P<patient_id>[^/]+)/phenotips_edit', phenotips_edit_handler),
]
#urlpatterns += [
# url("^api/v1/%(url_endpoint)s$" % locals(), handler_function) for url_endpoint, handler_function in api_endpoints.items()]
# other staff-only endpoints
urlpatterns += [
url("^staff/?$", staff_dashboard, name="staff_dashboard"),
url("^staff/users/?", users_page, name="users_page"),
url("^staff/discovery_sheet/?(?P<project_guid>[^/]+)?/?", discovery_sheet, name="discovery_sheet"),
]
urlpatterns += [
url(r'^hijack/', include('hijack.urls')),
]
# django debug toolbar
if ENABLE_DJANGO_DEBUG_TOOLBAR:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
Python
| 0.000001
|
@@ -2118,26 +2118,20 @@
= %7B%0A
-'dashboard
+r'%5E$
': %7B%0A
|
b553029859b55db2963b15694f5f17714ac8c079
|
Update Futures_demo.py
|
examples/Futures_demo.py
|
examples/Futures_demo.py
|
import matplotlib.pyplot as plt
import OnePy as op
####### Strategy Demo
class MyStrategy(op.StrategyBase):
# Available attributes:
# list format: self.cash, self.position, self.margin,
# self.total, self.unre_profit
def __init__(self,marketevent):
super(MyStrategy,self).__init__(marketevent)
def prenext(self):
# print sum(self.re_profit)
# print self.unre_profit[-1]
pass
def next(self):
"""这里写主要的策略思路"""
if self.i.SMA(period=5, index=-1) > self.i.SMA(period=10,index=-1):
self.Buy(2)
else:
self.Sell(1)
go = op.OnePiece()
data = op.Futures_CSVFeed(datapath='../data/IF0000_1min.csv',instrument='IF0000',
fromdate='2010-04-19',todate='2010-04-20',
timeframe=1)
data_list = [data]
portfolio = op.PortfolioBase
strategy = MyStrategy
broker = op.SimulatedBroker
go.set_backtest(data_list,[strategy],portfolio,broker,'Futures') # futures mode
go.set_commission(commission=15,margin=0.13,mult=10,commtype='FIX') # fixed commission
# go.set_commission(commission=0.00025,margin=0.15,mult=10,commtype='PCT') # percentage commission
go.set_cash(100000) # set the initial capital
# go.set_pricetype('close') # fill at the close price; defaults to open if unset
# go.set_notify() # print the trade log
go.sunny() # start the strategy
# print go.get_tlog() # print the trade records
go.plot(instrument='IF0000')
# simple plotting: delete the 1 after any option below that you want drawn
# go.oldplot(['un_profit','re_profit','position1','cash1','total','margin1','avg_price1'])
|
Python
| 0.000001
|
@@ -793,46 +793,8 @@
-20'
-,%0A timeframe=1
)%0A%0A%0A
@@ -1154,16 +1154,18 @@
h(100000
+00
)
|
a49c58450316fd87676da9ca8b58acc71b1062b9
|
Removed import of __future__ print
|
pydy/viz/server.py
|
pydy/viz/server.py
|
#!/usr/bin/env python
from __future__ import print_function
import os
import signal
import socket
import webbrowser
import BaseHTTPServer
from SimpleHTTPServer import SimpleHTTPRequestHandler
__all__ = ['Server']
class StoppableHTTPServer(BaseHTTPServer.HTTPServer):
"""
Overrides BaseHTTPServer.HTTPServer to include a stop
function.
"""
def server_bind(self):
BaseHTTPServer.HTTPServer.server_bind(self)
self.socket.settimeout(1)
self.run = True
def get_request(self):
while self.run:
try:
sock, addr = self.socket.accept()
sock.settimeout(None)
return (sock, addr)
except socket.timeout:
pass
def stop(self):
self.run = False
def serve(self):
while self.run:
try:
self.handle_request()
except TypeError:
# When server is being closed, while loop can run once
# after setting self.run = False depending on how it
# is scheduled.
pass
class Server(object):
"""
Parameters
----------
:param port : integer
Defines the port on which the server will run.
:param scene_file : name of the scene_file generated for visualization
Used here to display the url
:param directory : path of the directory which contains static and scene files.
Server is started in this directory itself.
Example
-------
>>> server = Server(scene_file=_scene_json_file)
>>> server.run_server()
"""
def __init__(self, port=8000, scene_file="Null"):
self.port = port
self.scene_file = scene_file
self.directory = "static/"
def run_server(self):
# Change dir to static first.
os.chdir(self.directory)
handler_class = SimpleHTTPRequestHandler
server_class = StoppableHTTPServer
protocol = "HTTP/1.0"
server_address = ('127.0.0.1', self.port)
handler_class.protocol_version = protocol
self.httpd = server_class(server_address, handler_class)
sa = self.httpd.socket.getsockname()
print("Serving HTTP on", sa[0], "port", sa[1], "...")
print("To view visualization, open:\n")
url = "http://localhost:"+str(sa[1]) + "/index.html?load=" + \
self.scene_file
print(url)
webbrowser.open(url)
print("Hit Ctrl+C to stop the server...")
signal.signal(signal.SIGINT, self._stop_server)
self.httpd.serve()
def _stop_server(self, signal, frame):
"""
Confirms and stops the visualisation server
:param signal:
Required by signal.signal
:param frame:
Required by signal.signal
"""
res = raw_input("Shutdown this visualization server ([y]/n)? ")
if res is (None or 'y'):
print("Shutdown confirmed")
print("Shutting down server...")
self.httpd.stop()
else:
print("Resuming operations...")
|
Python
| 0.998496
|
@@ -20,47 +20,8 @@
on%0A%0A
-from __future__ import print_function%0A%0A
impo
|
0ed07211d62044a42e1b0ff024f8feb20435270d
|
Use strings for IDs in Committee Popolo
|
pombola/south_africa/views/api.py
|
pombola/south_africa/views/api.py
|
from django.http import JsonResponse
from django.views.generic import ListView
from pombola.core.models import Organisation
# Output Popolo JSON suitable for WriteInPublic for any committees that have an
# email address.
class CommitteesPopoloJson(ListView):
queryset = Organisation.objects.filter(
kind__name='National Assembly Committees',
contacts__kind__slug='email'
)
def render_to_response(self, context, **response_kwargs):
return JsonResponse(
{
'persons': [
{
'id': committee.id,
'name': committee.name,
'email': committee.contacts.filter(kind__slug='email')[0].value,
'contact_details': []
}
for committee in context['object_list']
]
}
)
|
Python
| 0.000001
|
@@ -580,16 +580,20 @@
'id':
+str(
committe
@@ -596,16 +596,17 @@
ittee.id
+)
,%0A
|
1cff212270e0ac0f23df5f788e0bf10426b83529
|
Fix email admin: HTML body is not being displayed (#418)
|
post_office/sanitizer.py
|
post_office/sanitizer.py
|
from django.utils.html import mark_safe, format_html
from django.utils.translation import gettext_lazy
try:
import bleach
except ImportError:
# if bleach is not installed, render HTML as escaped text to prevent XSS attacks
heading = gettext_lazy("Install 'bleach' to render HTML properly.")
clean_html = lambda body: format_html('<p><em>{heading}</em></p>\n<div>{body}</div>',
heading=heading, body=body)
else:
styles = [
'border', 'border-top', 'border-right', 'border-bottom', 'border-left',
'border-radius',
'box-shadow',
'height',
'margin', 'margin-top', 'margin-right', 'margin-bottom', 'margin-left',
'padding', 'padding-top', 'padding-right', 'padding-bottom', 'padding-left',
'width',
'max-width',
'min-width',
'border-collapse',
'border-spacing',
'caption-side',
'empty-cells',
'table-layout',
'direction',
'font',
'font-family',
'font-style',
'font-variant',
'font-size',
'font-weight',
'letter-spacing',
'line-height',
'text-align',
'text-decoration',
'text-indent',
'text-overflow',
'text-shadow',
'text-transform',
'white-space',
'word-spacing',
'word-wrap',
'vertical-align',
'color',
'background',
'background-color',
'background-image',
'background-position',
'background-repeat',
'bottom',
'clear',
'cursor',
'display',
'float',
'left',
'opacity',
'outline',
'overflow',
'position',
'resize',
'right',
'top',
'visibility',
'z-index',
'list-style-position',
'list-style-tyle',
]
tags=[
'a',
'abbr',
'acronym',
'b',
'blockquote',
'br',
'caption',
'center',
'code',
'em',
'div',
'font',
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'head',
'hr',
'i',
'img',
'label',
'li',
'ol',
'p',
'pre',
'span',
'strong',
'table', 'tbody', 'tfoot', 'td', 'th', 'thead', 'tr',
'u',
'ul',
]
attributes={
'a': ['class', 'href', 'id', 'style', 'target'],
'abbr': ['class', 'id', 'style'],
'acronym': ['class', 'id', 'style'],
'b': ['class', 'id', 'style'],
'blockquote': ['class', 'id', 'style'],
'br': ['class', 'id', 'style'],
'caption': ['class', 'id', 'style'],
'center': ['class', 'id', 'style'],
'code': ['class', 'id', 'style'],
'em': ['class', 'id', 'style'],
'div': ['class', 'id', 'style', 'align', 'dir'],
'font': ['class', 'id', 'style', 'color', 'face', 'size'],
'h1': ['class', 'id', 'style', 'align', 'dir'],
'h2': ['class', 'id', 'style', 'align', 'dir'],
'h3': ['class', 'id', 'style', 'align', 'dir'],
'h4': ['class', 'id', 'style', 'align', 'dir'],
'h5': ['class', 'id', 'style', 'align', 'dir'],
'h6': ['class', 'id', 'style', 'align', 'dir'],
'head': ['dir', 'lang'],
'hr': ['align', 'size', 'width'],
'i': ['class', 'id', 'style'],
'img': ['class', 'id', 'style', 'align', 'border', 'height', 'hspace', 'src', 'usemap', 'vspace', 'width'],
'label': ['class', 'id', 'style'],
'li': ['class', 'id', 'style', 'dir', 'type'],
'ol': ['class', 'id', 'style', 'dir', 'type'],
'p': ['class', 'id', 'style', 'align', 'dir'],
'pre': ['class', 'id', 'style'],
'span': ['class', 'id', 'style'],
'strong': ['class', 'id', 'style'],
'table': ['class', 'id', 'style', 'align', 'bgcolor', 'border', 'cellpadding', 'cellspacing', 'dir', 'frame', 'rules', 'width'],
'tbody': ['class', 'id', 'style'],
'tfoot': ['class', 'id', 'style'],
'td': ['class', 'id', 'style', 'abbr', 'align', 'bgcolor', 'colspan', 'dir', 'height', 'lang', 'rowspan', 'scope', 'style', 'valign', 'width'],
'th': ['class', 'id', 'style', 'abbr', 'align', 'background', 'bgcolor', 'colspan', 'dir', 'height', 'lang', 'scope', 'style', 'valign', 'width'],
'thead': ['class', 'id', 'style'],
'tr': ['class', 'id', 'style', 'align', 'bgcolor', 'dir', 'style', 'valign'],
'u': ['class', 'id', 'style'],
'ul': ['class', 'id', 'style', 'dir', 'type'],
},
try:
from bleach.css_sanitizer import CSSSanitizer
css_sanitizer = CSSSanitizer(
allowed_css_properties=styles,
)
clean_html = lambda body: mark_safe(bleach.clean(
body,
tags=tags,
attributes=attributes,
strip=True,
strip_comments=True,
css_sanitizer=css_sanitizer,
))
except ModuleNotFoundError:
# if bleach version is prior to 5.0.0
clean_html = lambda body: mark_safe(bleach.clean(
body,
tags=tags,
attributes=attributes,
strip=True,
strip_comments=True,
styles=styles,
))
|
Python
| 0
|
@@ -4655,17 +4655,16 @@
%5D,%0A %7D
-,
%0A try
|
c606e5d1481ae82072cccd4b9edb6b7d73933277
|
update version in preparation for release
|
pyface/__init__.py
|
pyface/__init__.py
|
#------------------------------------------------------------------------------
# Copyright (c) 2005-2011, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
# Thanks for using Enthought open source!
#
# Author: Enthought, Inc.
# Description: <Enthought pyface package component>
#------------------------------------------------------------------------------
""" Reusable MVC-based components for Traits-based applications.
Part of the TraitsGUI project of the Enthought Tool Suite.
"""
__version__ = '4.0.1'
__requires__ = [
'traits',
]
|
Python
| 0
|
@@ -792,11 +792,11 @@
'4.
-0.1
+1.0
'%0A%0A_
|
3429e18dd112f4c5058d0e27662379c2860baded
|
Fix indentation on string_methods.py
|
exercises/concept/little-sisters-essay/string_methods.py
|
exercises/concept/little-sisters-essay/string_methods.py
|
def capitalize_title(title):
"""
:param title: str title string that needs title casing
:return: str title string in title case (first letters capitalized)
"""
pass
def check_sentence_ending(sentence):
"""
:param sentence: str a sentence to check.
:return: bool True if punctuated correctly with period, False otherwise.
"""
pass
def clean_up_spacing(sentence):
"""
:param sentence: str a sentence to clean of leading and trailing space characters.
:return: str a sentence that has been cleaned of leading and trailing space characters.
"""
pass
def replace_word_choice(sentence, old_word, new_word):
"""
:param sentence: str a sentence to replace words in.
:param new_word: str replacement word
:param old_word: str word to replace
:return: str input sentence with new words in place of old words
"""
pass
|
Python
| 0.998856
|
@@ -676,24 +676,25 @@
%22%22%22%0A%0A
+
:param sente
@@ -733,24 +733,25 @@
ords in.%0A
+
:param new_w
@@ -776,16 +776,17 @@
nt word%0A
+
:para
@@ -820,16 +820,17 @@
lace%0A
+
:return:
@@ -887,16 +887,17 @@
d words%0A
+
%22%22%22%0A%0A
|
d779c126e922b6b9907100ac4fc75de9d085b98a
|
Revert "Update runc4.py"
|
MR-OCP/MROCPdjango/ocpipeline/procs/runc4.py
|
MR-OCP/MROCPdjango/ocpipeline/procs/runc4.py
|
#!/usr/bin/env python
# Copyright 2014 Open Connectome Project (http://openconnecto.me)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# runc4.py
# Created by Greg Kiar on 2015-05-28.
# Email: TODO GK
import argparse
def runc4(nifti_paths, b_paths, opts, email):
print "I'm running!"
pass # TODO GK
# parse inputs
#forge list files
#call m2g using qsub and list files as commandline args
def main():
#runc4(niftis, bs, opts, email)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -805,186 +805,4 @@
GK%0A
- # parse inputs%0A #forge list files%0A #call m2g using qsub and list files as commandline args%0A%0Adef main():%0A %0A #runc4(niftis, bs, opts, email)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
217d1f94f03b5cda709798dda98380362b937bd3
|
update comment
|
samples/python/topology/games/fizz_buzz.py
|
samples/python/topology/games/fizz_buzz.py
|
from streamsx.topology.topology import Topology
import streamsx.topology.context
import fizz_buzz_functions
def main():
"""
Plays Fizz Buzz (https://en.wikipedia.org/wiki/Fizz_buzz)
Example:
python3 fizz_buzz.py
Output:
1
2
Fizz!
4
Buzz!
Fizz!
7
8
Fizz!
Buzz!
11
Fizz!
13
14
FizzBuzz!
...
"""
topo = Topology("fizz_buzz")
# Declare an stream of int values
counting = topo.source(fizz_buzz_functions.int_tuples)
# Print the tuples to standard output
play_fizz_buzz(counting).print()
# At this point the streaming topology (streaming) is
# declared, but no data is flowing. The topology
# must be submitted to a context to be executed.
# execute the topology by submitting to a standalone context
streamsx.topology.context.submit("STANDALONE", topo.graph)
def play_fizz_buzz(counting):
"""
Return a stream that plays Fizz Buzz based
upon the values in the input stream.
Transform an input stream of integers to a
stream of strings that follows
the Fizz Buzz rules based upon each value in the
input stream.
Args:
counting: input stream
Returns:
transformed output stream
"""
shouts = counting.transform(fizz_buzz_functions.fizz_buzz)
return shouts
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -503,17 +503,16 @@
eclare a
-n
stream
|
13afade2f33e2e99127526777585357a405983ff
|
Remove unused comments & add weights check
|
sklearn_porter/classifier/KNeighborsClassifier/__init__.py
|
sklearn_porter/classifier/KNeighborsClassifier/__init__.py
|
from .. import Classifier
class KNeighborsClassifier(Classifier):
"""
See also
--------
sklearn.neighbors.KNeighborsClassifier
http://scikit-learn.org/0.18/modules/generated/sklearn.neighbors.KNeighborsClassifier.html
"""
SUPPORTED_METHODS = ['predict']
# @formatter:off
TEMPLATES = {
'java': {
'type': '{0}',
'arr': '{{{0}}}',
'arr[]': '{type}[] {name} = {{{values}}};',
'arr[][]': '{type}[][] {name} = {{{values}}};',
'indent': ' ',
},
}
# @formatter:on
def __init__(
self, language='java', method_name='predict', class_name='Tmp'):
super(KNeighborsClassifier, self).__init__(language, method_name, class_name)
def port(self, model):
"""
Port a trained model to the syntax of a chosen programming language.
Parameters
----------
:param model : KNeighborsClassifier
An instance of a trained KNeighborsClassifier classifier.
"""
super(self.__class__, self).port(model)
self.n_classes = len(self.model.classes_)
self.n_templates = len(self.model._fit_X)
self.n_features = len(self.model._fit_X[0])
self.n_neighbors = self.model.n_neighbors
self.algorithm = self.model.algorithm
self.power_param = self.model.p
if self.algorithm is not 'brute':
from sklearn.neighbors.kd_tree import KDTree
from sklearn.neighbors.ball_tree import BallTree
tree = self.model._tree
if isinstance(tree, (KDTree, BallTree)):
self.tree = tree
self.metric = self.model.metric
# print('algorithm', self.model.algorithm)
# print('classes_', self.model.classes_)
# print('metric', self.model.metric)
# print('metric_params', self.model.metric_params)
# print('n_neighbors', self.model.n_neighbors)
# print('radius', self.model.radius)
# print('algorithm', self.model.algorithm)
# print('weights', self.model.weights)
# print('_fit_X', self.model._fit_X)
# print('_y', self.model._y)
# print('_tree', self.model._tree)
# print('p', self.model.p)
# print('_tree.data', self.model._tree.data)
# for i in dir(self.model):
# print(i, " ", type(getattr(self.model, i)))
# self.n_features = len(self.model.coef_[0])
# self.n_classes = len(self.model.classes_)
if self.method_name == 'predict':
return self.predict()
def predict(self):
"""
Port the predict method.
Returns
-------
:return: out : string
The ported predict method.
"""
return self.create_class(self.create_method())
def create_method(self):
"""
Build the model method or function.
Returns
-------
:return out : string
The built method as string.
"""
# Distance computation
metric_name = '.'.join(['metric', self.metric])
distance_comp = self.temp(
metric_name, indentation=1, skipping=True)
# Templates
temps = []
for atts in enumerate(self.model._fit_X):
tmp = [self.temp('type').format(repr(a)) for a in atts[1]]
tmp = self.temp('arr').format(', '.join(tmp))
temps.append(tmp)
temps = ', '.join(temps)
temps = self.temp('arr[][]').format(
type='double',
name='X',
values=temps,
n=self.n_templates,
m=self.n_features)
# Classes
classes = self.model._y
classes = [self.temp('type').format(int(c)) for c in classes]
classes = ', '.join(classes)
classes = self.temp('arr[]').format(
type='int',
name='y',
values=classes,
n=self.n_templates)
return self.temp('method.predict', indentation=1, skipping=True).format(
method_name=self.method_name,
class_name=self.class_name,
n_neighbors=self.n_neighbors,
n_templates=self.n_templates,
n_features=self.n_features,
n_classes=self.n_classes,
distance_computation=distance_comp,
power=self.power_param,
X=temps,
y=classes)
def create_class(self, method):
"""
Build the model class.
Returns
-------
:return out : string
The built class as string.
"""
return self.temp('class').format(
class_name=self.class_name,
method_name=self.method_name,
method=method,
n_features=self.n_features)
|
Python
| 0
|
@@ -1718,25 +1718,24 @@
.metric%0A
-%0A
# print(
@@ -1730,817 +1730,167 @@
-# print('algorithm', self.model.algorithm)%0A # print('classes_', self.model.classes_)%0A # print('metric', self.model.metric)%0A # print('metric_params', self.model.metric_params)%0A # print('n_neighbors', self.model.n_neighbors)%0A # print('radius', self.model.radius)%0A # print('algorithm', self.model.algorithm)%0A # print('weights', self.model.weights)%0A%0A # print('_fit_X', self.model._fit_X)%0A # print('_y', self.model._y)%0A # print('_tree', self.model._tree)%0A # print('p', self.model.p)%0A%0A # print('_tree.data', self.model._tree.data)%0A%0A # for i in dir(self.model):%0A # print(i, %22 %22, type(getattr(self.model, i)))%0A%0A # self.n_features = len(self.model.coef_%5B0%5D)%0A # self.n_classes = len(self.model.classes_
+if self.model.weights is not 'uniform':%0A msg = %22Only 'uniform' weights are supported for this classifier.%22%0A raise NotImplementedError(msg
)%0A%0A
|
16725435efdf7ae05fa6ae04a0ce2d0ffe547b9c
|
Tidy up a bit
|
2015/python/2015-20.py
|
2015/python/2015-20.py
|
def total_presents(house_number, presents_per_elf, elf_limit=None):
"""Calculate how many presents house_number should receive
Each house is visited by numbered elves which match the divisors of
house_number, and each elf delivers a quantity of presents that
match the elf’s number multiplied by presents_per_elf.
For instance, given:
house_number == 9
presents_per_elf = 10
The divisors (and therefore the elves) are:
[1, 3, 9]
And the presents delivered by each:
[10, 30, 90]
For a total number of presents:
130
"""
# Set a bound within which to search for divisors
int_sqrt_ish = int(house_number ** 0.5)
# All the numbers that cleanly divide house_number
divisors = [
(x, house_number / x)
for x in range(1, int_sqrt_ish + 1)
if house_number % x == 0
]
# Flatten the divisors list and remove duplicates
divisors = {divisor for divisor_pair in divisors
for divisor in divisor_pair}
if elf_limit is not None:
divisors = (d for d in divisors
if d * elf_limit >= house_number)
return sum(d * presents_per_elf for d in divisors)
def first_house_with_n_presents_linear(
target_presents, head_start=50,
elf_limit=None, presents_per_elf=10):
"""Return the number of the first house with at least total_presents
head_start determines which house to start at — smaller numbers give
a larger head start (it is used to divide target_presents).
This implements a linear search.
"""
presents = 0
house_number = target_presents // head_start
while presents < target_presents:
house_number += 1
presents = total_presents(house_number, elf_limit=elf_limit,
presents_per_elf=presents_per_elf)
if house_number % 10_000 == 0:
print(f'{house_number:,}: {presents:,}')
return house_number
def first_house_with_n_presents_binary(target_presents,
low_point=1, high_point=None):
"""Return the number of the first house with at least total_presents
This implements a binary, or divide and conquer, search.
It doesn’t actually work for this puzzle because the search space
is not linearly ascending — lower numbered houses can have higher
numbers of presents.
"""
if high_point is None:
high_point = target_presents
closest_presents = high_point * 2
closest_house = None
while closest_presents >= target_presents:
if low_point == high_point:
return low_point
elif high_point - low_point == 1:
high_presents = total_presents(high_point)
low_presents = total_presents(low_point)
if low_presents >= target_presents:
return low_point
else:
return high_point
mid_point = low_point + ((high_point - low_point) // 2)
mid_point_presents = total_presents(mid_point)
if mid_point_presents == target_presents:
# Found the number of presents exactly (unlikely!)
return mid_point
if mid_point_presents < target_presents:
low_point = mid_point
elif mid_point_presents > (target_presents * 1.05):
# Greater than 5% away from the target number of presents
high_point = mid_point
def test_house_total_presents():
test_input = [
(1, 10),
(2, 30),
(3, 40),
(4, 70),
(5, 60),
(6, 120),
(7, 80),
(8, 150),
(9, 130),
]
for house_number, expected_presents in test_input:
assert total_presents(house_number) == expected_presents
def test_first_house_with_n_presents():
test_input = [
(10, 1),
(20, 2),
(30, 2),
(40, 3),
(50, 4),
(60, 4),
(70, 4),
(80, 6),
(90, 6),
(100, 6),
(110, 6),
(120, 6),
(130, 8),
(140, 8),
(150, 8),
]
for presents, house_number in test_input:
assert first_house_with_n_presents_linear(presents) == house_number
assert first_house_with_n_presents_binary(presents) == house_number
def main(puzzle_input):
part_one_result = first_house_with_n_presents_linear(puzzle_input)
print(f'Part one: {part_one_result:,}')
part_two_result = first_house_with_n_presents_linear(
puzzle_input, elf_limit=50, presents_per_elf=11)
print(f'Part two: {part_two_result:,}')
if __name__ == '__main__':
puzzle_input = 36000000
main(puzzle_input)
|
Python
| 0
|
@@ -1870,100 +1870,8 @@
lf)%0A
- if house_number %25 10_000 == 0:%0A print(f'%7Bhouse_number:,%7D: %7Bpresents:,%7D')%0A
@@ -2432,33 +2432,8 @@
* 2
-%0A closest_house = None
%0A%0A
@@ -2600,131 +2600,36 @@
-high_presents = total_presents(high_point)%0A low_presents = total_presents(low_point)%0A if low_presents
+if total_presents(low_point)
%3E=
|
fca6289f6fe1e0e5605a7ea12a54395fe98d0425
|
Define rio tasks.
|
rio/tasks.py
|
rio/tasks.py
|
# -*- coding: utf-8 -*-
"""
rio.tasks
~~~~~~~~~~
Implement of rio tasks based on celery.
"""
from os import environ
from celery import Celery
from .conf import configure_app
def register_tasks(app):
"""Register tasks to application.
"""
pass
def create_app():
"""Celery application factory function."""
app = Celery('rio')
configure_app(app)
register_tasks(app)
return app
|
Python
| 0.000449
|
@@ -131,22 +131,53 @@
import
-Celery
+task%0Afrom celery.task.http import URL
%0A%0Afrom .
@@ -178,18 +178,18 @@
from .co
-nf
+re
import
@@ -193,251 +193,218 @@
rt c
-onfigure_app%0A%0Adef register_tasks(app):%0A %22%22%22Register tasks to application.%0A %22%22%22%0A pass%0A%0A%0Adef create_app():%0A %22%22%22Celery application factory function.%22%22%22%0A app = Celery('rio')%0A configure_app(app)%0A register_tasks(app)%0A return app
+elery%0A%0A%0Adef get_webhook(url, payload):%0A return URL(url, app=celery, dispatcher=None).get_async(**payload)%0A%0A%0Adef post_webhook(url, payload):%0A return URL(url, app=celery, dispatcher=None).post_async(**payload)
%0A
|
e5ef3e3899a7af05551c6d18b0c52adf5c067236
|
replace cleandir by shutil.rmtree
|
pp/samples/mask_pack/test_mask.py
|
pp/samples/mask_pack/test_mask.py
|
"""
This is a sample of how to define custom components.
You can make a repo out of this file, having one custom component per file
"""
import os
import shutil
from pathlib import Path
import numpy as np
import pytest
import pp
from pp.add_termination import add_gratings_and_loop_back
from pp.component import Component
from pp.components.spiral_inner_io import spiral_inner_io_euler
from pp.config import CONFIG
from pp.mask.merge_metadata import merge_metadata
def add_te(component: Component, **kwargs) -> Component:
c = pp.routing.add_fiber_array(
component=component,
grating_coupler=pp.components.grating_coupler_elliptical_te,
**kwargs,
)
c.test = "passive_optical_te"
return c
def add_tm(component, **kwargs):
c = pp.routing.add_fiber_array(
component=component,
grating_coupler=pp.components.grating_coupler_elliptical_tm,
bend_radius=20,
**kwargs,
)
return c
@pp.cell
def coupler_te(
gap: float,
length: int,
) -> Component:
"""Evanescent coupler with TE grating coupler."""
c = pp.components.coupler(gap=gap, length=length)
cc = add_te(c)
return cc
@pp.cell
def spiral_te(width: float = 0.5, length: int = 2) -> Component:
"""Spiral with TE grating_coupler
Args:
width: waveguide width um
        length: cm
"""
c = spiral_inner_io_euler(width=width, length=length)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.components.grating_coupler_elliptical_te,
bend_factory=pp.components.bend_euler,
)
return cc
@pp.cell
def spiral_tm(width=0.5, length=20e3):
"""Spiral with TM grating_coupler
Args:
width: waveguide width um
        length: um
"""
c = spiral_inner_io_euler(width=width, length=length, dx=10, dy=10, N=5)
cc = add_gratings_and_loop_back(
component=c,
grating_coupler=pp.components.grating_coupler_elliptical_tm,
bend_factory=pp.components.bend_euler,
)
return cc
@pytest.fixture
def cleandir():
build_folder = CONFIG["samples_path"] / "mask_custom" / "build"
if build_folder.exists():
shutil.rmtree(build_folder)
@pytest.fixture
def chdir():
workspace_folder = CONFIG["samples_path"] / "mask_custom"
os.chdir(workspace_folder)
@pytest.mark.usefixtures("cleandir")
def test_mask(precision: float = 1e-9) -> Path:
workspace_folder = CONFIG["samples_path"] / "mask_pack"
build_path = workspace_folder / "build"
mask_path = build_path / "mask"
mask_path.mkdir(parents=True, exist_ok=True)
gdspath = mask_path / "sample_mask.gds"
markdown_path = gdspath.with_suffix(".md")
json_path = gdspath.with_suffix(".json")
test_metadata_path = gdspath.with_suffix(".tp.json")
components = [spiral_te(length=length) for length in np.array([2, 4, 6]) * 1e4]
components += [coupler_te(length=length, gap=0.2) for length in [10, 20, 30, 40]]
c = pp.pack(components)
m = c[0]
m.name = "sample_mask"
m.write_gds(gdspath)
merge_metadata(gdspath=gdspath)
assert gdspath.exists()
assert markdown_path.exists()
assert json_path.exists()
assert test_metadata_path.exists()
return gdspath
if __name__ == "__main__":
c = test_mask()
pp.klive.show(c)
|
Python
| 0.000491
|
@@ -133,18 +133,8 @@
%22%22%22%0A
-import os%0A
impo
@@ -191,22 +191,8 @@
s np
-%0Aimport pytest
%0A%0Aim
@@ -2021,337 +2021,8 @@
c%0A%0A%0A
-@pytest.fixture%0Adef cleandir():%0A build_folder = CONFIG%5B%22samples_path%22%5D / %22mask_custom%22 / %22build%22%0A if build_folder.exists():%0A shutil.rmtree(build_folder)%0A%0A%0A@pytest.fixture%0Adef chdir():%0A workspace_folder = CONFIG%5B%22samples_path%22%5D / %22mask_custom%22%0A os.chdir(workspace_folder)%0A%0A%0A@pytest.mark.usefixtures(%22cleandir%22)%0A
def
@@ -2206,16 +2206,66 @@
%22mask%22%0A%0A
+ shutil.rmtree(build_path, ignore_errors=True)%0A
mask
|
b7927ff8f82bc6f9f025bf6e42ba69346ac242e7
|
Refactor worker/task loops
|
2018/python/2018_07.py
|
2018/python/2018_07.py
|
"""Advent of Code 2018 Day 7: The Sum of Its Parts"""
import aoc_common
from collections import deque, defaultdict
DAY = 7
TEST_INPUT = """\
Step C must be finished before step A can begin.
Step C must be finished before step F can begin.
Step A must be finished before step B can begin.
Step A must be finished before step D can begin.
Step B must be finished before step E can begin.
Step D must be finished before step E can begin.
Step F must be finished before step E can begin.
"""
def solve_part_one(puzzle_input):
return resolve_dependencies(puzzle_input)
def solve_part_two(puzzle_input):
return timeParallelWork(puzzle_input)
def create_dependency_structures(pairs):
must_finish = {first for first, _ in pairs}
waiting = {second for _, second in pairs}
order_finished = deque(sorted(must_finish - waiting))
completed = must_finish - waiting
to_go = defaultdict(list)
for dependency, task in pairs:
to_go[task].append(dependency)
return (order_finished, completed, to_go)
def resolve_dependencies(pairs):
order_finished, completed, to_go = create_dependency_structures(pairs)
while len(to_go):
for task, dependencies in sorted(to_go.items()):
if not len(set(dependencies) - completed):
order_finished.append(task)
completed.add(task)
del to_go[task]
break # Important as earlier-letter tasks run first
return "".join(order_finished)
def task_time(task):
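    # Map task letters to durations via ASCII codes: 'A' -> 1, 'B' -> 2, ...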
return ord(task) - 64
def timeParallelWork(pairs, workers=5, time_bias=60):
order_completed, _, unassigned = create_dependency_structures(pairs)
workers_times = [0] * workers
workers_tasks = [None] * workers
completed = set()
total_time = 0
# Set outstanding times for tasks with no dependencies
for idx, task in enumerate(order_completed):
workers_tasks[idx] = task
workers_times[idx] = task_time(task) + time_bias
while sum(workers_times):
total_time += 1
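        # Tick the clock: every busy worker burns one unit of remaining time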
workers_times = [max(0, t - 1) for t in workers_times]
for idx, (time, task) in enumerate(zip(workers_times, workers_tasks)):
if not time:
completed.add(task)
workers_tasks[idx] = None
for task, dependencies in sorted(unassigned.items()):
if not any(t == 0 for t in workers_times):
break # No workers available
if not len(set(dependencies) - completed):
# Assign task to worker
time = task_time(task) + time_bias
for idx, t in enumerate(workers_times):
if not t:
workers_times[idx] = time
workers_tasks[idx] = task
break
del unassigned[task]
return total_time
def parse_puzzle_input(puzzle_input):
return [
(parts[1], parts[-3])
for parts in [line.split() for line in puzzle_input.splitlines()]
]
def test_parse_puzzle_input():
expected = [
("C", "A"),
("C", "F"),
("A", "B"),
("A", "D"),
("B", "E"),
("D", "E"),
("F", "E"),
]
assert parse_puzzle_input(TEST_INPUT) == expected
def test_resolve_dependencies():
assert resolve_dependencies(parse_puzzle_input(TEST_INPUT)) == "CABDFE"
def test_time_parallel_work():
assert (
timeParallelWork(parse_puzzle_input(TEST_INPUT), workers=2, time_bias=0) == 15
)
if __name__ == "__main__":
puzzle_input = parse_puzzle_input(aoc_common.load_puzzle_input(DAY))
print(__doc__)
part_one_solution = solve_part_one(puzzle_input)
assert part_one_solution == "GJFMDHNBCIVTUWEQYALSPXZORK"
print("Part one:", part_one_solution)
part_two_solution = solve_part_two(puzzle_input)
assert part_two_solution == 1050
print("Part two:", part_two_solution)
|
Python
| 0
|
@@ -1981,16 +1981,143 @@
e_bias%0A%0A
+ def free_workers():%0A for idx, time in enumerate(workers_times):%0A if not time:%0A yield idx%0A%0A
whil
@@ -2130,32 +2130,32 @@
workers_times):%0A
-
total_ti
@@ -2408,16 +2408,62 @@
= None%0A%0A
+ for worker_idx in free_workers():%0A
@@ -2524,106 +2524,8 @@
- if not any(t == 0 for t in workers_times):%0A break # No workers available%0A%0A
@@ -2591,16 +2591,20 @@
+
+
# Assign
@@ -2619,16 +2619,20 @@
worker%0A
+
@@ -2694,98 +2694,8 @@
-for idx, t in enumerate(workers_times):%0A if not t:%0A
@@ -2700,32 +2700,39 @@
workers_times%5B
+worker_
idx%5D = time%0A
@@ -2727,20 +2727,16 @@
= time%0A
-
@@ -2753,32 +2753,39 @@
workers_tasks%5B
+worker_
idx%5D = task%0A
@@ -2804,54 +2804,54 @@
- break%0A del unassigned%5Btask%5D
+del unassigned%5Btask%5D%0A break
%0A%0A
|
2b995c68c980f1f38e1e6c6bb69ab88b78353cce
|
Update version.
|
pyhmsa/__init__.py
|
pyhmsa/__init__.py
|
#!/usr/bin/env python
__author__ = "Philippe T. Pinard"
__email__ = "philippe.pinard@gmail.com"
__version__ = "0.1.5"
__copyright__ = "Copyright (c) 2013-2014 Philippe T. Pinard"
__license__ = "MIT"
# This is required to create a namespace package.
# A namespace package allows programs to be located in different directories or
# eggs.
__import__('pkg_resources').declare_namespace(__name__)
|
Python
| 0
|
@@ -113,9 +113,9 @@
0.1.
-5
+6
%22%0A__
|
c46f46197589a89e98c8d5960d5c587a7c3dd6b0
|
delete load data part
|
train.py
|
train.py
|
import keras
import cv2
from load import load_args
if __name__ == '__main__':
args = load_args()
print args
|
Python
| 0
|
@@ -45,16 +45,41 @@
ad_args%0A
+from load import load_img
%0A%0Aif __n
@@ -133,8 +133,35 @@
t args%0A%0A
+%09img = load_img(args.PATH)%0A
|
4c025819cb34939c7b97b145155ee89c8f0b2e93
|
add concept of entity in the askomics abstraction
|
askomics/libaskomics/integration/AbstractedEntity.py
|
askomics/libaskomics/integration/AbstractedEntity.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import json
from askomics.libaskomics.ParamManager import ParamManager
from askomics.libaskomics.utils import pformat_generic_object
class AbstractedEntity(object):
"""
An AbstractedEntity represents the classes of the database.
It is defined by an uri and a label.
"""
def __init__(self, identifier):
self.uri = ":" + ParamManager.encodeToRDFURI(identifier)
self.label = identifier
self.log = logging.getLogger(__name__)
def get_uri(self):
return self.uri
def get_turtle(self):
"""
return the turtle code describing an AbstractedEntity
for the abstraction file generation.
"""
turtle = self.get_uri() + " rdf:type owl:Class ;\n"
turtle += (len(self.get_uri()) + 1) * " " + "rdfs:label " + json.dumps(self.label) + "^^xsd:string .\n\n"
return turtle
|
Python
| 0.998542
|
@@ -792,16 +792,119 @@
ass ;%5Cn%22
+%0A%0A turtle += (len(self.get_uri()) + 1) * %22 %22 + %22displaySetting:entity %5C%22true%5C%22%5E%5Exsd:boolean ;%5Cn%22
%0A
|
1a5942364a6c47a221d0e4e8c008ead1685a1e33
|
Add failing tests for first part of #213
|
test/completion/imports.py
|
test/completion/imports.py
|
# -----------------
# own structure
# -----------------
# do separate scopes
def scope_basic():
from import_tree import mod1
#? int()
mod1.a
#? []
import_tree.a
#? []
import_tree.mod1
import import_tree
#? str()
import_tree.a
#? []
import_tree.mod1
def scope_pkg():
import import_tree.mod1
#? str()
import_tree.a
#? ['mod1']
import_tree.mod1
#? int()
import_tree.mod1.a
def scope_nested():
import import_tree.pkg.mod1
#? str()
import_tree.a
#? list
import_tree.pkg.a
#? ['sqrt']
import_tree.pkg.sqrt
#? ['a', 'pkg']
import_tree.
#? float()
import_tree.pkg.mod1.a
import import_tree.random
#? set
import_tree.random.a
# -----------------
# std lib modules
# -----------------
import tokenize
#? ['tok_name']
tokenize.tok_name
from pyclbr import *
#? ['readmodule_ex']
readmodule_ex
import os
#? ['dirname']
os.path.dirname
from itertools import (tee,
islice)
#? ['islice']
islice
from functools import (partial, wraps)
#? ['wraps']
wraps
from keyword import kwlist, \
iskeyword
#? ['kwlist']
kwlist
#? []
from keyword import not_existing1, not_existing2
from tokenize import io
tokenize.generate_tokens
# -----------------
# builtins
# -----------------
import sys
#? ['prefix']
sys.prefix
#? ['append']
sys.path.append
from math import *
#? ['cos', 'cosh']
cos
def func_with_import():
import time
return time
#? ['sleep']
func_with_import().sleep
# -----------------
# completions within imports
# -----------------
#? ['sqlite3']
import sqlite3
#? ['classes']
import classes
#? ['timedelta']
from datetime import timedel
# should not be possible, because names can only be looked up 1 level deep.
#? []
from datetime.timedelta import resolution
#? []
from datetime.timedelta import
#? ['Cursor']
from sqlite3 import Cursor
# -----------------
# relative imports
# -----------------
from .import_tree import mod1
#? int()
mod1.a
from ..import_tree import mod1
#?
mod1.a
from .......import_tree import mod1
#?
mod1.a
from .. import base
#? int()
base.sample_int
from ..base import sample_int as f
#? int()
f
from . import run
#? []
run.
from . import import_tree as imp_tree
#? str()
imp_tree.a
from . import datetime as mod1
#? []
mod1.
#? str()
imp_tree.a
#? ['some_variable']
from . import some_variable
#? ['arrays']
from . import arrays
#? []
from . import import_tree as ren
# -----------------
# special positions -> edge cases
# -----------------
import datetime
#? 6 datetime
from datetime.time import time
#? []
import datetime.
#? []
import datetime.date
#? 18 ['mod1', 'random', 'pkg', 'rename1', 'rename2', 'import']
from import_tree. import pkg
#? 18 ['pkg']
from import_tree.p import pkg
#? 17 ['import_tree']
from .import_tree import
#? 10 ['run']
from ..run import
#? ['run']
from .. import run
#? []
from not_a_module import
# self import
# this can cause recursions
from imports import *
#137
import json
#? 23 json.dump
from json import load, dump
#? 17 json.load
from json import load, dump
# without the from clause:
import json, datetime
#? 7 json
import json, datetime
#? 13 datetime
import json, datetime
# -----------------
# packages
# -----------------
from import_tree.mod1 import c
#? set
c
|
Python
| 0
|
@@ -965,24 +965,140 @@
th.dirname%0A%0A
+#? os.path.join%0Afrom os.path import join%0A%0Afrom os.path import (%0A expanduser%0A)%0A%0A#? os.path.expanduser%0Aexpanduser%0A%0A
from itertoo
|
47e18f41581cc93f8c6d9b3dcb8254323b65fbd5
|
Add a '--unified-report' option to the code coverage prep script
|
utils/prepare-code-coverage-artifact.py
|
utils/prepare-code-coverage-artifact.py
|
#!/usr/bin/env python
from __future__ import print_function
'''Prepare a code coverage artifact.
- Collate raw profiles into one indexed profile.
- Generate html reports for the given binaries.
'''
import argparse
import glob
import os
import subprocess
import sys
def merge_raw_profiles(host_llvm_profdata, profile_data_dir, preserve_profiles):
print(':: Merging raw profiles...', end='')
sys.stdout.flush()
raw_profiles = glob.glob(os.path.join(profile_data_dir, '*.profraw'))
manifest_path = os.path.join(profile_data_dir, 'profiles.manifest')
profdata_path = os.path.join(profile_data_dir, 'Coverage.profdata')
with open(manifest_path, 'w') as manifest:
manifest.write('\n'.join(raw_profiles))
subprocess.check_call([host_llvm_profdata, 'merge', '-sparse', '-f',
manifest_path, '-o', profdata_path])
if not preserve_profiles:
for raw_profile in raw_profiles:
os.remove(raw_profile)
os.remove(manifest_path)
print('Done!')
return profdata_path
def prepare_html_report(host_llvm_cov, profile, report_dir, binary,
restricted_dirs):
print(':: Preparing html report for {0}...'.format(binary), end='')
sys.stdout.flush()
binary_report_dir = os.path.join(report_dir, os.path.basename(binary))
invocation = [host_llvm_cov, 'show', binary, '-format', 'html',
'-instr-profile', profile, '-o', binary_report_dir,
'-show-line-counts-or-regions', '-Xdemangler', 'c++filt',
'-Xdemangler', '-n'] + restricted_dirs
subprocess.check_call(invocation)
with open(os.path.join(binary_report_dir, 'summary.txt'), 'wb') as Summary:
subprocess.check_call([host_llvm_cov, 'report', binary,
'-instr-profile', profile], stdout=Summary)
print('Done!')
def prepare_html_reports(host_llvm_cov, profdata_path, report_dir, binaries,
restricted_dirs):
for binary in binaries:
prepare_html_report(host_llvm_cov, profdata_path, report_dir, binary,
restricted_dirs)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('host_llvm_profdata', help='Path to llvm-profdata')
parser.add_argument('host_llvm_cov', help='Path to llvm-cov')
parser.add_argument('profile_data_dir',
help='Path to the directory containing the raw profiles')
parser.add_argument('report_dir',
help='Path to the output directory for html reports')
parser.add_argument('binaries', metavar='B', type=str, nargs='*',
help='Path to an instrumented binary')
parser.add_argument('--only-merge', action='store_true',
help='Only merge raw profiles together, skip report '
'generation')
parser.add_argument('--preserve-profiles',
help='Do not delete raw profiles', action='store_true')
parser.add_argument('--use-existing-profdata',
help='Specify an existing indexed profile to use')
parser.add_argument('--restrict', metavar='R', type=str, nargs='*',
default=[],
help='Restrict the reporting to the given source paths')
args = parser.parse_args()
if args.use_existing_profdata and args.only_merge:
print('--use-existing-profdata and --only-merge are incompatible')
exit(1)
if args.use_existing_profdata:
profdata_path = args.use_existing_profdata
else:
profdata_path = merge_raw_profiles(args.host_llvm_profdata,
args.profile_data_dir,
args.preserve_profiles)
if not args.only_merge:
prepare_html_reports(args.host_llvm_cov, profdata_path, args.report_dir,
args.binaries, args.restrict)
|
Python
| 0.000216
|
@@ -1102,33 +1102,35 @@
eport_dir, binar
-y
+ies
,%0A
@@ -1217,17 +1217,19 @@
at(binar
-y
+ies
), end='
@@ -1262,70 +1262,170 @@
-binary_report_dir = os.path.join(report_dir, os.path.basename(
+objects = %5B%5D%0A for i, binary in enumerate(binaries):%0A if i == 0:%0A objects.append(binary)%0A else:%0A objects.extend(('-object',
bina
@@ -1472,18 +1472,23 @@
how'
-, binary,
+%5D + objects + %5B
'-fo
@@ -1553,23 +1553,16 @@
, '-o',
-binary_
report_d
@@ -1767,15 +1767,8 @@
oin(
-binary_
repo
@@ -1863,25 +1863,29 @@
'report'
-, binary,
+%5D + objects +
%0A
@@ -1900,32 +1900,33 @@
+%5B
'-instr-profile'
@@ -2066,32 +2066,48 @@
+ unified_report,
restricted_dirs
@@ -2117,32 +2117,326 @@
-for binary in binaries:%0A
+if unified_report:%0A prepare_html_report(host_llvm_cov, profdata_path, report_dir, binaries,%0A restricted_dirs)%0A else:%0A for binary in binaries:%0A binary_report_dir = os.path.join(report_dir,%0A os.path.basename(binary))%0A
@@ -2481,32 +2481,39 @@
profdata_path,
+binary_
report_dir, bina
@@ -2511,16 +2511,8 @@
dir,
- binary,
%0A
@@ -2527,32 +2527,46 @@
+ %5Bbinary%5D,
restricted_dirs
@@ -3596,24 +3596,159 @@
le to use')%0A
+ parser.add_argument('--unified-report', action='store_true',%0A help='Emit a unified report for all binaries')%0A
parser.a
@@ -4399,16 +4399,119 @@
files)%0A%0A
+ if not len(args.binaries):%0A print('No binaries specified, no work to do!')%0A exit(1)%0A%0A
if n
@@ -4653,16 +4653,37 @@
inaries,
+ args.unified_report,
args.re
|
2b9d8dba5f421ad854574e9d1b7004578dd78346
|
Bump version to 4.1.1b2
|
platformio/__init__.py
|
platformio/__init__.py
|
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 1, "1b1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A new generation ecosystem for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"RISC-V, FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
|
Python
| 0
|
@@ -624,17 +624,17 @@
, 1, %221b
-1
+2
%22)%0A__ver
|
89a4d4dcf5533e2045d32282c3ad43c42d745a34
|
fix problem with wrong datatype
|
train.py
|
train.py
|
#!/usr/bin/env python
#
# (c) 2016 -- onwards Georgios Gousios <gousiosg@gmail.com>
#
import argparse
import pickle
import json
from keras.models import Sequential
from keras.layers import LSTM, Dense, Activation, Embedding, Bidirectional
from keras.callbacks import CSVLogger, EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from config import *
parser = argparse.ArgumentParser()
parser.add_argument('--prefix', default='default')
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--lstm_output', type=float, default=256)
parser.add_argument('--embedding_output', type=float, default=512)
parser.add_argument('--checkpoint', type=bool, default=False)
args = parser.parse_args()
print("Loading data set for prefix %s" % args.prefix)
x_train = pickle.load(open(x_train_file % args.prefix))
y_train = pickle.load(open(y_train_file % args.prefix))
x_val = pickle.load(open(x_val_file % args.prefix))
y_val = pickle.load(open(y_val_file % args.prefix))
config = pickle.load(open(config_file % args.prefix))
print("Training on %d merged, %d unmerged PRs" % (y_train[y_train == 1].size,
y_train[y_train == 0].size))
config.update(vars(args))
print("Training configuration:")
print json.dumps(config, indent=1)
model = Sequential()
model.add(Embedding(config['vocabulary_size'], args.embedding_output, dropout=args.dropout))
model.add(LSTM(args.lstm_output, dropout_W=args.dropout, dropout_U=args.dropout))
model.add(Dense(1))
model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy', 'fmeasure'])
print('Train...')
csv_logger = CSVLogger('traininglog_%s.csv' % args.prefix)
early_stopping = EarlyStopping(monitor='val_loss', patience=5)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=3, min_lr=0.001)
callbacks = [csv_logger, early_stopping, reduce_lr]
if args.checkpoint:
checkpoint = ModelCheckpoint(checkpoint_file % args.prefix, monitor='val_loss')
callbacks.insert(checkpoint)
model.fit(x_train, y_train, batch_size=args.batch_size, nb_epoch=args.epochs,
validation_data=(x_val, y_val), callbacks=callbacks)
score, acc = model.evaluate(x_val, y_val, batch_size=args.batch_size)
print('Test score:', score)
print('Test accuracy:', acc)
|
Python
| 0.000344
|
@@ -637,36 +637,34 @@
m_output', type=
-floa
+in
t, default=256)%0A
@@ -702,36 +702,34 @@
g_output', type=
-floa
+in
t, default=512)%0A
|
94756c1e7e6a164546b4808c8b8fb9db78e1990a
|
Update cluster_info.py
|
examples/cluster_info.py
|
examples/cluster_info.py
|
#!/usr/local/bin/python2.7
from pythonlsf import lsf
print '\n Hosts in cluster: ', lsf.get_host_names()
print '\n Clustername: ', lsf.ls_getclustername(), '\n'
print '{0:15s} {1:20s} {2:20s} {3:5s} {4:4s}'.format('Hostname', 'Type',
'Model', 'Cores', 'Load')
for info in lsf.get_host_info():
    # Deal with the case when the hostname contains "-".
if '-' in info.hostName:
load = lsf.get_host_load("hname=" + "'" + info.hostName + "'", lsf.R15M)
else:
load = lsf.get_host_load("hname=" + info.hostName, lsf.R15M)
if load >= 65535:
load = -1
print '{0:15s} {1:20s} {2:20s} {3:5d} {4:4.2f}'.format(info.hostName,
info.hostType,
info.hostModel,
info.cores,
load)
resources = ""
index = 0;
if info.nRes > 0:
while(1):
item = lsf.stringArray_getitem(info.resources,index)
if(item):
resources += item +" "
index += 1
else:
break;
print ' +--> Resources:', resources
|
Python
| 0.000001
|
@@ -1,30 +1,26 @@
#!
+
/usr/
-local/
bin/
+env
python
-2.7
%0A%0Afr
@@ -44,16 +44,58 @@
rt lsf%0A%0A
+if lsf.lsb_init(%22test%22) %3E 0:%0A exit(1)%0A%0A
print '%5C
|
e187ab0f5285378a891552b7cecba0bad47395ab
|
upgrade plenum version to 1.6
|
plenum/__metadata__.py
|
plenum/__metadata__.py
|
"""
plenum package metadata
"""
__title__ = 'indy-plenum'
__version_info__ = (1, 5)
__version__ = '.'.join(map(str, __version_info__))
__author__ = "Hyperledger"
__author_email__ = 'hyperledger-indy@lists.hyperledger.org'
__maintainer__ = 'Hyperledger'
__maintainer_email__ = 'hyperledger-indy@lists.hyperledger.org'
__url__ = 'https://github.com/hyperledger/indy-plenum'
__description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__long_description__ = 'Plenum Byzantine Fault Tolerant Protocol'
__download_url__ = "https://github.com/hyperledger/indy-plenum/tarball/{}".format(__version__)
__license__ = "Apache 2.0"
__all__ = [
'__title__',
'__version_info__',
'__version__',
'__author__',
'__author_email__',
'__maintainer__',
'__maintainer_email__',
'__url__',
'__description__',
'__long_description__',
'__download_url__',
'__license__'
]
|
Python
| 0
|
@@ -79,9 +79,9 @@
(1,
-5
+6
)%0A__
|
8fb2d6b9194968ae6d56f0874b210101fe06c205
|
Update matmul binop in lu docs.
|
scipy/sparse/linalg/dsolve/_add_newdocs.py
|
scipy/sparse/linalg/dsolve/_add_newdocs.py
|
from numpy.lib import add_newdoc
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU',
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr * A * Pc = L * U
To construct these `SuperLU` objects, call the `splu` and `spilu`
functions.
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
Methods
-------
solve
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix, linalg as sla
>>> A = csc_matrix([[1,2,0,4],[1,0,0,1],[1,0,2,1],[2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = sla.splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([0, 2, 1, 3], dtype=int32)
>>> lu.perm_c
array([2, 0, 1, 3], dtype=int32)
The L and U factors are sparse matrices in CSC format:
>>> lu.L.A
array([[ 1. , 0. , 0. , 0. ],
[ 0. , 1. , 0. , 0. ],
[ 0. , 0. , 1. , 0. ],
[ 1. , 0.5, 0.5, 1. ]])
>>> lu.U.A
array([[ 2., 0., 1., 4.],
[ 0., 2., 1., 1.],
[ 0., 0., 1., 1.],
[ 0., 0., 0., -5.]])
The permutation matrices can be constructed:
>>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
>>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
We can reassemble the original matrix:
>>> (Pr.T * (lu.L * lu.U) * Pc.T).A
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
""")
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('solve',
"""
solve(rhs[, trans])
Solves linear system of equations with one or several right-hand sides.
Parameters
----------
rhs : ndarray, shape (n,) or (n, k)
Right hand side(s) of equation
trans : {'N', 'T', 'H'}, optional
Type of system to solve::
'N': A * x == rhs (default)
'T': A^T * x == rhs
'H': A^H * x == rhs
i.e., normal, transposed, and hermitian conjugate.
Returns
-------
x : ndarray, shape ``rhs.shape``
Solution vector(s)
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('L',
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('U',
"""
Upper triangular factor as a `scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('shape',
"""
Shape of the original matrix as a tuple of ints.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('nnz',
"""
Number of nonzero elements in the matrix.
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_c',
"""
Permutation Pc represented as an array of indices.
The column permutation matrix can be reconstructed via:
>>> Pc = np.zeros((n, n))
>>> Pc[np.arange(n), perm_c] = 1
"""))
add_newdoc('scipy.sparse.linalg.dsolve._superlu', 'SuperLU', ('perm_r',
"""
Permutation Pr represented as an array of indices.
The row permutation matrix can be reconstructed via:
>>> Pr = np.zeros((n, n))
>>> Pr[perm_r, np.arange(n)] = 1
"""))
|
Python
| 0
|
@@ -192,21 +192,21 @@
Pr
-* A *
+@ A @
Pc = L
* U%0A
@@ -201,17 +201,17 @@
Pc = L
-*
+@
U%0A%0A
@@ -1781,25 +1781,25 @@
r.T
-*
+@
(lu.L
-*
+@
lu.U)
-*
+@
Pc.
@@ -2332,17 +2332,17 @@
: A
-*
+@
x == rh
@@ -2377,17 +2377,17 @@
: A%5ET
-*
+@
x == rh
@@ -2415,9 +2415,9 @@
A%5EH
-*
+@
x =
|
b65e329720154799057cfeee023b4b86e0fc85ac
|
Require correct hash lengths
|
pylibscrypt/mcf.py
|
pylibscrypt/mcf.py
|
#!/usr/bin/env python
# Copyright (c) 2014 Jan Varho
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Modular Crypt Format support for scrypt, compatible with libscrypt"""
import base64
import os
import struct
from consts import *
def scrypt_mcf(scrypt, password, salt=None, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p):
"""Derives a Modular Crypt Format hash using the scrypt KDF given
Expects the signature:
scrypt(password, salt, N=SCRYPT_N, r=SCRYPT_r, p=SCRYPT_p, olen=64)
If no salt is given, 16 random bytes are generated using os.urandom."""
if salt is None:
salt = os.urandom(16)
elif not (1 <= len(salt) <= 16):
raise ValueError('salt must be 1-16 bytes')
if r > 255:
raise ValueError('scrypt_mcf r out of range [1,255]')
if p > 255:
raise ValueError('scrypt_mcf p out of range [1,255]')
if N > 2**31:
raise ValueError('scrypt_mcf N out of range [2,2**31]')
hash = scrypt(password, salt, N, r, p)
h64 = base64.b64encode(hash)
s64 = base64.b64encode(salt)
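    # Store log2(N) in a single byte of params; N is assumed to be a power of two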
t = 1
while 2**t < N:
t += 1
params = p + (r << 8) + (t << 16)
return (
b'$s1' +
('$%06x' % params).encode() +
b'$' + s64 +
b'$' + h64
)
def _scrypt_mcf_parse_s1(mcf):
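    """Parse a libscrypt $s1$ MCF string into (N, r, p, salt, hash), or None."""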
s = mcf.split(b'$')
if not (mcf.startswith(b'$s1$') and len(s) == 5):
return None
params, s64, h64 = s[2:]
params = base64.b16decode(params, True)
salt = base64.b64decode(s64)
hash = base64.b64decode(h64)
if len(params) != 3:
raise ValueError('Unrecognized MCF parameters')
t, r, p = struct.unpack('3B', params)
N = 2 ** t
return N, r, p, salt, hash
# Crypt base 64
_cb64 = './0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_icb64 = (
[None] * 46 +
[
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, None, None, None, None, None,
None, None, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, None, None, None,
None, None, None, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63
] +
[None] * 133
)
def _cb64dec(arr, obytes):
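    """Decode crypt-base64: 6-bit groups packed little-endian into obytes bytes."""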
out = bytearray()
val = bits = pos = 0
for b in arr:
val += _icb64[b] << bits
bits += 6
if bits >= 8:
out.append(val & 0xff)
bits -= 8
val >>= 8
if len(out) == obytes:
return out
raise TypeError
def _scrypt_mcf_parse_7(mcf):
s = mcf.split(b'$')
if not (mcf.startswith(b'$7$') and len(s) == 4):
return None
s64 = bytearray(s[2])
h64 = bytearray(s[3])
try:
N = 2 ** _icb64[s64[0]]
r = (_icb64[s64[1]] + (_icb64[s64[2]] << 6) + (_icb64[s64[3]] << 12) +
(_icb64[s64[4]] << 18) + (_icb64[s64[5]] << 24))
p = (_icb64[s64[6]] + (_icb64[s64[7]] << 6) + (_icb64[s64[8]] << 12) +
(_icb64[s64[9]] << 18) + (_icb64[s64[10]] << 24))
salt = bytes(s64[11:])
hash = bytes(_cb64dec(h64, 32))
except (IndexError, TypeError):
raise ValueError('Unrecognized MCF format')
return N, r, p, salt, hash
def scrypt_mcf_check(scrypt, mcf, password):
"""Returns True if the password matches the given MCF hash
Supports both the libscrypt $s1$ format and the $7$ format."""
if not isinstance(mcf, bytes):
raise TypeError
if not isinstance(password, bytes):
raise TypeError
params = _scrypt_mcf_parse_s1(mcf)
if params is None:
params = _scrypt_mcf_parse_7(mcf)
if params is None:
raise ValueError('Unrecognized MCF hash')
N, r, p, salt, hash = params
h = scrypt(password, salt, N=N, r=r, p=p, olen=len(hash))
return hash == h
|
Python
| 0.996339
|
@@ -2699,16 +2699,20 @@
lt, hash
+, 64
%0A%0A%0A# Cry
@@ -4247,16 +4247,20 @@
lt, hash
+, 32
%0A%0A%0Adef s
@@ -4755,16 +4755,22 @@
lt, hash
+, hlen
= param
@@ -4826,17 +4826,12 @@
len=
+h
len
-(hash)
)%0A
|
1a18445482c67b38810e330065e5ff04e772af4a
|
Fix from_email in IncomingLetter migrations
|
foundation/letters/migrations/0009_auto_20151216_0656.py
|
foundation/letters/migrations/0009_auto_20151216_0656.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def split_models(apps, schema_editor):
# We can't import the Person model directly as it may be a newer
# version than this migration expects. We use the historical version.
L = apps.get_model("letters", "Letter")
OL = apps.get_model("letters", "OutgoingLetter")
IL = apps.get_model("letters", "IncomingLetter")
for letter in L.objects.filter(incoming=True).all():
IL.objects.create(parent=letter,
temp_from_email=letter.email,
temp_sender=letter.sender_office)
for letter in L.objects.filter(incoming=False).all():
OL.objects.create(parent=letter,
temp_send_at=letter.send_at,
temp_sender=letter.sender_user,
temp_author=letter.author,
temp_email=letter.email)
class Migration(migrations.Migration):
dependencies = [
('letters', '0008_auto_20151216_0647'),
]
operations = [
migrations.RunPython(split_models),
]
|
Python
| 0
|
@@ -567,13 +567,8 @@
emp_
-from_
emai
|
269b511d113b5b2029fe3cc18872cfa7fc4009fd
|
Add a feature: mortality rate
|
problem/covid/lag/lag.py
|
problem/covid/lag/lag.py
|
#! /usr/bin/env python
# Copyright 2022 John Hanley. MIT licensed.
from pathlib import Path
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import LinearRegression, SGDRegressor
from sklearn.svm import LinearSVR
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import typer
from problem.covid.us_cases_deaths import get_cases_and_deaths
def _get_daily_cases_and_deaths():
df = get_cases_and_deaths()
df.cases = df.cases.diff().shift(-1)
df.deaths = df.deaths.diff().shift(-1)
df = df.dropna()
df.cases = df.cases.astype(np.int32)
df.deaths = df.deaths.astype(np.int32)
# clip a March 2022 negative deaths figure
df.cases = df.cases.clip(lower=0)
df.deaths = df.deaths.clip(lower=0)
df = df.set_index('date')
return df
def predict(out_file=Path('~/Desktop/lag.png')):
out_file = Path(out_file).expanduser()
df = _get_daily_cases_and_deaths()
train, test = _split(df)
assert len(train) >= 224
# dates = df.index.get_level_values(0)
# from sklearn.preprocessing import StandardScaler
# x_train = np.array(StandardScaler().fit_transform(train.cases))
x_train = np.array(train.cases)
y_train = np.array(train.deaths)
model = RandomForestRegressor()
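    # scikit-learn expects a 2-D feature matrix, hence the reshape to one column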
model.fit(x_train.reshape(-1, 1), y_train)
x_test = np.array(test.cases)
y_test = np.array(test.deaths)
y_pred = model.predict(x_test.reshape(-1, 1))
_, axs = plt.subplots(1, 2)
plt.sca(axs[0])
train['cases'] /= 1e2
sns.scatterplot(data=train)
plt.xticks(rotation=45)
y_limit = 6_000
plt.gca().set_ylim((0, y_limit))
plt.sca(axs[1])
test['cases'] /= 1e2
test['pred_deaths'] = y_pred
sns.scatterplot(data=test)
plt.xticks(rotation=45)
plt.gca().set_ylim((0, y_limit))
plt.savefig(out_file)
print(f'R2: {model.score(x_test.reshape(-1, 1), y_test):.3f}')
print(f'MSE: {np.mean((y_test - y_pred)**2):.3f}')
print(f'MAE: {np.mean(np.abs(y_test - y_pred)):.3f}')
def _split(df: pd.DataFrame, split_date='2020-09-01'):
train = df.query(f'date < "{split_date}"')
test = df.query(f'date >= "{split_date}"')
assert len(df) == len(train) + len(test) # no rows left behind
return (train.copy(),
test.copy())
if __name__ == '__main__':
typer.run(predict)
|
Python
| 0.999998
|
@@ -141,153 +141,8 @@
sor%0A
-from sklearn.linear_model import LinearRegression, SGDRegressor%0Afrom sklearn.svm import LinearSVR%0Afrom sklearn.tree import DecisionTreeRegressor%0A
impo
@@ -480,16 +480,144 @@
dropna()
+ # Trim the final row.%0A df%5B'mortality'%5D = df.deaths / df.cases%0A df = df.dropna() # Drop 20 more, from before March 2020.
%0A df.
@@ -1053,17 +1053,17 @@
in) %3E= 2
-2
+0
4%0A%0A #
@@ -1254,21 +1254,40 @@
y(train.
-cases
+drop(columns=%5B'deaths'%5D)
)%0A y_
@@ -1375,31 +1375,16 @@
(x_train
-.reshape(-1, 1)
, y_trai
@@ -1414,21 +1414,40 @@
ay(test.
-cases
+drop(columns=%5B'deaths'%5D)
)%0A y_
@@ -1508,31 +1508,16 @@
t(x_test
-.reshape(-1, 1)
)%0A%0A _
@@ -1572,25 +1572,78 @@
train
-%5B'
+.mortality *= 1e4%0A train.mortality += 3_000%0A train.
cases
-'%5D
/= 1e2%0A
@@ -1792,17 +1792,68 @@
test
-%5B'
+.mortality *= 1e4%0A test.mortality += 3_000%0A test.
cases
-'%5D
/=
@@ -1889,16 +1889,77 @@
y_pred%0A
+ test%5B'residue'%5D = np.abs(test.pred_deaths - test.deaths)%0A
sns.
@@ -2112,23 +2112,8 @@
test
-.reshape(-1, 1)
, y_
@@ -2288,12 +2288,12 @@
'202
-0-09
+1-03
-01'
|
11b293afd11b6d568644a559dff9299ec9dc916f
|
Add comments on current Timer abstraction
|
plenum/common/timer.py
|
plenum/common/timer.py
|
from abc import ABC, abstractmethod
from functools import wraps
from typing import Callable, NamedTuple
import time
from sortedcontainers import SortedListWithKey
class TimerService(ABC):
@abstractmethod
def get_current_time(self) -> float:
pass
@abstractmethod
def schedule(self, delay: int, callback: Callable):
pass
@abstractmethod
def cancel(self, callback: Callable):
pass
class QueueTimer(TimerService):
TimerEvent = NamedTuple('TimerEvent', [('timestamp', float), ('callback', Callable)])
def __init__(self, get_current_time=time.perf_counter):
self._get_current_time = get_current_time
self._events = SortedListWithKey(key=lambda v: v.timestamp)
def queue_size(self):
return len(self._events)
def service(self):
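        # Fire every queued callback whose timestamp has already passed, in order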
while len(self._events) and self._events[0].timestamp <= self._get_current_time():
self._events.pop(0).callback()
def get_current_time(self) -> float:
return self._get_current_time()
def schedule(self, delay: float, callback: Callable):
timestamp = self._get_current_time() + delay
self._events.add(self.TimerEvent(timestamp=timestamp, callback=callback))
def cancel(self, callback: Callable):
indexes = [i for i, ev in enumerate(self._events) if ev.callback == callback]
for i in reversed(indexes):
del self._events[i]
class RepeatingTimer:
def __init__(self, timer: TimerService, interval: int, callback: Callable, active: bool = True):
@wraps(callback)
def wrapped_callback():
if not self._active:
return
callback()
self._timer.schedule(self._interval, self._callback)
self._timer = timer
self._interval = interval
self._callback = wrapped_callback
self._active = False
if active:
self.start()
def start(self):
if self._active:
return
self._active = True
self._timer.schedule(self._interval, self._callback)
def stop(self):
if not self._active:
return
self._active = False
self._timer.cancel(self._callback)
|
Python
| 0
|
@@ -160,16 +160,63 @@
thKey%0A%0A%0A
+# TODO: Consider renaming this into Scheduler?%0A
class Ti
@@ -319,32 +319,236 @@
@abstractmethod%0A
+ # TODO: Swapping callback and delay would allow defaulting delay to zero,%0A # effectively simplifying use-case when we want delay execution of some code%0A # just to allow some other work to run%0A
def schedule
|
317c19b2d2767276a426a4d058191dbaaf8f4c6f
|
Extend the duration of the tough_filters_cases page set.
|
tools/perf/page_sets/tough_filters_cases.py
|
tools/perf/page_sets/tough_filters_cases.py
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class ToughFiltersCasesPage(page_module.Page):
def RunSmoothness(self, action_runner):
action_runner.Wait(5)
class ToughFiltersCasesPageSet(page_set_module.PageSet):
"""
Description: Self-driven filters animation examples
"""
def __init__(self):
super(ToughFiltersCasesPageSet, self).__init__(
archive_data_file='data/tough_filters_cases.json',
bucket=page_set_module.PARTNER_BUCKET)
urls_list = [
'http://letmespellitoutforyou.com/samples/svg/filter_terrain.svg',
'http://static.bobdo.net/Analog_Clock.svg',
]
for url in urls_list:
self.AddPage(ToughFiltersCasesPage(url, self))
|
Python
| 0.000035
|
@@ -376,9 +376,10 @@
ait(
-5
+10
)%0A%0A%0A
|
bdddee22a4e710e580e105cf187ce77c59f09d31
|
enable auto parallel and non-uniform k-points
|
silicon-crystal/silicon-crystal.py
|
silicon-crystal/silicon-crystal.py
|
###
### GPAW benchmark: Silicon Crystal
###
from __future__ import print_function
from ase.lattice import bulk
from gpaw import GPAW, Mixer, ConvergenceError
from gpaw.eigensolvers.rmm_diis import RMM_DIIS
from gpaw.mpi import size, rank
try:
from gpaw import use_mic
except ImportError:
use_mic = False
# no. of replicates in each dimension (increase to scale up the system)
x = 4
y = 4
z = 4
# other parameters
h = 0.22
kpt = 1
txt = 'output.txt'
maxiter = 6
conv = {'eigenstates' : 1e-4, 'density' : 1e-2, 'energy' : 1e-3}
# output benchmark parameters
if rank == 0:
print("#"*60)
print("GPAW benchmark: Silicon Crystal")
print(" dimensions: x=%d, y=%d, z=%d" % (x, y, z))
print(" grid spacing: h=%f" % h)
print(" Brillouin-zone sampling: kpts=(%d,%d,%d)" % (kpt, kpt, kpt))
print(" MPI task: %d out of %d" % (rank, size))
print(" using MICs: " + repr(use_mic))
print("#"*60)
print("")
# setup the system
atoms = bulk('Si', cubic=True)
atoms = atoms.repeat((x, y, z))
calc = GPAW(h=h, nbands=-20, width=0.2,
kpts=(kpt,kpt,kpt), xc='PBE',
maxiter=maxiter,
txt=txt, eigensolver=RMM_DIIS(niter=2),
mixer=Mixer(0.1, 5, 100),
)
atoms.set_calculator(calc)
# execute the run
try:
atoms.get_potential_energy()
except ConvergenceError:
pass
|
Python
| 0.000001
|
@@ -432,12 +432,19 @@
%0Akpt
+s
=
-1
+(1,1,1)
%0Atxt
@@ -785,36 +785,20 @@
pts=
-(%25d,%25d,%25d)%22 %25 (kpt, kpt,
+%22 + str(
kpt
+s
))%0A
@@ -878,19 +878,18 @@
Cs: %22 +
-rep
+st
r(use_mi
@@ -1068,21 +1068,12 @@
pts=
-(
kpt
-,kpt,kpt)
+s
, xc
@@ -1161,16 +1161,56 @@
ter=2),%0A
+ parallel=%7B'sl_auto': True%7D,%0A
|
130d966f933983dc366a3023ac78a2ba24bf064c
|
add flag for data set binarization
|
train.py
|
train.py
|
"""
Andrin Jenal, 2017
ETH Zurich
"""
import tensorflow as tf
from dcgan import DCGAN
import hdf5_dataset
from checkpoint_saver import CheckpointSaver
from visualizer import ImageVisualizer
flags = tf.app.flags
flags.DEFINE_string("dataset", "datasets/celeb_dataset_3k_colored.h5", "sample results dir")
flags.DEFINE_string("data_dir", "results/", "checkpoint and logging results dir")
flags.DEFINE_integer("batch_size", 128, "batch size")
flags.DEFINE_integer("image_size", 64, "image size")
flags.DEFINE_integer("channels", 3, "color channels")
flags.DEFINE_integer("max_epoch", 500, "max epoch")
flags.DEFINE_integer("z_size", 100, "size of latent (feature?) space")
flags.DEFINE_float("learning_rate", 5e-4, "learning rate")
flags.DEFINE_integer("generation_step", 1, "generate random images")
FLAGS = flags.FLAGS
def main(_):
# create checkpoint saver
# the checkpoint saver, can create checkpoint files, which later can be use to restore a model state, but it also
# audits the model progress to a log file
checkpoint_saver = CheckpointSaver(FLAGS.data_dir)
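    # FLAGS.__dict__['__flags'] exposes the parsed flag-name -> value mapping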
checkpoint_saver.save_experiment_config(FLAGS.__dict__['__flags'])
# load training data
data_set, data_set_shape = hdf5_dataset.read_data_set(FLAGS.dataset, image_size=FLAGS.image_size, shape=(FLAGS.image_size, FLAGS.image_size, FLAGS.channels), binarized=False, validation=0)
train_data = data_set.train
# create a data visualizer
visualizer = ImageVisualizer(checkpoint_saver.get_experiment_dir(), image_size=FLAGS.image_size)
visualizer.training_data_sample(train_data)
# create the actual DCGAN model
dcgan_model = DCGAN(FLAGS.image_size, FLAGS.channels, z_size=FLAGS.z_size, learning_rate=FLAGS.learning_rate)
print("start", type(dcgan_model).__name__, "model training")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
dcgan_model.initialize_summaries(sess, checkpoint_saver.get_experiment_dir())
for epoch in range(FLAGS.max_epoch):
for images in train_data.next_batch(FLAGS.batch_size):
d_loss, g_loss = dcgan_model.update_params(sess, images)
msg = "epoch: %3d" % epoch + " Discriminator loss %.4f" % d_loss + " Generator loss %.4f" % g_loss
checkpoint_saver.audit_loss(msg)
dcgan_model.update_summaries(sess, images, epoch)
if epoch % FLAGS.generation_step == 0:
visualizer.save_generated_samples(dcgan_model.generate_samples(sess, num_samples=200), epoch)
if __name__ == '__main__':
tf.app.run()
|
Python
| 0
|
@@ -293,32 +293,98 @@
e results dir%22)%0A
+flags.DEFINE_boolean(%22binarized%22, False, %22data set binarization%22)%0A
flags.DEFINE_str
@@ -1415,20 +1415,30 @@
arized=F
-alse
+LAGS.binarized
, valida
|
9933920ebc49b4e275ff93bd6d918945ee77e9a4
|
Make keepalive tests under macOS less stressful
|
cheroot/test/test_wsgi.py
|
cheroot/test/test_wsgi.py
|
"""Test wsgi."""
from concurrent.futures.thread import ThreadPoolExecutor
import pytest
import portend
import requests
from requests_toolbelt.sessions import BaseUrlSession as Session
from jaraco.context import ExceptionTrap
from cheroot import wsgi
@pytest.fixture
def simple_wsgi_server():
"""Fucking simple wsgi server fixture (duh)."""
port = portend.find_available_local_port()
def app(environ, start_response):
status = '200 OK'
response_headers = [('Content-type', 'text/plain')]
start_response(status, response_headers)
return [b'Hello world!']
host = '::'
addr = host, port
server = wsgi.Server(addr, app, timeout=20)
url = 'http://localhost:{port}/'.format(**locals())
with server._run_in_thread() as thread:
yield locals()
def test_connection_keepalive(simple_wsgi_server):
"""Test the connection keepalive works (duh)."""
session = Session(base_url=simple_wsgi_server['url'])
pooled = requests.adapters.HTTPAdapter(
pool_connections=1, pool_maxsize=1000,
)
session.mount('http://', pooled)
def do_request():
with ExceptionTrap(requests.exceptions.ConnectionError) as trap:
resp = session.get('info')
resp.raise_for_status()
return bool(trap)
with ThreadPoolExecutor(max_workers=50) as pool:
tasks = [
pool.submit(do_request)
for n in range(1000)
]
failures = sum(task.result() for task in tasks)
assert not failures
|
Python
| 0.000013
|
@@ -246,16 +246,104 @@
rt wsgi%0A
+from cheroot._compat import IS_MACOS, IS_WINDOWS%0A%0A%0AIS_SLOW_ENV = IS_MACOS or IS_WINDOWS%0A
%0A%0A@pytes
@@ -767,16 +767,40 @@
timeout=
+600 if IS_SLOW_ENV else
20)%0A
@@ -1454,16 +1454,39 @@
workers=
+10 if IS_SLOW_ENV else
50) as p
@@ -1544,16 +1544,16 @@
equest)%0A
-
@@ -1571,16 +1571,40 @@
n range(
+250 if IS_SLOW_ENV else
1000)%0A
|
92c876aaa9258928123469593b36097c9834b937
|
Remove unneeded list concatenation
|
tests/scripts/generate_bignum_tests.py
|
tests/scripts/generate_bignum_tests.py
|
#!/usr/bin/env python3
"""Generate test data for bignum functions.
With no arguments, generate all test data. With non-option arguments,
generate only the specified files.
"""
# Copyright The Mbed TLS Contributors
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import sys
from typing import Callable, Dict, Iterator, List, Optional, Tuple, TypeVar
import scripts_path # pylint: disable=unused-import
from mbedtls_dev import test_case
from mbedtls_dev import test_generation
T = TypeVar('T') #pylint: disable=invalid-name
def hex_to_int(val):
return int(val, 16) if val else 0
def quote_str(val):
return "\"{}\"".format(val)
class BignumTarget(test_generation.BaseTarget):
"""Target for bignum (mpi) test case generation."""
target_basename = 'test_suite_mpi.generated'
class BignumOperation(BignumTarget):
"""Common features for test cases covering bignum operations.
Attributes:
symbol: Symbol used for operation in description.
input_values: List of values to use as test case inputs.
input_cases: List of tuples containing pairs of test case inputs. This
can be used to implement specific pairs of inputs.
"""
symbol = ""
input_values = [
"", "0", "7b", "-7b",
"0000000000000000123", "-0000000000000000123",
"1230000000000000000", "-1230000000000000000"
] # type: List[str]
input_cases = [] # type: List[Tuple[str, ...]]
def __init__(self, val_l: str, val_r: str) -> None:
super().__init__()
self.arg_l = val_l
self.arg_r = val_r
self.int_l = hex_to_int(val_l)
self.int_r = hex_to_int(val_r)
def arguments(self):
return [quote_str(self.arg_l), quote_str(self.arg_r), self.result()]
def description(self):
if not self.case_description:
self.case_description = "{} {} {}".format(
self.value_description(self.arg_l),
self.symbol,
self.value_description(self.arg_r)
)
return super().description()
def result(self) -> Optional[str]:
return None
@staticmethod
def value_description(val) -> str:
if val == "":
return "0 (null)"
if val == "0":
return "0 (1 limb)"
if val[0] == "-":
tmp = "negative"
val = val[1:]
else:
tmp = "positive"
if val[0] == "0":
tmp += " with leading zero limb"
elif len(val) > 10:
tmp = "large " + tmp
return tmp
@classmethod
def get_value_pairs(cls) -> Iterator[Tuple[str, ...]]:
"""Generate value pairs."""
for pair in list(
itertools.combinations(cls.input_values, 2)
) + cls.input_cases:
yield pair
@classmethod
def generate_tests(cls) -> Iterator[test_case.TestCase]:
if cls.test_function:
# Generate tests for the current class
for l_value, r_value in cls.get_value_pairs():
cur_op = cls(l_value, r_value)
yield cur_op.create_test_case()
# Once current class completed, check descendants
yield from super().generate_tests()
class BignumCmp(BignumOperation):
"""Target for bignum comparison test cases."""
count = 0
test_function = "mbedtls_mpi_cmp_mpi"
test_name = "MPI compare"
input_cases = [
("-2", "-3"),
("-2", "-2"),
("2b4", "2b5"),
("2b5", "2b6")
]
def __init__(self, val_l, val_r):
super().__init__(val_l, val_r)
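        # Branchless three-way comparison: -1 if l < r, 0 if equal, 1 if l > r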
self._result = (self.int_l > self.int_r) - (self.int_l < self.int_r)
self.symbol = ["<", "==", ">"][self._result + 1]
def result(self):
return str(self._result)
class BignumCmpAbs(BignumCmp):
"""Target for abs comparison variant."""
count = 0
test_function = "mbedtls_mpi_cmp_abs"
test_name = "MPI compare (abs)"
def __init__(self, val_l, val_r):
super().__init__(val_l.strip("-"), val_r.strip("-"))
class BignumAdd(BignumOperation):
"""Target for bignum addition test cases."""
count = 0
test_function = "mbedtls_mpi_add_mpi"
test_name = "MPI add"
input_cases = list(itertools.combinations(
[
"1c67967269c6", "9cde3",
"-1c67967269c6", "-9cde3",
], 2
))
def __init__(self, val_l, val_r):
super().__init__(val_l, val_r)
self.symbol = "+"
def result(self):
return quote_str(hex(self.int_l + self.int_r).replace("0x", "", 1))
class BignumTestGenerator(test_generation.TestGenerator):
"""Test generator subclass including bignum targets."""
TARGETS = {
subclass.target_basename: subclass.generate_tests for subclass in
test_generation.BaseTarget.__subclasses__()
} # type: Dict[str, Callable[[], test_case.TestCase]]
if __name__ == '__main__':
test_generation.main(sys.argv[1:], BignumTestGenerator)
|
Python
| 0.000003
|
@@ -3233,41 +3233,18 @@
-for pair in list(%0A
+yield from
ite
@@ -3296,15 +3296,18 @@
-
- ) +
+yield from
cls
@@ -3322,32 +3322,8 @@
ases
-:%0A yield pair
%0A%0A
|
32528fb029ec97cc0b195b80363556f69315f164
|
Condense code in Bishop
|
chess_py/pieces/bishop.py
|
chess_py/pieces/bishop.py
|
# -*- coding: utf-8 -*-
"""
Class stores Bishop on the board
| rank
| 7 8 ║♜ ♞ ♝ ♛ ♚ ♝ ♞ ♜
| 6 7 ║♟ ♟ ♟ ♟ ♟ ♟ ♟ ♟
| 5 6 ║… … … … … … … …
| 4 5 ║… … … … … … … …
| 3 4 ║… … … … … … … …
| 2 3 ║… … … … … … … …
| 1 2 ║♙ ♙ ♙ ♙ ♙ ♙ ♙ ♙
| 0 1 ║♖ ♘ ♗ ♕ ♔ ♗ ♘ ♖
| ----╚═══════════════
| ——---a b c d e f g h
| -----0 1 2 3 4 5 6 7
| ------file
| Copyright © 2016 Aubhro Sengupta. All rights reserved.
"""
from chess_py.pieces.piece import Piece
from chess_py.pieces.rook import Rook
from chess_py.core.color import Color
class Bishop(Piece):
def __init__(self, input_color, location):
"""
Creates Bishop object that can be compared to and return possible moves
:type input_color: Color
"""
super(Bishop, self).__init__(input_color, location, "♝", "♗")
def __str__(self):
return "B"
def possible_moves(self, position):
"""
Returns all possible bishop moves.
:type position: Board
:rtype: list
"""
rook = Rook(self.color, self.location)
moves = []
if rook.direction_moves(lambda x: x.shift_up_right(), position) is not None:
moves.extend(rook.direction_moves(lambda x: x.shift_up_right(), position))
if rook.direction_moves(lambda x: x.shift_up_left(), position) is not None:
moves.extend(rook.direction_moves(lambda x: x.shift_up_left(), position))
if rook.direction_moves(lambda x: x.shift_down_right(), position) is not None:
moves.extend(rook.direction_moves(lambda x: x.shift_down_right(), position))
if rook.direction_moves(lambda x: x.shift_down_left(), position) is not None:
moves.extend(rook.direction_moves(lambda x: x.shift_down_left(), position))
for move in moves:
move.piece = self
return moves
|
Python
| 0.999999
|
@@ -1069,521 +1069,180 @@
-if rook.direction_moves(lambda x: x.shift_up_right(), position) is not None:%0A moves.extend(rook.direction_moves(lambda x: x.shift_up_right(), position))%0A%0A if rook.direction_moves(lambda x: x.shift_up_left(), position) is not None:%0A moves.extend(rook.direction_moves(lambda x: x.shift_up_left(), position))%0A%0A if rook.direction_moves(lambda x: x.shift_down_right(), position) is not None:%0A moves.extend(rook.direction_moves(lambda x: x.shift_down_right(), position))%0A%0A
+fns = %5Blambda x: x.shift_up_right(), lambda x: x.shift_up_left(),%0A lambda x: x.shift_down_right(), lambda x: x.shift_down_left()%5D%0A%0A for fn in fns:%0A
@@ -1273,37 +1273,10 @@
ves(
-lambda x: x.shift_down_left()
+fn
, po
@@ -1304,24 +1304,28 @@
+
+
moves.extend
@@ -1350,37 +1350,10 @@
ves(
-lambda x: x.shift_down_left()
+fn
, po
|
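The condensed Bishop code replaces four near-identical if-blocks with a loop over a list of shift functions. A minimal sketch of the pattern with stand-in functions (ints instead of board squares, not the chess_py API); storing the probe result also avoids calling it twice per direction:

def probe(fn, start):
    # Stand-in for Rook.direction_moves: may return None or a list.
    value = fn(start)
    return None if value < 0 else [value]

fns = [lambda x: x + 1, lambda x: x - 10,
       lambda x: x + 8, lambda x: x - 8]

moves = []
for fn in fns:
    result = probe(fn, 5)
    if result is not None:
        moves.extend(result)

print(moves)  # [6, 13] -- only the non-None directions contribute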
8dcb7e6c7b1990541c86159cd5df85f2f7a57ddb
|
Fix tests
|
tests/source/csv/test_import_assets.py
|
tests/source/csv/test_import_assets.py
|
import os
import json
from tests.base import ApiDBTestCase
from zou.app.models.entity import Entity
from zou.app.models.entity_type import EntityType
class ImportCsvAssetsTestCase(ApiDBTestCase):
def setUp(self):
super(ImportCsvAssetsTestCase, self).setUp()
self.generate_fixture_project_status()
self.generate_fixture_project()
self.generate_fixture_metadata_descriptor(entity_type="Asset")
def test_import_assets(self):
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets.csv")
)
self.upload_file(path, file_path_fixture)
entities = Entity.query.all()
self.assertEqual(len(entities), 3)
entity_types = EntityType.query.all()
self.assertEqual(len(entity_types), 2)
asset = entities[0]
self.assertEqual(asset.data.get("contractor", None), "contractor 1")
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets_no_metadata.csv")
)
self.upload_file("%s?update=true" % path, file_path_fixture)
entities = Entity.query.all()
self.assertEqual(len(entities), 3)
asset = entities[0]
self.assertEqual(asset.data.get("contractor", None), "contractor 1")
def test_import_assets_duplicates(self):
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets.csv")
)
self.upload_file(path, file_path_fixture)
self.upload_file(path, file_path_fixture)
entities = Entity.query.all()
self.assertEqual(len(entities), 3)
def test_import_assets_with_non_comma_delimiter(self):
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets_other_delimiter.csv")
)
self.upload_file(path, file_path_fixture)
entities = Entity.query.all()
self.assertEqual(len(entities), 3)
def test_import_assets_empty_lines(self):
# With empty lines. It should work
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets_broken_01.csv")
)
self.upload_file(path, file_path_fixture)
entities = Entity.query.all()
self.assertEqual(len(entities), 3)
def test_import_assets_missing_columns(self):
# With missing columns on a given line. It should not work.
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets_broken_02.csv")
)
error = json.loads(self.upload_file(path, file_path_fixture, 400))
self.assertEqual(error["line_number"], 2)
entities = Entity.query.all()
self.assertEqual(len(entities), 1)
def test_import_assets_missing_header(self):
# With missing columns on a given line. It should not work.
path = "/import/csv/projects/%s/assets" % self.project.id
file_path_fixture = self.get_fixture_file_path(
os.path.join("csv", "assets_broken_03.csv")
)
error = json.loads(self.upload_file(path, file_path_fixture, 400))
self.assertEqual(error["line_number"], 1)
entities = Entity.query.all()
self.assertEqual(len(entities), 0)
|
Python
| 0.000003
|
@@ -2871,35 +2871,25 @@
-error = json.loads(
+result =
self.upl
@@ -2918,32 +2918,142 @@
th_fixture, 400)
+%0A if type(result) != str:%0A result = result.decode(%22utf-8%22)%0A error = json.loads(result
)%0A self.a
@@ -3483,35 +3483,25 @@
-error = json.loads(
+result =
self.upl
@@ -3538,16 +3538,126 @@
re, 400)
+%0A if type(result) != str:%0A result = result.decode(%22utf-8%22)%0A error = json.loads(result
)%0A
|
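The test fix decodes a bytes response body before handing it to json.loads, so the same assertion works whether the test client returns str or bytes. A small sketch of the helper logic (using isinstance where the patch uses a type() comparison):

import json

def parse_response(result):
    # Test clients may return either str or bytes for a response body;
    # decode bytes before parsing.
    if not isinstance(result, str):
        result = result.decode("utf-8")
    return json.loads(result)

assert parse_response(b'{"line_number": 2}') == {"line_number": 2}
assert parse_response('{"line_number": 2}') == {"line_number": 2}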
88327c5e0a7ba7af086ad461e20395a33215b96c
|
Update api.py
|
chris_backend/core/api.py
|
chris_backend/core/api.py
|
from django.conf.urls import url, include
from rest_framework.urlpatterns import format_suffix_patterns
from feeds import views
# API v1 endpoints
urlpatterns = format_suffix_patterns([
url(r'^v1/$', views.api_root),
url(r'^v1/feeds/$', views.FeedList.as_view(), name='feed-list'),
url(r'^v1/feeds/(?P<pk>[0-9]+)/$',
views.FeedDetail.as_view(), name='feed-detail'),
url(r'^v1/users/$', views.UserList.as_view(), name='user-list'),
url(r'^v1/users/(?P<pk>[0-9]+)/$', views.UserDetail.as_view(), name='user-detail')
])
# Login and logout views for Django's browsable API

urlpatterns += [
url(r'^v1/auth/', include('rest_framework.urls', namespace='rest_framework')),
]
print('lolo')
|
Python
| 0.000001
|
@@ -703,18 +703,4 @@
%0A%5D%0A%0A
-print('lolo')%0A
|
51fb4cc79ecba178b811a1a0bb403c91317a116e
|
allow kwargs in ASE atoms converter
|
pymatgen/io/ase.py
|
pymatgen/io/ase.py
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals, absolute_import
"""
This module provides conversion between the Atomic Simulation Environment
Atoms object and pymatgen Structure objects.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 8, 2012"
from pymatgen.core.structure import Structure
try:
from ase import Atoms
ase_loaded = True
except ImportError:
ase_loaded = False
class AseAtomsAdaptor(object):
"""
Adaptor serves as a bridge between ASE Atoms and pymatgen structure.
"""
@staticmethod
def get_atoms(structure):
"""
Returns ASE Atoms object from pymatgen structure.
Args:
structure: pymatgen.core.structure.Structure
Returns:
ASE Atoms object
"""
if not structure.is_ordered:
raise ValueError("ASE Atoms only supports ordered structures")
symbols = [str(site.specie.symbol) for site in structure]
positions = [site.coords for site in structure]
cell = structure.lattice.matrix
return Atoms(symbols=symbols, positions=positions, pbc=True, cell=cell)
@staticmethod
def get_structure(atoms, cls=None):
"""
Returns pymatgen structure from ASE Atoms.
Args:
atoms: ASE Atoms object
cls: The Structure class to instantiate (defaults to pymatgen structure)
Returns:
Equivalent pymatgen.core.structure.Structure
"""
symbols = atoms.get_chemical_symbols()
positions = atoms.get_positions()
lattice = atoms.get_cell()
cls = Structure if cls is None else cls
return cls(lattice, symbols, positions,
coords_are_cartesian=True)
|
Python
| 0
|
@@ -808,16 +808,26 @@
tructure
+, **kwargs
):%0A
@@ -962,16 +962,96 @@
tructure
+%0A **kwargs: other keyword args to pass into the ASE Atoms constructor
%0A%0A
@@ -1448,18 +1448,49 @@
rue,
- cell=cell
+%0A cell=cell, **kwargs
)%0A%0A
|
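The patch forwards arbitrary keyword arguments through get_atoms to the ASE Atoms constructor. A minimal sketch of the passthrough pattern with a stand-in constructor (the dict here is illustrative, not the ASE API):

def make_atoms(symbols, positions, **kwargs):
    # Stand-in for the ASE Atoms constructor.
    return {"symbols": symbols, "positions": positions, **kwargs}

def get_atoms(structure, **kwargs):
    # Forward any extra keyword arguments to the underlying constructor,
    # alongside the fixed pbc/cell-style arguments.
    return make_atoms(structure["symbols"], structure["positions"],
                      pbc=True, **kwargs)

atoms = get_atoms({"symbols": ["H"], "positions": [[0, 0, 0]]}, tags=[1])
print(atoms["tags"])  # [1]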
5b3e77ba874ff36bb5f06e7db56a620c9f2fef62
|
Make missing personalisation error consistent
|
app/notifications/process_notifications.py
|
app/notifications/process_notifications.py
|
import uuid
from datetime import datetime
from flask import current_app
from notifications_utils.clients import redis
from notifications_utils.recipients import (
get_international_phone_info,
validate_and_format_phone_number,
format_email_address
)
from app import redis_store
from app.celery import provider_tasks
from app.config import QueueNames
from app.models import SMS_TYPE, Notification, KEY_TYPE_TEST, EMAIL_TYPE, NOTIFICATION_CREATED, ScheduledNotification
from app.dao.notifications_dao import (dao_create_notification,
dao_delete_notifications_and_history_by_id,
dao_created_scheduled_notification,
dao_create_notification_email_reply_to_mapping)
from app.v2.errors import BadRequestError
from app.utils import get_template_instance, cache_key_for_service_template_counter, convert_bst_to_utc
def create_content_for_notification(template, personalisation):
template_object = get_template_instance(template.__dict__, personalisation)
check_placeholders(template_object)
return template_object
def check_placeholders(template_object):
if template_object.missing_data:
message = 'Template missing personalisation: {}'.format(", ".join(template_object.missing_data))
raise BadRequestError(fields=[{'template': message}], message=message)
def persist_notification(
*,
template_id,
template_version,
recipient,
service,
personalisation,
notification_type,
api_key_id,
key_type,
created_at=None,
job_id=None,
job_row_number=None,
reference=None,
client_reference=None,
notification_id=None,
simulated=False,
created_by_id=None,
status=NOTIFICATION_CREATED
):
notification_created_at = created_at or datetime.utcnow()
if not notification_id:
notification_id = uuid.uuid4()
notification = Notification(
id=notification_id,
template_id=template_id,
template_version=template_version,
to=recipient,
service_id=service.id,
service=service,
personalisation=personalisation,
notification_type=notification_type,
api_key_id=api_key_id,
key_type=key_type,
created_at=notification_created_at,
job_id=job_id,
job_row_number=job_row_number,
client_reference=client_reference,
reference=reference,
created_by_id=created_by_id,
status=status
)
if notification_type == SMS_TYPE:
formatted_recipient = validate_and_format_phone_number(recipient, international=True)
recipient_info = get_international_phone_info(formatted_recipient)
notification.normalised_to = formatted_recipient
notification.international = recipient_info.international
notification.phone_prefix = recipient_info.country_prefix
notification.rate_multiplier = recipient_info.billable_units
elif notification_type == EMAIL_TYPE:
notification.normalised_to = format_email_address(notification.to)
    # if simulated create a Notification model to return, but do not persist the Notification to the DB
if not simulated:
dao_create_notification(notification)
if key_type != KEY_TYPE_TEST:
if redis_store.get(redis.daily_limit_cache_key(service.id)):
redis_store.incr(redis.daily_limit_cache_key(service.id))
if redis_store.get_all_from_hash(cache_key_for_service_template_counter(service.id)):
redis_store.increment_hash_value(cache_key_for_service_template_counter(service.id), template_id)
current_app.logger.info(
"{} {} created at {}".format(notification_type, notification_id, notification_created_at)
)
return notification
def send_notification_to_queue(notification, research_mode, queue=None):
if research_mode or notification.key_type == KEY_TYPE_TEST:
queue = QueueNames.RESEARCH_MODE
if notification.notification_type == SMS_TYPE:
if not queue:
queue = QueueNames.SEND_SMS
deliver_task = provider_tasks.deliver_sms
if notification.notification_type == EMAIL_TYPE:
if not queue:
queue = QueueNames.SEND_EMAIL
deliver_task = provider_tasks.deliver_email
try:
deliver_task.apply_async([str(notification.id)], queue=queue)
except Exception:
dao_delete_notifications_and_history_by_id(notification.id)
raise
current_app.logger.info(
"{} {} sent to the {} queue for delivery".format(notification.notification_type,
notification.id,
queue))
def simulated_recipient(to_address, notification_type):
if notification_type == SMS_TYPE:
formatted_simulated_numbers = [
validate_and_format_phone_number(number) for number in current_app.config['SIMULATED_SMS_NUMBERS']
]
return to_address in formatted_simulated_numbers
else:
return to_address in current_app.config['SIMULATED_EMAIL_ADDRESSES']
def persist_scheduled_notification(notification_id, scheduled_for):
scheduled_datetime = convert_bst_to_utc(datetime.strptime(scheduled_for, "%Y-%m-%d %H:%M"))
scheduled_notification = ScheduledNotification(notification_id=notification_id,
scheduled_for=scheduled_datetime)
dao_created_scheduled_notification(scheduled_notification)
def persist_email_reply_to_id_for_notification(notification_id, email_reply_to_id):
dao_create_notification_email_reply_to_mapping(notification_id, email_reply_to_id)
|
Python
| 0.999999
|
@@ -1248,18 +1248,9 @@
= '
-Template m
+M
issi
|
8c827f95c69d187ecca5a38a39b23e839f31d7b7
|
Fix get_value to get the value with the instance custom getters
|
prometheus/collectors.py
|
prometheus/collectors.py
|
import collections
import json
from multiprocessing import Lock
import quantile
from metricdict import MetricDict
# Used so only one thread can access the values at the same time
mutex = Lock()
# Used to return the value ordered (not necessary, but useful for consistency)
decoder = json.JSONDecoder(object_pairs_hook=collections.OrderedDict)
RESTRICTED_LABELS_NAMES = ('job',)
RESTRICTED_LABELS_PREFIXES = ('__',)
class Collector(object):
"""Collector is the base class for all the collectors/metrics"""
REPR_STR = "untyped"
def __init__(self, name, help_text, const_labels=None):
self.name = name
self.help_text = help_text
self.const_labels = const_labels
if const_labels:
self._label_names_correct(const_labels)
self.const_labels = const_labels
# This is a map that contains all the metrics
        # This variable should be synchronized
self.values = MetricDict()
def set_value(self, labels, value):
""" Sets a value in the container"""
if labels:
self._label_names_correct(labels)
with mutex:
# TODO: Accept null labels
self.values[labels] = value
def get_value(self, labels):
""" Gets a value in the container, exception if isn't present"""
with mutex:
return self.values[labels]
def _label_names_correct(self, labels):
"""Raise exception (ValueError) if labels not correct"""
for k, v in labels.items():
# Check reserved labels
if k in RESTRICTED_LABELS_NAMES:
raise ValueError("Labels not correct")
# Check prefixes
if any(k.startswith(i) for i in RESTRICTED_LABELS_PREFIXES):
raise ValueError("Labels not correct")
return True
def get_all(self):
""" Returns a list populated by tuples of 2 elements, first one is
           a dict with all the labels and the second element is the value
of the metric itself
"""
with mutex:
result = []
# Check if is a single value dict (custom empty key)
for k, v in self.values.items():
if not k or k == MetricDict.EMPTY_KEY:
key = None
else:
key = decoder.decode(k)
result.append((key, v))
return result
class Counter(Collector):
""" Counter is a Metric that represents a single numerical value that only
ever goes up.
"""
REPR_STR = "counter"
def set(self, labels, value):
""" Set is used to set the Counter to an arbitrary value. """
self.set_value(labels, value)
def get(self, labels):
""" Get gets the counter of an arbitrary group of labels"""
return self.get_value(labels)
def inc(self, labels):
""" Inc increments the counter by 1."""
self.add(labels, 1)
def add(self, labels, value):
""" Add adds the given value to the counter. It panics if the value
is < 0.
"""
if value < 0:
raise ValueError("Counters can't decrease")
try:
current = self.get_value(labels)
except KeyError:
current = 0
self.set_value(labels, current + value)
class Gauge(Collector):
""" Gauge is a Metric that represents a single numerical value that can
arbitrarily go up and down.
"""
REPR_STR = "gauge"
def set(self, labels, value):
""" Set sets the Gauge to an arbitrary value."""
self.set_value(labels, value)
def get(self, labels):
""" Get gets the Gauge of an arbitrary group of labels"""
return self.get_value(labels)
def inc(self, labels):
""" Inc increments the Gauge by 1."""
self.add(labels, 1)
def dec(self, labels):
""" Dec decrements the Gauge by 1."""
self.add(labels, -1)
def add(self, labels, value):
""" Add adds the given value to the Gauge. (The value can be
negative, resulting in a decrease of the Gauge.)
"""
try:
current = self.get_value(labels)
except KeyError:
current = 0
self.set_value(labels, current + value)
def sub(self, labels, value):
""" Sub subtracts the given value from the Gauge. (The value can be
negative, resulting in an increase of the Gauge.)
"""
self.add(labels, -value)
class Summary(Collector):
""" A Summary captures individual observations from an event or sample
stream and summarizes them in a manner similar to traditional summary
statistics: 1. sum of observations, 2. observation count,
3. rank estimations.
"""
REPR_STR = "summary"
DEFAULT_INVARIANTS = [(0.50, 0.05), (0.90, 0.01), (0.99, 0.001)]
SUM_KEY = "sum"
COUNT_KEY = "count"
# Reimplement the setter and getter without mutex because we need to use
# it in a higher level (with the estimator object)
def get_value(self, labels):
return self.values[labels]
def set_value(self, labels, value):
if labels:
self._label_names_correct(labels)
self.values[labels] = value
def add(self, labels, value):
"""Add adds a single observation to the summary."""
if type(value) not in (float, int):
raise TypeError("Summary only works with digits (int, float)")
# We have already a lock for data but not for the estimator
with mutex:
try:
e = self.get_value(labels)
except KeyError:
# Initialize quantile estimator
e = quantile.Estimator(*self.__class__.DEFAULT_INVARIANTS)
self.set_value(labels, e)
e.observe(float(value))
def get(self, labels):
""" Get gets the data in the form of 0.5, 0.9 and 0.99 percentiles. Also
you get sum and count, all in a dict
"""
return_data = {}
# We have already a lock for data but not for the estimator
with mutex:
e = self.get_value(labels)
# Set invariants data (default to 0.50, 0.90 and 0.99)
for i in e._invariants:
q = i._quantile
return_data[q] = e.query(q)
# Set sum and count
return_data[self.__class__.SUM_KEY] = e._sum
return_data[self.__class__.COUNT_KEY] = e._observations
return return_data
|
Python
| 0
|
@@ -1368,32 +1368,124 @@
values%5Blabels%5D%0A%0A
+ def get(self, labels):%0A %22%22%22Handy alias%22%22%22%0A return self.get_value(labels)%0A%0A
def _label_n
@@ -2183,19 +2183,83 @@
-result = %5B%5D
+items = self.values.items()%0A%0A result = %5B%5D%0A for k, v in items:
%0A
@@ -2324,57 +2324,8 @@
ey)%0A
- for k, v in self.values.items():%0A
@@ -2383,28 +2383,24 @@
-
key = None%0A
@@ -2410,20 +2410,16 @@
-
else:%0A
@@ -2428,28 +2428,24 @@
-
-
key = decode
@@ -2464,28 +2464,24 @@
-
result.appen
@@ -2488,17 +2488,27 @@
d((key,
-v
+self.get(k)
))%0A%0A
|
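The point of routing get_all through self.get(k) instead of reading the raw dict is that subclasses such as Summary override the getter to post-process the stored value. A small sketch of why the dynamic dispatch matters:

class Base:
    def __init__(self):
        self.values = {"a": 1}

    def get(self, key):
        return self.values[key]

    def get_all(self):
        # Going through self.get lets subclasses transform values
        # (e.g. Summary turning an estimator into percentiles).
        return [(k, self.get(k)) for k in self.values]

class Doubler(Base):
    def get(self, key):
        return self.values[key] * 2

print(Base().get_all())     # [('a', 1)]
print(Doubler().get_all())  # [('a', 2)]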
8f17ee334a27917187d16b1416af505971909585
|
Enhance kernel PLS example
|
examples/kpls_example.py
|
examples/kpls_example.py
|
#!/usr/bin/python3
"""An example of the use of non-linear kernel PLS regression on the output of
a function z(x) = 4.26(exp (−x) − 4 exp (−2x) + 3 exp (−3x))
Reproduces figure 3 from "Overview and Recent Advances in Partial Least
Squares" Roman Rosipal and Nicole Krämer SLSFS 2005, LNCS 3940, pp. 34–51,
2006. """
# Copyright (c) 2015, James Humphry - see LICENSE file for details
import math
import random
import numpy as np
import matplotlib.pyplot as plt
from regressions import kernel_pls, kernels
def z(x):
return 4.26 * (np.exp(-x) - 4 * np.exp(-2.0*x) + 3 * np.exp(-3.0*x))
# Define the kernel
kern = kernels.make_gaussian_kernel(width=1.8)
# Create sample data
x_values = np.linspace(0.0, 3.5, 100)
z_pure = z(x_values)
z_pure -= z_pure.mean(0) # Ensure z_pure is centered
noise = np.random.normal(loc=0.0, scale=0.2, size=100)
z_noisy = z_pure + noise
z_noisy -= z_noisy.mean(0) # Ensure z_noisy is centered
# Perform Kernel PLS
kpls_1 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=1,
X_kernel=kern)
kpls_1_results = kpls_1.prediction(x_values)
kpls_4 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=4,
X_kernel=kern)
kpls_4_results = kpls_4.prediction(x_values)
kpls_8 = kernel_pls.Kernel_PLS(X=x_values,
Y=z_noisy,
g=8,
X_kernel=kern)
kpls_8_results = kpls_8.prediction(x_values)
# Plot the results of the above calculations
fig = plt.figure('An example of Kernel PLS regression')
plt.title('An example of Kernel PLS regression')
plt.plot(x_values, z_pure, 'k-', label='$z(.)$')
plt.plot(x_values, z_noisy, 'k+', label='$z(.)$ with noise')
plt.plot(x_values, kpls_1_results, 'k--', label='KPLS 1C')
plt.plot(x_values, kpls_4_results, 'k:', label='KPLS 4C')
plt.plot(x_values, kpls_8_results, 'k-.', label='KPLS 8C')
plt.legend(loc=4)
plt.show()
|
Python
| 0
|
@@ -304,17 +304,95 @@
51,%0A2006
-.
+ and figure 3 from %22Nonlinear Partial Least Squares: An Overview%22 Roman%0ARosipal
%22%22%22%0A%0A#
@@ -2142,8 +2142,999 @@
.show()%0A
+fig.clear()%0A%0A# Plot some of the extracted components%0A%0A# These figures plot the underlying function based on 100 (xi, z(xi)) pairs%0A# as a dotted line in the original problem space. The component extracted%0A# is a single vector in the 100-dimensional transformed feature space. Each%0A# dimension in feature space corresponds to a K(?, xi) kernel function. As%0A# the kernel in this case is the Gaussian kernel which is spacially%0A# localised, it is workable to map each K(?, xi) function to the%0A# x-cordinate xi for display in this manner. In the general case,%0A# meaningfully plotting the components in kernel space is likely to be%0A# difficult.%0A%0Afig = plt.figure('Components found in Kernel PLS regression')%0A%0Afig.set_tight_layout(True)%0A%0Afor i in range(0, 8):%0A plt.subplot(4, 2, (i+1))%0A plt.title('Kernel PLS component %7B%7D'.format((i+1)))%0A plt.plot(x_values, z_pure, 'k--')%0A plt.plot(x_values, kpls_8.P%5B:, i%5D, 'k-')%0A plt.gca().set_ybound(lower=-1.5, upper=1.0)%0A%0Aplt.show()%0Afig.clear()%0A
|
a775da72446d057af61503fed6bf85896a7a490d
|
Add missing quote
|
scripts/fixtures/radar_fixtures/cohorts.py
|
scripts/fixtures/radar_fixtures/cohorts.py
|
from radar.models.groups import Group, GROUP_TYPE
from radar.pages import PAGE
from radar_fixtures.utils import add
COHORTS = [
{
'code': 'BONEITIS',
'name': 'Bone-itis',
'short_name': 'Bone-itis',
'pages': [
PAGE.PRIMARY_DIAGNOSIS,
PAGE.DIAGNOSES,
],
},
{
'code': 'CIRCUSITIS',
'name': 'Circusitis',
'short_name': 'Circusitis',
'pages': [
PAGE.PRIMARY_DIAGNOSIS,
PAGE.DIAGNOSES,
],
},
{
'code': 'ADTKD,
'name': 'Autosomal Dominant Tubulointerstitial Kidney Disease (FUAN)',
'short_name': 'ADTKD (FUAN)',
'pages': [
PAGE.PRIMARY_DIAGNOSIS,
PAGE.DIAGNOSES,
PAGE.GENETICS,
PAGE.FAMILY_HISTORY,
PAGE.FUAN_CLINICAL_PICTURES,
PAGE.RESULTS,
PAGE.DIALYSIS,
PAGE.TRANSPLANTS,
]
},
{
'code': 'ADPKD',
'name': 'Autosomal Dominant Polycystic Kidney Disease',
'short_name': 'ADPKD',
'pages': [
PAGE.PRIMARY_DIAGNOSIS,
PAGE.DIAGNOSES,
PAGE.GENETICS,
PAGE.FAMILY_HISTORY,
PAGE.RENAL_IMAGING,
PAGE.LIVER_IMAGING,
PAGE.LIVER_DISEASES,
PAGE.RESULTS,
PAGE.TRANSPLANTS,
PAGE.LIVER_TRANSPLANTS,
]
},
{
'code': 'ARPKD',
'name': 'Autosomal Recessive Polycystic Kidney Disease',
'short_name': 'ARPKD',
'pages': [
PAGE.PRIMARY_DIAGNOSIS,
PAGE.DIAGNOSES,
PAGE.GENETICS,
PAGE.FAMILY_HISTORY,
PAGE.FETAL_ULTRASOUNDS,
PAGE.RENAL_IMAGING,
PAGE.LIVER_IMAGING,
PAGE.LIVER_DISEASES,
PAGE.RESULTS,
PAGE.NUTRITION,
PAGE.LIVER_TRANSPLANTS,
PAGE.NEPHRECTOMIES,
]
}
]
def create_cohorts():
for x in COHORTS:
group = Group()
group.type = GROUP_TYPE.COHORT
group.code = x['code']
group.name = x['name']
group.short_name = x['short_name']
group.pages = x['pages']
add(group)
|
Python
| 0.000283
|
@@ -554,16 +554,17 @@
: 'ADTKD
+'
,%0A
|
c97651e6ac93fcc23c9c263cd1a6200fffb04431
|
Version bump
|
pymisp/__init__.py
|
pymisp/__init__.py
|
__version__ = '2.4.48.1'
from .api import PyMISP, PyMISPError, NewEventError, NewAttributeError, MissingDependency, NoURL, NoKey
|
Python
| 0.000001
|
@@ -19,9 +19,9 @@
.48.
-1
+2
'%0A%0Af
|
aad71ec87196381e66e801f62d7b7566c279f16a
|
Make sure that this view is named appropriately
|
project_template/urls/defaults.py
|
project_template/urls/defaults.py
|
from django.conf.urls.defaults import patterns, include, url
from armstrong.core.arm_wells.views import QuerySetBackedWellView
from armstrong.core.arm_sections.views import SimpleSectionView, SectionFeed
from armstrong.apps.articles.models import Article
from armstrong.apps.articles.views import ArticleFeed
from django.views.generic.list_detail import object_detail
from django.views.generic import TemplateView
from django.conf import settings
# ADMIN_BASE is the base URL for your Armstrong admin. It is highly
# recommended that you change this to a different URL unless you enforce a
# strict password-strength policy for your users.
ADMIN_BASE = "admin"
# Comment the next two lines out to disable the admin:
from armstrong import hatband as admin
admin.autodiscover()
from .utils import get_url_for_model
urlpatterns = patterns('',
# Examples:
# url(r'^$', '{{ project_name }}.views.home', name='home'),
# url(r'^{{ project_name }}/', include('{{ project_name }}.foo.urls')),
# Comment the admin/doc line below to disable admin documentation:
url(r'^%s/doc/' % ADMIN_BASE, include('django.contrib.admindocs.urls')),
# Comment the next line to disable the admin:
url(r'^%s/' % ADMIN_BASE, include(admin.site.urls)),
url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
# Load the Armstrong "success" page by default
url(r'^$', TemplateView.as_view(template_name="index.html")),
# ## Creating a standard "front page"
#
# Below is an example of an adhoc QuerySetBackedWellView. You should
# uncomment it if you used `armstrong load_demo_data` to create your
# initial data set.
#
# url(r'^$',
# QuerySetBackedWellView.as_view(well_title="Front Page",
# template_name="index.html", queryset=Article.published.all()),
# name="home"),
url(r'^section/(?P<full_slug>[-\w/]+)',
SimpleSectionView.as_view(template_name='section.html'),
name='section_view'),
url(r'^feed/section/(?P<full_slug>[-\w/]+)',
SectionFeed(section_view='section_view'),
name='section_feed'),
url(r'^feed/all',
ArticleFeed(title='Demo site articles',
link='/',
queryset=Article.objects.all()),
name='all_articles_feed'),
url(r'^article/(?P<slug>[-\w]+)/', object_detail, {
'queryset': Article.published.all().select_subclasses(),
'template_name': 'article.html',
'slug_field': 'slug',
},
name='article_detail'),
)
# Uncomment the following two lines if you want to expose our default API
# from .api import urlpatterns as api_urlpatterns
# urlpatterns += api_urlpatterns
|
Python
| 0.00001
|
@@ -1482,16 +1482,29 @@
x.html%22)
+, name=%22home%22
),%0A%0A
|
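Adding name="home" matters because reverse() and the {% url %} template tag resolve routes by name. A sketch in the Django 1.x style this project uses, assuming a configured project (not runnable standalone):

from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView

urlpatterns = patterns('',
    # Naming the route lets reverse("home") and {% url home %} resolve it.
    url(r'^$', TemplateView.as_view(template_name="index.html"),
        name="home"),
)

# Elsewhere: from django.core.urlresolvers import reverse; reverse("home") == "/"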
5907932dc3783d341338b9b8bc184fd49187977d
|
add exchange input
|
web/cgi-bin/bittrader/bitcoinaverager.py
|
web/cgi-bin/bittrader/bitcoinaverager.py
|
#!/usr/bin/python
# Copyright (c) 2014 Bitquant Research Laboratories (Asia) Ltd.
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from wsgiref.handlers import CGIHandler
from flask import Flask, Response, request
from werkzeug.utils import secure_filename
import subprocess
import sys
import shutil
import os
import json
import getpass
import login
import traceback
import fcntl
import time
import crypt
import pandas
import datetime
import time
import pytz
import dateutil
script_dir = os.path.dirname(os.path.realpath(__file__))
os.chdir(script_dir)
from BitcoinAverager import PriceCompositor
compositor = PriceCompositor()
app = Flask(__name__)
@app.route('/')
def index():
return 'index'
@app.route('/average-form')
def average_form():
return """
<form method=POST action="generate-data">
Start time (yyyy/mm/dd): <input name="year" size=4 value="2014">/
<input name="month" size=2 value="02">/<input name="day" size=2 value="01"> <input name="hour" size=2 value="00">:<input name="minute" size=2 value="00">:<input name="second" size=2 value="00"><br>
Time zone: <input name="tz" size=20 value="Europe/London"><br>
Time interval: <input name=interval_length value="3" size=3>
<select name="interval_type">
<option value="month">month(s)</option>
<option value="week">week(s)</option>
<option value="day">day(s)</option>
<option value="hour" selected>hour(s)</option>
<option value="minute">minute(s)</option>
<option value="second">second(s)</option>
</select><br>
Intervals: <input name=intervals value="20" size=3>
<p>
Include:
<input type="checkbox" name="time_table" value="True">Time/Epoch information
<input type="checkbox" name="currency_table" value="True">Breakdown by currency
<input type="checkbox" name="conversion_table" value="True">Currency conversion
<input type="checkbox" name="exchange_table" value="True">Exchange breakdown
<br>
Format: <select name="format">
<option value="text/html">HTML</option>
<option value="text/csv">CSV</option>
<option value="text/json">JSON</option>
</select>
<p>
<input type="submit" />
</form>
"""
@app.route('/generate-data', methods = ['POST'])
def generate_data():
import cStringIO
year = request.form['year']
month = request.form['month']
day = request.form['day']
hour = request.form['hour']
minute = request.form['minute']
second = request.form['second']
time_zone = request.form['tz']
interval_length = int(request.form['interval_length'])
interval_type = request.form['interval_type']
intervals = int(request.form['intervals'])
time_table = (request.form.get('time_table', '') == 'True')
currency_table = (request.form.get('currency_table', '') == 'True')
conversion_table = (request.form.get('conversion_table', '') == 'True')
exchange_table = (request.form.get('exchange_table', '') == 'True')
format = request.form.get('format', "text/html")
local_tz = pytz.timezone(time_zone)
start_date = local_tz.localize(datetime.datetime(int(year),
int(month),
int(day),
int(hour),
int(minute),
int(second)))
time_delta = None
if interval_type == "month":
time_delta = dateutil.relativedelta.relativedelta(months=interval_length)
elif interval_type == "week":
time_delta = dateutil.relativedelta.relativedelta(weeks=interval_length)
elif interval_type == "day":
time_delta = dateutil.relativedelta.relativedelta(days=interval_length)
elif interval_type == "hour":
time_delta = dateutil.relativedelta.relativedelta(hours=interval_length)
elif interval_type == "minute":
time_delta = dateutil.relativedelta.relativedelta(minutes=interval_length)
elif interval_type == "seconds":
time_delta = dateutil.relativedelta.relativedelta(seconds=interval_length)
else:
return "invalid interval_type"
table = compositor.generate(start_date,
time_delta,
intervals,
times=time_table,
currency=currency_table,
exchange=exchange_table,
rates=conversion_table)
output = cStringIO.StringIO()
if format == "text/html":
table.to_html(output)
elif format == "text/csv":
table.to_csv(output)
elif format == "text/json":
table.to_json(output)
else:
return "invalid format"
string = output.getvalue()
output.close()
return Response(string, mimetype=format)
if __name__ == '__main__' and len(sys.argv) == 1:
from wsgiref.handlers import CGIHandler
CGIHandler().run(app)
elif __name__ == '__main__' and sys.argv[1] == "refresh-scripts":
print refresh_scripts()
|
Python
| 0.000003
|
@@ -1605,39 +1605,9 @@
tor%0A
-compositor = PriceCompositor()
%0A
+
%0Aapp
@@ -2509,16 +2509,132 @@
size=3%3E
+%3Cbr%3E%0AExchanges: %3Cinput name=exchanges value=%22bitfinexUSD,bitstampUSD,itbitUSD,itbitEUR,krakenEUR,itbitSGD,anxhkHKD%22%3E
%0A%3Cp%3E%0AInc
@@ -3640,16 +3640,58 @@
rvals'%5D)
+%0A exchanges = request.form%5B'exchanges'%5D
%0A%0A ti
@@ -5219,16 +5219,70 @@
l_type%22%0A
+ compositor = PriceCompositor(exchanges.split(%22,%22))
%0A tab
|
dbeedb49c1c35a6ba6a1e6dfb287493e3786961f
|
Save the refresh token with all authentication methods
|
pynubank/nubank.py
|
pynubank/nubank.py
|
import json
import os
import uuid
from typing import Tuple
import requests
from qrcode import QRCode
from requests import Response
PAYMENT_EVENT_TYPES = (
'TransferOutEvent',
'TransferInEvent',
'TransferOutReversalEvent',
'BarcodePaymentEvent'
)
class NuException(Exception):
def __init__(self, status_code, response, url):
super().__init__()
self.url = url
self.status_code = status_code
self.response = response
class Nubank:
DISCOVERY_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/discovery'
DISCOVERY_APP_URL = 'https://prod-s0-webapp-proxy.nubank.com.br/api/app/discovery'
auth_url = None
feed_url = None
proxy_list_url = None
proxy_list_app_url = None
query_url = None
bills_url = None
refresh_token = None
def __init__(self):
self.headers = {
'Content-Type': 'application/json',
'X-Correlation-Id': 'WEB-APP.pewW9',
'User-Agent': 'pynubank Client - https://github.com/andreroggeri/pynubank',
}
self._update_proxy_urls()
self.auth_url = self.proxy_list_url['login']
@staticmethod
def _get_query(query_name):
root = os.path.abspath(os.path.dirname(__file__))
gql_file = query_name + '.gql'
path = os.path.join(root, 'queries', gql_file)
with open(path) as gql:
return gql.read()
def _update_proxy_urls(self):
request = requests.get(self.DISCOVERY_URL, headers=self.headers)
self.proxy_list_url = json.loads(request.content.decode('utf-8'))
request = requests.get(self.DISCOVERY_APP_URL, headers=self.headers)
self.proxy_list_app_url = json.loads(request.content.decode('utf-8'))
def _make_graphql_request(self, graphql_object):
body = {
'query': self._get_query(graphql_object)
}
response = requests.post(self.query_url, json=body, headers=self.headers)
return self._handle_response(response)
def _password_auth(self, cpf: str, password: str):
payload = {
"grant_type": "password",
"login": cpf,
"password": password,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=payload, headers=self.headers)
data = self._handle_response(response)
self.refresh_token = data['refresh_token']
return data
def _handle_response(self, response: Response) -> dict:
if response.status_code != 200:
raise NuException(f'The request made failed with HTTP status code {response.status_code}',
response.status_code, response.json())
return response.json()
def get_qr_code(self) -> Tuple[str, QRCode]:
content = str(uuid.uuid4())
qr = QRCode()
qr.add_data(content)
return content, qr
def authenticate(self, cpf: str, password: str):
auth_data = self._password_auth(cpf, password)
self.authenticate_with_refresh_token(auth_data['refresh_token'])
def authenticate_with_qr_code(self, cpf: str, password, uuid: str):
auth_data = self._password_auth(cpf, password)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
payload = {
'qr_code_id': uuid,
'type': 'login-webapp'
}
response = requests.post(self.proxy_list_app_url['lift'], json=payload, headers=self.headers)
auth_data = self._handle_response(response)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def authenticate_with_refresh_token(self, token: str):
body = {
"grant_type": "refresh_token",
"refresh_token": token,
"client_id": "other.conta",
"client_secret": "yQPeLzoHuJzlMMSAjC-LgNUJdUecx8XO"
}
response = requests.post(self.auth_url, json=body, headers=self.headers)
auth_data = self._handle_response(response)
self.headers['Authorization'] = f'Bearer {auth_data["access_token"]}'
self.feed_url = auth_data['_links']['events']['href']
self.query_url = auth_data['_links']['ghostflame']['href']
self.bills_url = auth_data['_links']['bills_summary']['href']
def get_card_feed(self):
request = requests.get(self.feed_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_card_statements(self):
feed = self.get_card_feed()
return list(filter(lambda x: x['category'] == 'transaction', feed['events']))
def get_bills(self):
request = requests.get(self.bills_url, headers=self.headers)
return json.loads(request.content.decode('utf-8'))['bills']
def get_bill_details(self, bill):
request = requests.get(bill['_links']['self']['href'], headers=self.headers)
return json.loads(request.content.decode('utf-8'))
def get_account_feed(self):
data = self._make_graphql_request('account_feed')
return data['data']['viewer']['savingsAccount']['feed']
def get_account_statements(self):
feed = self.get_account_feed()
return list(filter(lambda x: x['__typename'] in PAYMENT_EVENT_TYPES, feed))
def get_account_balance(self):
data = self._make_graphql_request('account_balance')
return data['data']['viewer']['savingsAccount']['currentSavingsBalance']['netAmount']
|
Python
| 0
|
@@ -3600,32 +3600,87 @@
ponse(response)%0A
+ self.refresh_token = auth_data%5B'refresh_token'%5D
%0A self.he
@@ -4342,24 +4342,79 @@
e(response)%0A
+ self.refresh_token = auth_data%5B'refresh_token'%5D
%0A sel
|
6db9a65c7b734c7c421075cbae11b5b1df35980e
|
Remove RingBuffer TODO from midi_monitor example
|
examples/midi_monitor.py
|
examples/midi_monitor.py
|
#!/usr/bin/env python3
"""JACK client that prints all received MIDI events."""
import jack
import binascii
client = jack.Client("MIDI-Monitor")
port = client.midi_inports.register("input")
@client.set_process_callback
def process(frames):
for offset, data in port.incoming_midi_events():
# TODO: use ringbuffer
print("{0}: 0x{1}".format(client.last_frame_time + offset,
binascii.hexlify(data).decode()))
return jack.CALL_AGAIN
with client:
print("#" * 80)
print("press Return to quit")
print("#" * 80)
input()
|
Python
| 0
|
@@ -294,39 +294,8 @@
():%0A
- # TODO: use ringbuffer%0A
|
e47b7e5952d4001459aee5ba570a7cc6d4c10d43
|
Add import of the InvalidDirectoryValueError to the directory package's test file
|
tests/unit/directory/test_directory.py
|
tests/unit/directory/test_directory.py
|
"""Contains the unit tests for the inner directory package"""
import unittest
import os
from classyfd import Directory
class TestDirectory(unittest.TestCase):
def setUp(self):
self.fake_path = os.path.abspath("hello-world-dir")
return
def test_create_directory_object(self):
d = Directory(self.fake_path)
self.assertTrue(d)
return
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -113,16 +113,44 @@
irectory
+, InvalidDirectoryValueError
%0A%0Aclass
@@ -408,16 +408,24 @@
return%0A
+ %0A %0A
%0A%0Aif __n
|
f1ceb45a0b332db80c2a963195e81f4dc5a822dd
|
Remove before_build and after_build hook
|
pypaas/checkout.py
|
pypaas/checkout.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import datetime
import os
import os.path
import shutil
import subprocess
from configparser import ConfigParser
from . import options
class Checkout(object):
def __init__(self, branch, commit, name):
self.branch, self.commit, self.name = branch, commit, name
@property
def path(self):
return os.path.join(
options.BASEPATH, 'checkouts',
self.branch.repo.name, self.branch.name,
'{}-{}'.format(self.name, self.commit[:11])
)
@classmethod
def create(cls, branch, commit):
name = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
self = cls(branch, commit, name)
subprocess.check_call(
['git', 'clone', '-q', self.branch.repo.path, self.path],
env={}
)
subprocess.check_call(
['git', 'config', 'advice.detachedHead', 'false'],
env={}, cwd=self.path
)
subprocess.check_call(
['git', 'checkout', self.commit],
env={}, cwd=self.path
)
subprocess.check_call(
['git', 'submodule', 'update', '--init', '--recursive'],
env={}, cwd=self.path
)
to_delete = []
for root, dirs, files in os.walk(self.path):
for d in dirs:
if d == '.git':
to_delete.append(os.path.join(root, d))
for d in to_delete:
shutil.rmtree(d)
return self
@property
def cmd_env(self):
env = copy.deepcopy(self.branch.config.get('env', dict()))
env.update(os.environ)
return env
@classmethod
def all_for_branch(cls, branch):
try:
files = os.listdir(os.path.join(
options.BASEPATH, 'checkouts', branch.repo.name, branch.name
))
except FileNotFoundError:
return
for basename in files:
f = os.path.join(
options.BASEPATH, 'checkouts',
branch.repo.name, branch.name, basename
)
if not os.path.isdir(f):
continue
name, commit = basename.split('-')
yield cls(branch, commit, name)
def run_hook_cmd(self, name, default=None):
hook = self.branch.config.get('hooks', {}).get(name, default)
if hook is None:
return
if not isinstance(hook, list):
hook = [hook]
for c in hook:
subprocess.check_call(c, shell=True, cwd=self.path,
env=self.cmd_env)
@property
def custom_cmds(self):
try:
return self.branch.config['custom_cmds']
except KeyError:
return dict()
def run_custom_cmd(self, name):
subprocess.check_call(
self.custom_cmds[name],
shell=True, cwd=self.path,
env=self.cmd_env
)
def build(self):
self.run_hook_cmd('before_build')
self.run_hook_cmd(
name='build',
default='if [ -f ./.build.sh ]; then ./.build.sh; fi'
)
self.run_hook_cmd('after_build')
def remove(self):
shutil.rmtree(self.path)
|
Python
| 0
|
@@ -2996,50 +2996,8 @@
f):%0A
- self.run_hook_cmd('before_build')%0A
@@ -3124,49 +3124,8 @@
)
-%0A self.run_hook_cmd('after_build')
%0A%0A
|
6037630389ec902098b7a0fb8db1d89e35fbff60
|
make mail notification helper function more generic
|
src/bda/plone/orders/mailnotify.py
|
src/bda/plone/orders/mailnotify.py
|
import smtplib
from email.MIMEText import MIMEText
from email.Utils import formatdate
from email.Header import Header
from zope.i18n import translate
from zope.i18nmessageid import MessageFactory
from souper.soup import get_soup
from repoze.catalog.query import Any
from Products.CMFCore.utils import getToolByName
from bda.plone.cart import get_catalog_brain
from .common import (
DT_FORMAT,
get_order,
)
from .mailtemplates import get_templates
from Products.CMFPlone.utils import safe_unicode
_ = MessageFactory('bda.plone.orders')
def status_message(context, msg):
putils = getToolByName(context, 'plone_utils')
putils.addPortalMessage(msg)
def create_mail_listing(context, attrs):
soup = get_soup('bda_plone_orders_bookings', context)
bookings = soup.query((Any('uid', attrs['booking_uids'])))
lines = []
for booking in bookings:
buyable = get_catalog_brain(context, booking.attrs['buyable_uid'])
title = buyable.Title
comment = booking.attrs['buyable_comment']
if comment:
title = '%s (%s)' % (title, comment)
line = ' %s %s' % (booking.attrs['buyable_count'], title)
lines.append(line)
return '\n'.join(lines)
def create_order_total(context, attrs):
soup = get_soup('bda_plone_orders_bookings', context)
bookings = soup.query((Any('uid', attrs['booking_uids'])))
ret = 0.0
for booking in bookings:
count = float(booking.attrs['buyable_count'])
net = booking.attrs.get('net', 0.0) * count
ret += net
ret += net * booking.attrs.get('vat', 0.0) / 100
return "%.2f" %(ret + float(attrs['shipping']))
def create_mail_body(context, attrs):
templates = get_templates(context)
arguments = dict()
arguments['date'] = attrs['created'].strftime(DT_FORMAT)
arguments['ordernumber'] = attrs['ordernumber']
arguments['personal_data.firstname'] = attrs['personal_data.firstname']
arguments['personal_data.lastname'] = attrs['personal_data.lastname']
arguments['personal_data.company'] = attrs['personal_data.company']
arguments['personal_data.phone'] = attrs['personal_data.phone']
arguments['personal_data.email'] = attrs['personal_data.email']
arguments['billing_address.street'] = attrs['billing_address.street']
arguments['billing_address.zip'] = attrs['billing_address.zip']
arguments['billing_address.city'] = attrs['billing_address.city']
arguments['billing_address.country'] = attrs['billing_address.country']
if attrs['delivery_address.alternative_delivery']:
delivery = dict()
delivery['delivery_address.firstname'] = attrs['delivery_address.firstname']
delivery['delivery_address.lastname'] = attrs['delivery_address.lastname']
delivery['delivery_address.company'] = attrs['delivery_address.company']
delivery['delivery_address.street'] = attrs['delivery_address.street']
delivery['delivery_address.zip'] = attrs['delivery_address.zip']
delivery['delivery_address.city'] = attrs['delivery_address.city']
delivery['delivery_address.country'] = attrs['delivery_address.country']
delivery_address_template = templates['delivery_address']
arguments['delivery_address'] = delivery_address_template % delivery
else:
arguments['delivery_address'] = ''
arguments['order_comment.comment'] = attrs['order_comment.comment']
arguments['item_listing'] = create_mail_listing(context, attrs)
arguments['order_total'] = create_order_total(context, attrs)
body_template = templates['body']
return body_template % arguments
def notify_order(event):
"""Send notification mail after checkout succeed.
"""
context = event.context
order = get_order(context, event.order_uid)
attrs = order.attrs
templates = get_templates(context)
subject = templates['subject'] % attrs['ordernumber']
message = create_mail_body(context, attrs)
customer_address = attrs['personal_data.email']
props = getToolByName(context, 'portal_properties')
shop_manager_address = props.site_properties.email_from_address
mail_notify = MailNotify(context)
for receiver in [customer_address, shop_manager_address]:
try:
mail_notify.send(subject, message, receiver)
except Exception, e:
msg = translate(
_('email_sending_failed',
'Failed to send Notification to ${receiver}',
mapping={'receiver': receiver}))
status_message(context, msg)
class MailNotify(object):
"""Mail notifyer.
"""
def __init__(self, context):
self.context = context
def send(self, subject, message, receiver):
sent_key = '_order_mail_already_sent_%s' % receiver
if self.context.REQUEST.get(sent_key):
return
purl = getToolByName(self.context, 'portal_url')
mailfrom = purl.getPortalObject().email_from_address
mailfrom_name = purl.getPortalObject().email_from_name
if mailfrom_name:
mailfrom = u"%s <%s>" % (safe_unicode(mailfrom_name), mailfrom)
mailhost = getToolByName(self.context, 'MailHost')
subject = subject.encode('utf-8')
subject = Header(subject, 'utf-8')
message = MIMEText(message, _subtype='plain')
message.set_charset('utf-8')
message.add_header('Date', formatdate(localtime=True))
message.add_header('From_', mailfrom)
message.add_header('From', mailfrom)
message.add_header('To', receiver)
message['Subject'] = subject
mailhost.send(messageText=message,
mto=receiver)
self.context.REQUEST[sent_key] = 1
|
Python
| 0.000001
|
@@ -1671,32 +1671,43 @@
reate_mail_body(
+templates,
context, attrs):
@@ -1710,47 +1710,8 @@
rs):
-%0A templates = get_templates(context)
%0A
@@ -3403,71 +3403,197 @@
-arguments%5B'item_listing'%5D = create_mail_listing(context, attrs)
+item_listing_callback = templates%5B'item_listing_callback'%5D%0A arguments%5B'item_listing'%5D = item_listing_callback(context, attrs)%0A order_total_callback = templates%5B'order_total_callback'%5D
%0A
@@ -3620,23 +3620,16 @@
tal'%5D =
-create_
order_to
@@ -3623,32 +3623,41 @@
'%5D = order_total
+_callback
(context, attrs)
@@ -3941,30 +3941,179 @@
s =
-get_templates(context)
+dict()%0A templates.update(get_templates(context))%0A templates%5B'item_listing_callback'%5D = create_mail_listing%0A templates%5B'order_total_callback'%5D = create_order_total
%0A
@@ -4163,24 +4163,24 @@
dernumber'%5D%0A
-
message
@@ -4198,16 +4198,27 @@
il_body(
+templates,
context,
|
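The refactor makes create_mail_body generic by storing the listing and total generators as callables in the templates dict, next to the string templates. A minimal sketch of the callback-in-dict pattern with illustrative data (not the bda.plone.orders schema):

def item_listing(context, attrs):
    return "\n".join("  %s %s" % (i["count"], i["title"]) for i in attrs["items"])

def order_total(context, attrs):
    return "%.2f" % sum(i["count"] * i["price"] for i in attrs["items"])

templates = {
    "body": "%(item_listing)s\ntotal: %(order_total)s",
    # Callables live alongside the string templates, as in the patch.
    "item_listing_callback": item_listing,
    "order_total_callback": order_total,
}

attrs = {"items": [{"count": 2, "title": "widget", "price": 1.5}]}
arguments = {
    "item_listing": templates["item_listing_callback"](None, attrs),
    "order_total": templates["order_total_callback"](None, attrs),
}
print(templates["body"] % arguments)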
3e996903031bd394bab9a343cb60f725cfe4de29
|
add error for common mistake
|
AFQ/api/participant.py
|
AFQ/api/participant.py
|
import nibabel as nib
import os.path as op
from time import time
import logging
from AFQ.definitions.mapping import SlrMap
from AFQ.api.utils import (
check_attribute, AFQclass_doc,
export_all_helper, valid_exports_string)
from AFQ.tasks.data import get_data_plan
from AFQ.tasks.mapping import get_mapping_plan
from AFQ.tasks.tractography import get_tractography_plan
from AFQ.tasks.segmentation import get_segmentation_plan
from AFQ.tasks.viz import get_viz_plan
from AFQ.utils.path import drop_extension
__all__ = ["ParticipantAFQ"]
class ParticipantAFQ(object):
f"""{AFQclass_doc}"""
def __init__(self,
dwi_data_file,
bval_file, bvec_file,
output_dir,
bids_info=None,
**kwargs):
"""
Initialize a ParticipantAFQ object from a BIDS dataset.
Parameters
----------
dwi_data_file : str
Path to DWI data file.
bval_file : str
Path to bval file.
bvec_file : str
Path to bvec file.
output_dir : str
Path to output directory.
bids_info : dict or None, optional
This is used by GroupAFQ to provide information about
the BIDS layout to each participant.
kwargs : additional optional parameters
You can set additional parameters for any step
of the process. See :ref:`usage/kwargs` for more details.
Examples
--------
api.ParticipantAFQ(
dwi_data_file, bval_file, bvec_file, output_dir,
csd_sh_order=4)
api.ParticipantAFQ(
dwi_data_file, bval_file, bvec_file, output_dir,
reg_template_spec="mni_t2", reg_subject_spec="b0")
Notes
-----
In tracking_params, parameters with the suffix mask which are also
an image from AFQ.definitions.image will be handled automatically by
the api.
It is recommended that you leave the bids_info parameter as None,
and instead pass in the paths to the files you want to use directly.
"""
if not isinstance(output_dir, str):
raise TypeError(
"output_dir must be a str")
if not isinstance(dwi_data_file, str):
raise TypeError(
"dwi_data_file must be a str")
if not isinstance(bval_file, str):
raise TypeError(
"bval_file must be a str")
if not isinstance(bvec_file, str):
raise TypeError(
"bvec_file must be a str")
if not op.exists(output_dir):
raise ValueError(
f"output_dir does not exist: {output_dir}")
self.logger = logging.getLogger('AFQ.api')
kwargs = dict(
dwi=dwi_data_file,
bval=bval_file,
bvec=bvec_file,
results_dir=output_dir,
dwi_affine=nib.load(dwi_data_file).affine,
bids_info=bids_info,
base_fname=op.join(
output_dir,
drop_extension(op.basename(dwi_data_file))),
**kwargs)
# construct pimms plans
if "mapping_definition" in kwargs and isinstance(
kwargs["mapping_definition"], SlrMap):
plans = { # if using SLR map, do tractography first
"data": get_data_plan(kwargs),
"tractography": get_tractography_plan(kwargs),
"mapping": get_mapping_plan(kwargs, use_sls=True),
"segmentation": get_segmentation_plan(kwargs),
"viz": get_viz_plan(kwargs)}
else:
plans = { # Otherwise, do mapping first
"data": get_data_plan(kwargs),
"mapping": get_mapping_plan(kwargs),
"tractography": get_tractography_plan(kwargs),
"segmentation": get_segmentation_plan(kwargs),
"viz": get_viz_plan(kwargs)}
# chain together a complete plan from individual plans
previous_data = {}
for name, plan in plans.items():
previous_data[f"{name}_imap"] = plan(
**kwargs,
**previous_data)
self.wf_dict =\
previous_data[f"{name}_imap"]
def export(self, attr_name="help"):
"""
Export a specific output. To print a list of available outputs,
call export without arguments.
Parameters
----------
attr_name : str
Name of the output to export. Default: "help"
Returns
-------
output : any
The specific output, or None if called without arguments.
"""
section = check_attribute(attr_name)
if section is None:
return self.wf_dict[attr_name]
return self.wf_dict[section][attr_name]
def export_all(self, viz=True, xforms=True,
indiv=True):
f""" Exports all the possible outputs
{valid_exports_string}
Parameters
----------
viz : bool
Whether to output visualizations. This includes tract profile
plots, a figure containing all bundles, and, if using the AFQ
segmentation algorithm, individual bundle figures.
Default: True
xforms : bool
Whether to output the reg_template image in subject space and,
depending on if it is possible based on the mapping used, to
output the b0 in template space.
Default: True
indiv : bool
Whether to output individual bundles in their own files, in
addition to the one file containing all bundles. If using
the AFQ segmentation algorithm, individual ROIs are also
output.
Default: True
"""
start_time = time()
seg_algo = self.export("segmentation_params").get("seg_algo", "AFQ")
export_all_helper(self, seg_algo, xforms, indiv, viz)
self.logger.info(
f"Time taken for export all: {time() - start_time}")
|
Python
| 0.000097
|
@@ -2732,16 +2732,205 @@
t_dir%7D%22)
+%0A if %22tractography_params%22 in kwargs:%0A raise ValueError((%0A %22unrecognized parameter tractography_params, %22%0A %22did you mean tracking_params ?%22))
%0A%0A
|
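The added check turns a silently ignored, misspelled keyword into a loud error with a suggestion. A tiny sketch of the guard on its own:

def check_kwargs(kwargs):
    # Catch a likely misspelling early instead of silently ignoring it.
    if "tractography_params" in kwargs:
        raise ValueError(
            "unrecognized parameter tractography_params, "
            "did you mean tracking_params ?")

try:
    check_kwargs({"tractography_params": {}})
except ValueError as e:
    print(e)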
91eddb82671842cfd1dd7aa58dc42d7ffd1d1550
|
call passed functions in get_function
|
tensorforce/util/config_util.py
|
tensorforce/util/config_util.py
|
# Copyright 2016 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Utility functions concerning configurations
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six import callable
import importlib
def get_function(fn, param=None, default=None):
"""
Get function reference by full module path. Either returns the function reference or calls the function
if param is not None and returns the result.
:param fn: Callable object or String containing the full function path
:param param: None to return function name, kwargs dict to return executed function
:param default: Default reference to return if str is None or empty
:return: Function reference, or result from function call
"""
if not fn:
return default
if callable(fn):
return fn
module_name, function_name = fn.rsplit('.', 1)
module = importlib.import_module(module_name)
func = getattr(module, function_name)
if isinstance(param, dict):
return func(**param)
else:
return func
|
Python
| 0.000003
|
@@ -1470,18 +1470,32 @@
-return
+func =
fn%0A
+ else:%0A
@@ -1541,24 +1541,28 @@
'.', 1)%0A
+
+
module = imp
@@ -1595,16 +1595,20 @@
e_name)%0A
+
func
|
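After the patch, a passed-in callable is also invoked when param is a dict, instead of being returned untouched. A self-contained sketch mirroring the patched control flow:

import importlib

def get_function(fn, param=None, default=None):
    # Accept a callable directly; otherwise import it from a dotted path.
    # In either case, call it when param is a kwargs dict.
    if not fn:
        return default
    if callable(fn):
        func = fn
    else:
        module_name, function_name = fn.rsplit('.', 1)
        module = importlib.import_module(module_name)
        func = getattr(module, function_name)
    if isinstance(param, dict):
        return func(**param)
    return func

assert get_function('math.sqrt')(9.0) == 3.0
assert get_function(None, default=len) is len
assert get_function('json.dumps', param={"obj": [1, 2]}) == "[1, 2]"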
d6532c24675956c6dc093dd330be1b78d691994f
|
Build Test
|
rppy/rppy.py
|
rppy/rppy.py
|
# rppy - a geophysical library for Python
# Copyright (C) 2015 Sean Matthew Contenti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import numpy as np
|
Python
| 0.000001
|
@@ -844,10 +844,41 @@
y as np%0A
+import matplotlib.pyplot as plt
%0A%0A
|
7ab671fea7fda45be5994d85378bfb326eddd7fb
|
Fix invalid use of F() when creating user story
|
taiga/projects/occ/mixins.py
|
taiga/projects/occ/mixins.py
|
# Copyright (C) 2014 Andrey Antukh <niwi@niwi.be>
# Copyright (C) 2014 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014 David Barragán <bameda@dbarragan.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.utils.translation import ugettext_lazy as _
from taiga.base import exceptions as exc
class OCCResourceMixin(object):
"""
    Rest Framework resource mixin for resources whose concurrent
    accesses and edits need to be controlled.
"""
def pre_save(self, obj):
current_version = obj.version
param_version = self.request.DATA.get('version', None)
if current_version != param_version:
raise exc.WrongArguments({"version": "The version doesn't match with the current one"})
obj.version = models.F('version') + 1
super().pre_save(obj)
class OCCModelMixin(models.Model):
"""
Generic model mixin that makes model compatible
with concurrency control system.
"""
version = models.IntegerField(null=False, blank=False, default=1, verbose_name=_("version"))
class Meta:
abstract = True
|
Python
| 0.000211
|
@@ -1381,16 +1381,39 @@
one%22%7D)%0A%0A
+ if obj.id:%0A
@@ -1450,16 +1450,17 @@
n') + 1%0A
+%0A
|
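A hedged sketch of the guard this diff adds: an F() expression defers the increment to SQL, which only makes sense for a row that already exists, so unsaved objects keep the plain integer default. The snippet mirrors the mixin's field names but needs a configured Django project to actually run against a database.

from django.db import models

def bump_version(obj):
    # F('version') is resolved by the database against the stored row, so it
    # is only valid for objects that have already been saved; a brand new
    # object has no row to reference and keeps its default version of 1.
    if obj.id:
        obj.version = models.F('version') + 1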
4b7b2727a35cfcb0117b0ba4571da9a0ea81824a
|
Remove old reimplementation of routes.
|
greenmine/base/routers.py
|
greenmine/base/routers.py
|
# -*- coding: utf-8 -*-
from rest_framework import routers
# Special router for actions.
actions_router = routers.Route(url=r'^{prefix}/{methodname}{trailing_slash}$',
mapping={'{httpmethod}': '{methodname}'},
name='{basename}-{methodnamehyphen}',
initkwargs={})
class DefaultRouter(routers.DefaultRouter):
routes = [
routers.DefaultRouter.routes[0],
actions_router,
routers.DefaultRouter.routes[2],
routers.DefaultRouter.routes[1]
]
__all__ = ["DefaultRouter"]
|
Python
| 0
|
@@ -58,517 +58,61 @@
rs%0A%0A
-# Special router for actions.%0Aactions_router = routers.Route(url=r'%5E%7Bprefix%7D/%7Bmethodname%7D%7Btrailing_slash%7D$',%0A mapping=%7B'%7Bhttpmethod%7D': '%7Bmethodname%7D'%7D,%0A name='%7Bbasename%7D-%7Bmethodnamehyphen%7D',%0A initkwargs=%7B%7D)%0A%0A%0Aclass DefaultRouter(routers.DefaultRouter):%0A routes = %5B%0A routers.DefaultRouter.routes%5B0%5D,%0A actions_router,%0A routers.DefaultRouter.routes%5B2%5D,%0A routers.DefaultRouter.routes%5B1%5D%0A %5D
+%0Aclass DefaultRouter(routers.DefaultRouter):%0A pass
%0A%0A%0A_
|
0e2c092ce3472bf26db7d3b836eb230cfb002656
|
fix method naming conflict
|
examples/sanic_peewee.py
|
examples/sanic_peewee.py
|
## You need the following additional packages for this example
# aiopg
# peewee_async
# peewee
## sanic imports
from sanic import Sanic
from sanic.response import json
## peewee_async related imports
import uvloop
import peewee
from peewee_async import Manager, PostgresqlDatabase
# we instantiate a custom loop so we can pass it to our db manager
loop = uvloop.new_event_loop()
database = PostgresqlDatabase(database='test',
host='127.0.0.1',
user='postgres',
password='mysecretpassword')
objects = Manager(database, loop=loop)
## from peewee_async docs:
# Also there’s no need to connect and re-connect before executing async queries
# with manager! It’s all automatic. But you can run Manager.connect() or
# Manager.close() when you need it.
# let's create a simple key value store:
class KeyValue(peewee.Model):
key = peewee.CharField(max_length=40, unique=True)
text = peewee.TextField(default='')
class Meta:
database = database
# create table synchronously
KeyValue.create_table(True)
# OPTIONAL: close synchronous connection
database.close()
# OPTIONAL: disable any future synchronous calls
objects.database.allow_sync = False # this will raise AssertionError on ANY sync call
app = Sanic('peewee_example')
@app.route('/post')
async def root(request):
await objects.create(KeyValue, key='my_first_async_db', text="I was inserted asynchronously!")
return json({'success': True})
@app.route('/get')
async def root(request):
all_objects = await objects.execute(KeyValue.select())
serialized_obj = []
for obj in all_objects:
serialized_obj.append({obj.key: obj.text})
return json({'objects': serialized_obj})
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8000, loop=loop)
|
Python
| 0.000042
|
@@ -1356,35 +1356,35 @@
ost')%0Aasync def
-roo
+pos
t(request):%0A
@@ -1548,11 +1548,10 @@
def
-roo
+ge
t(re
|
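A framework-free illustration of the conflict this diff fixes: the second def root rebinds the module-level name, so tooling that keys handlers by name (URL reversal, debugging output) sees two routes apparently served by one function.

def root():
    return 'post handler'

first = root            # a route decorator would have captured this object here

def root():             # the second definition rebinds the module-level name
    return 'get handler'

print(first())          # post handler: the original object still exists
print(root())           # get handler: but the name now refers only to this one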
53e4a8a00d4b1c0bed0ae93bb48831b04f1fc12d
|
Exclude msaa on Mac bots Review URL: https://codereview.appspot.com/7055043
|
slave/skia_slave_scripts/run_gm.py
|
slave/skia_slave_scripts/run_gm.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Run the Skia GM executable. """
from utils import shell_utils
from build_step import BuildStep
import errno
import os
import shutil
import sys
JSON_SUMMARY_FILENAME = 'actual-results.json'
class RunGM(BuildStep):
  def _PreGM(self):
print 'Removing %s' % self._gm_actual_dir
try:
shutil.rmtree(self._gm_actual_dir)
except:
pass
print 'Creating %s' % self._gm_actual_dir
try:
os.makedirs(self._gm_actual_dir)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise e
def _RunModulo(self, cmd):
""" Run GM in multiple concurrent processes using the --modulo flag. """
subprocesses = []
retcodes = []
for idx in range(self._num_cores):
subprocesses.append(shell_utils.BashAsync(cmd + ['--modulo', str(idx),
str(self._num_cores)]))
for proc in subprocesses:
retcode = 0
try:
retcode = shell_utils.LogProcessToCompletion(proc)[0]
except:
retcode = 1
retcodes.append(retcode)
for retcode in retcodes:
if retcode != 0:
raise Exception('Command failed with code %d.' % retcode)
def _Run(self):
self._PreGM()
cmd = [self._PathToBinary('gm'),
'--writePath', self._gm_actual_dir,
'--writeJsonSummary', os.path.join(self._gm_actual_dir,
JSON_SUMMARY_FILENAME),
] + self._gm_args
self._RunModulo(cmd)
if '__main__' == __name__:
sys.exit(BuildStep.RunBuildStep(RunGM))
|
Python
| 0.000023
|
@@ -1670,16 +1670,170 @@
gm_args%0A
+ # msaa16 is flaky on Macs (driver bug?) so we skip the test for now%0A if sys.platform == 'darwin':%0A cmd.extend(%5B'--exclude-config', 'msaa16'%5D)%0A
self
|
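A small sketch of the platform-conditional flag pattern used above: build the command as a list and extend it only on macOS. The binary name and paths are placeholders.

import sys

cmd = ['gm', '--writePath', '/tmp/gm-actual']
if sys.platform == 'darwin':
    # Skip a configuration known to be flaky on this platform.
    cmd.extend(['--exclude-config', 'msaa16'])
print(' '.join(cmd))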
ff68546c69b68c4f83eb843f3ecb5789358d2f32
|
enable category select plugin by default
|
searx/plugins/search_on_category_select.py
|
searx/plugins/search_on_category_select.py
|
'''
searx is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
searx is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with searx. If not, see < http://www.gnu.org/licenses/ >.
(C) 2015 by Adam Tauber, <asciimoo@gmail.com>
'''
from flask.ext.babel import gettext
name = 'Search on category select'
description = gettext('Perform search immediately if a category selected')
default_on = False
js_dependencies = ('js/search_on_category_select.js',)
|
Python
| 0
|
@@ -704,16 +704,24 @@
%0Aname =
+gettext(
'Search
@@ -739,16 +739,17 @@
select'
+)
%0Adescrip
@@ -813,16 +813,56 @@
selected
+. Disable to select multiple categories.
')%0Adefau
@@ -873,12 +873,11 @@
n =
-Fals
+Tru
e%0A%0Aj
|
cf983ed1832699a85cacb2d33d8dcb2735df01cf
|
fix build for FilteredModelIteratorBuilder
|
corehq/apps/dump_reload/sql/filters.py
|
corehq/apps/dump_reload/sql/filters.py
|
from abc import ABCMeta, abstractmethod
import six
from django.db.models import Q
from dimagi.utils.chunked import chunked
class DomainFilter(six.with_metaclass(ABCMeta)):
@abstractmethod
def get_filters(self, domain_name):
"""Return a list of filters. Each filter will be applied to a queryset independently
of the others."""
raise NotImplementedError()
class SimpleFilter(DomainFilter):
def __init__(self, filter_kwarg):
self.filter_kwarg = filter_kwarg
def get_filters(self, domain_name):
return [Q(**{self.filter_kwarg: domain_name})]
class UsernameFilter(DomainFilter):
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 500 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_usernames_by_domain
usernames = get_all_usernames_by_domain(domain_name)
for chunk in chunked(usernames, 500):
filter = Q()
for username in chunk:
filter |= Q(username__iexact=username)
yield filter
class UserIDFilter(DomainFilter):
def __init__(self, user_id_field, include_web_users=True):
self.user_id_field = user_id_field
self.include_web_users = include_web_users
def get_filters(self, domain_name):
"""
:return: A generator of filters each filtering for at most 1000 users.
"""
from corehq.apps.users.dbaccessors.all_commcare_users import get_all_user_ids_by_domain
user_ids = get_all_user_ids_by_domain(domain_name, include_web_users=self.include_web_users)
for chunk in chunked(user_ids, 1000):
query_kwarg = '{}__in'.format(self.user_id_field)
yield Q(**{query_kwarg: chunk})
class UnfilteredModelIteratorBuilder(object):
def __init__(self, model_label):
self.model_label = model_label
def build(self, domain, model_class, db_alias):
objects = model_class._default_manager
queryset = objects.using(db_alias).order_by(model_class._meta.pk.name)
return [queryset.iterator()]
class FilteredModelIteratorBuilder(UnfilteredModelIteratorBuilder):
def __init__(self, model_label, filter):
super(FilteredModelIteratorBuilder, self).__init__(model_label)
self.filter = filter
def build(self, domain, model_class, db_alias):
queryset = super(FilteredModelIteratorBuilder, self).build(domain, model_class, db_alias)
filters = self.filter.get_filters(domain)
for filter in filters:
yield queryset.filter(filter).iterator()
class UniqueFilteredModelIteratorBuilder(FilteredModelIteratorBuilder):
def build(self, domain, model_class, db_alias):
def _unique(iterator):
seen = set()
for model in iterator:
if model.pk not in seen:
seen.add(model.pk)
yield model
iterators = super(UniqueFilteredModelIteratorBuilder, self).build(domain, model_class, db_alias)
for iterator in iterators:
yield _unique(iterator)
|
Python
| 0
|
@@ -1938,34 +1938,29 @@
def
-build(self, domain
+queryset(self
, model_
@@ -2032,26 +2032,22 @@
-queryset =
+return
objects
@@ -2094,24 +2094,133 @@
eta.pk.name)
+%0A%0A def build(self, domain, model_class, db_alias):%0A queryset = self.queryset(model_class, db_alias)
%0A ret
@@ -2538,63 +2538,21 @@
= s
-uper(FilteredModelIteratorBuilder, self).build(domain,
+elf.queryset(
mode
|
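The underlying problem this diff fixes is structural: once the subclass build() became a generator, chaining .filter() onto whatever super().build() returned could no longer work, so the raw queryset is factored into its own method and reused. A Django-free sketch of the same refactor, with a plain list standing in for the queryset:

class UnfilteredBuilder(object):
    def queryset(self):
        return list(range(10))          # stands in for an ordered model queryset

    def build(self):
        return [iter(self.queryset())]

class FilteredBuilder(UnfilteredBuilder):
    def __init__(self, predicates):
        self.predicates = predicates

    def build(self):
        qs = self.queryset()            # reuse the raw source, not super().build()
        for predicate in self.predicates:
            yield iter([x for x in qs if predicate(x)])

for iterator in FilteredBuilder([lambda x: x % 2 == 0, lambda x: x > 6]).build():
    print(list(iterator))               # [0, 2, 4, 6, 8] then [7, 8, 9]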
a55822e8a9e6b1433118d139358bf72efd073b13
|
Remove ambiguous funcs
|
bqx/abstract.py
|
bqx/abstract.py
|
class Comparable:
"""Abstract class of 'comparable' clauses.
    Note that 'comparable' here refers to SQL semantics, not Python's.
    Inherit this to make explicit that sub-classes can't be 'calculated' without an explicit implementation.
See implementations in parts.py.
"""
def __init__(self):
pass
def __lt__(self, other):
# self < other
raise NotImplementedError
def __le__(self, other):
# self <= other
raise NotImplementedError
def __eq__(self, other):
# self == other
raise NotImplementedError
def __ne__(self, other):
# self != other
raise NotImplementedError
def __gt__(self, other):
# self > other
raise NotImplementedError
def __ge__(self, other):
# self >= other
raise NotImplementedError
def __add__(self, other):
# self + other
raise NotImplementedError
def __sub__(self, other):
# self - other
raise NotImplementedError
def __mul__(self, other):
# self * other
raise NotImplementedError
def __truediv__(self, other):
# self / other
raise NotImplementedError
def __mod__(self, other):
# self % other
raise NotImplementedError
def __and__(self, other):
# self & other
raise NotImplementedError
def __or__(self, other):
# self | other
raise NotImplementedError
class Alias:
"""Subclass of Alias will be an alias of something like column and table.
In SQL query like "column AS col", 'column' is real name and 'col' is alias name.
"""
def __init__(self, real_name):
self.real_name = real_name
self.alias_name = None
def __str__(self):
"""Sub-classes have to define how this class looks like."""
raise NotImplementedError
def AS(self, alias):
"""Set alias name, declared in AS claus.
Args:
alias (str): Alias name
Returns:
self
"""
self.alias_name = alias
return self
def as_claus(self, auto_alias=False):
"""Expand its real/alias name in AS claus form.
If the alias name is not defined, auto-generated AS claus form
or just real name will be returned. Behavior depends on auto_alias.
Keyword Args:
            auto_alias (bool): Generate the AS clause automatically or not
Returns:
str: Appropriate representation of alias object
"""
if self.alias_name:
return '%s AS %s' % (self.real_name, self.alias_name)
else:
if auto_alias and '.' in self.real_name:
self.alias_name = self.real_name.split('.')[-1]
return self.as_claus()
else:
return self.real_name
def alias_name(self):
return self.alias_name
def real_name(self):
return self.real_name
|
Python
| 0.999999
|
@@ -2854,117 +2854,4 @@
me%0A%0A
- def alias_name(self):%0A return self.alias_name%0A%0A def real_name(self):%0A return self.real_name%0A
|
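The deleted methods were ambiguous because they collided with instance attributes of the same names: once __init__ assigns self.real_name, the attribute shadows the method on every instance, making the method unreachable. A minimal reproduction of that shadowing:

class Shadowed(object):
    def __init__(self, real_name):
        self.real_name = real_name      # the instance attribute...

    def real_name(self):                # ...shadows this method on instances
        return self.real_name

s = Shadowed('users.id')
print(s.real_name)            # users.id  (the attribute wins the lookup)
print(callable(s.real_name))  # False     (the method can never be called)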
e6a72c4987246e5c56863a7b98cdbe8be729a688
|
fix syntax errors
|
slider/templatetags/slider_tags.py
|
slider/templatetags/slider_tags.py
|
# -*- coding: utf-8 -*-
from django import template
from slider.models import SliderImage
import random
register = template.Library()
def get_random_item(l,max=None):
res= []
size = len(l)
indexs = range(0,size)
if max = None:
max = size
for i in range(0: max):
index = random.choice(indexs)
indexs.pop(index)
res += l[index]
return res
@register.assignment_tag
def get_slider_images(limit=False, randomize=True):
qs = SliderImage.objects.filter(is_visible=True)
if randomize is True and limit is True :
qs = get_random_item(qs,limit)
elif randomize is True:
qs = get_random_item(qs)
if limit is not False:
qs = qs[0:limit]
return qs
|
Python
| 0.000009
|
@@ -231,16 +231,17 @@
if max =
+=
None:%0A
@@ -279,17 +279,17 @@
range(0
-:
+,
max):%0A
|
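The diff above repairs only the syntax; the sampling logic itself remains fragile (indexs.pop(index) removes by position rather than by value, and res += l[index] fails for non-iterable items). A hedged replacement using the standard library, which does the same job correctly:

import random

def get_random_items(items, max_count=None):
    # random.sample returns distinct elements and leaves the input untouched.
    items = list(items)
    if max_count is None or max_count > len(items):
        max_count = len(items)
    return random.sample(items, max_count)

print(get_random_items(range(10), 3))   # e.g. [7, 0, 4]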
3bd59973df4a14c575e24c73e0629524daae6cad
|
Handle in feed_tools when language isn't specified
|
podcasts/feed_tools.py
|
podcasts/feed_tools.py
|
from bs4 import BeautifulSoup
from dateutil import parser
import feedparser
from urllib.error import HTTPError
from urllib.request import urlopen
def get_podcast_data(feed_url):
try:
feed_request = urlopen(feed_url)
except HTTPError as e:
raise e
feed_xml = feed_request.read()
feed = feedparser.parse(feed_xml).feed
soup = BeautifulSoup(feed_xml)
try:
title = feed.title
except AttributeError:
raise AttributeError('xml')
response = {
'title': title,
'description': feed.subtitle,
'link': getattr(feed, 'link', ""),
'language': __get_language(feed),
'tags': __get_tags(feed),
'images': __get_images(soup, feed),
'categories': __get_categories(soup),
}
return response
def __get_language(feed):
return feed.language[:2]
def __get_categories(soup):
categories = {}
for each in soup.find_all('itunes:category'):
category = each['text']
subcategories = []
for each in each.find_all('itunes:category'):
subcategory = each['text']
subcategories.append(subcategory)
categories.update({category: subcategories})
return categories
def __get_tags(feed):
try:
return [tag.term for tag in feed.tags]
except AttributeError:
return []
def __get_images(soup, feed):
images = []
# Find an itunes:image inside the channel
itunes_image = soup.channel.find('itunes:image', recursive=False)
if itunes_image:
href = itunes_image.get('href')
if href and href not in images:
images.append(href)
# Find an image element that's a direct child to the channel element
    image_tag = soup.channel.find('image', recursive=False)
if image_tag:
url = getattr(getattr(image_tag, 'url', None), 'text', None)
if url:
images.append(url)
# Lastly add feedparser's image if it isn't already in the list
feedparser_image = getattr(getattr(feed, 'image', None), 'href', None)
if feedparser_image and feedparser_image not in images:
images.append(feedparser_image)
return images
def get_episode_data(feed_url, existing_episode_titles):
feed = feedparser.parse(feed_url)
episodes = []
for feed_episode in feed.entries:
try:
if feed_episode.title in existing_episode_titles:
# If the episode already is in the DB
continue
except AttributeError:
# The episode in the feed doesn't have a title
continue
episodes.append({
'title': feed_episode.title,
'link': __get_link(feed_episode),
'description': __get_description(feed_episode),
'published': __get_published(feed_episode),
'audio_file': __get_audio_file(feed_episode),
})
return episodes
def __get_link(episode):
return getattr(episode, 'link', '')
def __get_published(episode):
# parser.parse(): http://stackoverflow.com/a/18726020/595990
return parser.parse(episode.published)
def __get_audio_file(episode):
enclosures = getattr(episode, 'enclosures', '')
for enclosure in enclosures:
if enclosure.type[:5] == 'audio':
return enclosures[0].href
return ''
def __get_description(episode):
summary = getattr(episode, 'summary', None)
if summary:
return summary
content = getattr(episode, 'content', None)
if content:
return content[0].value
return ''
|
Python
| 0.000001
|
@@ -837,21 +837,31 @@
urn
+getattr(
feed
-.
+, '
language
%5B:2%5D
@@ -856,16 +856,24 @@
language
+', '??')
%5B:2%5D%0A%0A%0Ad
|
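The diff guards the one attribute that feeds most often omit; the getattr-with-default idiom generalizes to any optional feed field. A short sketch with a stand-in feed object:

class FakeFeed(object):
    title = 'A show'        # note: no `language` attribute at all

feed = FakeFeed()
language = getattr(feed, 'language', '??')[:2]
print(language)             # '??' instead of an AttributeError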
7ed873c44467ab09b5b168777f89f59f7e7b1ab7
|
fix blksize
|
pyscf/mp/dfgmp2.py
|
pyscf/mp/dfgmp2.py
|
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Xing Zhang <zhangxing.nju@gmail.com>
#
'''
density fitting GMP2, 3-center integrals incore.
'''
import numpy as np
from pyscf import __config__
from pyscf import lib
from pyscf.lib import logger
from pyscf.ao2mo import _ao2mo
from pyscf.mp import gmp2, dfmp2
from pyscf.mp.gmp2 import make_rdm1, make_rdm2
WITH_T2 = getattr(__config__, 'mp_dfgmp2_with_t2', True)
def kernel(mp, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2,
verbose=logger.NOTE):
if mo_energy is not None or mo_coeff is not None:
# For backward compatibility. In pyscf-1.4 or earlier, mp.frozen is
# not supported when mo_energy or mo_coeff is given.
assert(mp.frozen == 0 or mp.frozen is None)
if eris is None: eris = mp.ao2mo(mo_coeff)
if mo_energy is None: mo_energy = eris.mo_energy
if mo_coeff is None: mo_coeff = eris.mo_coeff
nocc = mp.nocc
nvir = mp.nmo - nocc
naux = mp.with_df.get_naoaux()
eia = mo_energy[:nocc,None] - mo_energy[None,nocc:]
if with_t2:
t2 = np.empty((nocc,nocc,nvir,nvir), dtype=mo_coeff.dtype)
else:
t2 = None
orbspin = eris.orbspin
Lova = np.empty((naux, nocc*nvir))
if orbspin is None:
Lovb = np.empty((naux, nocc*nvir))
p1 = 0
for qova, qovb in mp.loop_ao2mo(mo_coeff, nocc, orbspin):
p0, p1 = p1, p1 + qova.shape[0]
Lova[p0:p1] = qova
if orbspin is None:
Lovb[p0:p1] = qovb
if orbspin is not None:
sym_forbid = (orbspin[:nocc,None] != orbspin[nocc:]).flatten()
Lova[:,sym_forbid] = 0
emp2 = 0
for i in range(nocc):
if orbspin is None:
buf = np.dot(Lova[:,i*nvir:(i+1)*nvir].T, Lova)
buf += np.dot(Lovb[:,i*nvir:(i+1)*nvir].T, Lovb)
buf += np.dot(Lova[:,i*nvir:(i+1)*nvir].T, Lovb)
buf += np.dot(Lovb[:,i*nvir:(i+1)*nvir].T, Lova)
else:
buf = np.dot(Lova[:,i*nvir:(i+1)*nvir].T, Lova)
gi = np.array(buf, copy=False).reshape(nvir,nocc,nvir)
gi = gi.transpose(1,0,2) - gi.transpose(1,2,0)
t2i = gi/lib.direct_sum('jb+a->jba', eia, eia[i])
emp2 += np.einsum('jab,jab', t2i, gi) * .25
if with_t2:
t2[i] = t2i
return emp2, t2
class DFGMP2(dfmp2.DFMP2):
def loop_ao2mo(self, mo_coeff, nocc, orbspin):
nao, nmo = mo_coeff.shape
if orbspin is None:
moa = np.asarray(mo_coeff[:nao//2], order='F')
mob = np.asarray(mo_coeff[nao//2:], order='F')
else:
moa = np.asarray(mo_coeff[:nao//2]+mo_coeff[nao//2:], order='F')
ijslice = (0, nocc, nocc, nmo)
Lova = Lovb = None
with_df = self.with_df
nvir = nmo - nocc
naux = with_df.get_naoaux()
mem_now = lib.current_memory()[0]
max_memory = max(2000, self.max_memory*.9-mem_now)
blksize = int(min(naux, max(with_df.blockdim,
(max_memory*1e6/8-nocc*nvir**2*2)/(2*nocc*nvir))))
if orbspin is None:
for eri1 in with_df.loop(blksize=blksize):
Lova = _ao2mo.nr_e2(eri1, moa, ijslice, aosym='s2', out=Lova)
Lovb = _ao2mo.nr_e2(eri1, mob, ijslice, aosym='s2', out=Lovb)
yield Lova, Lovb
else:
for eri1 in with_df.loop(blksize=blksize):
Lova = _ao2mo.nr_e2(eri1, moa, ijslice, aosym='s2', out=Lova)
yield Lova, None
def ao2mo(self, mo_coeff=None):
eris = gmp2._PhysicistsERIs()
# Initialize only the mo_coeff and
eris._common_init_(self, mo_coeff)
return eris
def make_rdm1(self, t2=None, ao_repr=False):
if t2 is None:
t2 = self.t2
assert t2 is not None
return make_rdm1(self, t2, ao_repr=ao_repr)
def make_rdm2(self, t2=None, ao_repr=False):
if t2 is None:
t2 = self.t2
assert t2 is not None
return make_rdm2(self, t2, ao_repr=ao_repr)
def init_amps(self, mo_energy=None, mo_coeff=None, eris=None, with_t2=WITH_T2):
return kernel(self, mo_energy, mo_coeff, eris, with_t2)
if __name__ == "__main__":
from pyscf import gto, scf, mp
mol = gto.Mole()
mol.atom = [
['Li', (0., 0., 0.)],
['H', (1., 0., 0.)]]
mol.basis = 'cc-pvdz'
mol.build()
mf = scf.GHF(mol).run()
mymp = DFGMP2(mf)
mymp.kernel()
mf = scf.RHF(mol).run()
mf = mf.to_ghf()
mymp = DFGMP2(mf)
mymp.kernel()
mymp = mp.GMP2(mf).density_fit()
mymp.kernel()
mf = scf.RHF(mol).density_fit().run()
mymp = mp.GMP2(mf)
mymp.kernel()
|
Python
| 0.000925
|
@@ -3519,16 +3519,98 @@
em_now)%0A
+ if orbspin is None:%0A fac = 2%0A else:%0A fac = 1%0A
@@ -3726,17 +3726,19 @@
**2*2)/(
-2
+fac
*nocc*nv
|
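The fix makes the block size account for how many half-transformed buffers are held per auxiliary index: two (alpha and beta) when orbspin is None, otherwise one. A plain-arithmetic sketch of that sizing rule with made-up dimensions; the function name df_blksize is illustrative:

def df_blksize(naux, nocc, nvir, max_memory_mb, blockdim, nbuf):
    # 8 bytes per float64; reserve nocc*nvir**2*2 elements for per-orbital work
    # buffers, then split the remainder over nbuf rows of nocc*nvir per aux index.
    avail = max_memory_mb * 1e6 / 8 - nocc * nvir**2 * 2
    return int(min(naux, max(blockdim, avail / (nbuf * nocc * nvir))))

print(df_blksize(naux=400, nocc=10, nvir=50, max_memory_mb=2000,
                 blockdim=240, nbuf=2))   # spin-unrestricted path holds two buffers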
73cd64313ae1238c592af464533ee80389df3b0e
|
save and predict on best Lv model
|
sfddd/sgd.py
|
sfddd/sgd.py
|
import cPickle
import gzip
import logging
import os
import time
import lasagne
import numpy as np
import theano
import theano.tensor as T
from tqdm import tqdm
from sfddd import models
from .preproc import SIZE_X, SIZE_Y
from .util import gpu_free_mem
logger = logging.getLogger(__name__)
DEFAULT_BATCHSIZE = 32
LEARNING_RATE = 0.0001
def load_img_batch(fnames, cache_folder='cache/train/'):
ext = '.pkl.gzip'
X = []
for fn in fnames:
with gzip.open(os.path.join(cache_folder, fn + ext), 'rb') as fi:
img = cPickle.load(fi)
X.append(img)
X = np.array(X).astype('float32')
X = X.reshape((-1, 3, SIZE_X, SIZE_Y))
return X
def minibatch_iterator(inputs, targets, batchsize, cache_folder='cache/train/'):
assert len(inputs) == len(targets)
indicies = np.arange(len(inputs))
np.random.shuffle(indicies)
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
excerpt = indicies[start_idx:start_idx + batchsize]
X = load_img_batch(inputs[excerpt], cache_folder)
yield X, targets[excerpt]
def testbatch_iterator(inputs, batchsize, cache_folder='cache/test/'):
for start_idx in tqdm(range(0, len(inputs) - batchsize + 1, batchsize)):
excerpt = slice(start_idx, start_idx + batchsize)
X = load_img_batch(inputs[excerpt], cache_folder)
yield X
def train(Xs, Ys, Xv, Yv, size_x=SIZE_X, size_y=SIZE_Y, epochs=10,
batchsize=DEFAULT_BATCHSIZE, cache_folder='cache/'):
logger.info('GPU Free Mem: %.3fGB' % gpu_free_mem('gb'))
cache_folder = os.path.join(cache_folder, 'train/')
input_var = T.tensor4('inputs')
target_var = T.ivector('targets')
logger.info("Compiling network functions...")
# net = models.test_cnn(size_x, size_y, input_var)
net = models.vgg16(input_var)
prediction = lasagne.layers.get_output(net)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
params = lasagne.layers.get_all_params(net, trainable=True)
updates = lasagne.updates.adam(loss, params, learning_rate=LEARNING_RATE)
test_prediction = lasagne.layers.get_output(net, deterministic=True)
test_loss = lasagne.objectives.\
categorical_crossentropy(test_prediction, target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
dtype=theano.config.floatX)
train_fn = theano.function([input_var, target_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
predict_proba = theano.function([input_var], test_prediction)
logger.info("Training...")
    logger.info('GPU Free Mem: %.3fGB' % gpu_free_mem('gb'))
for epoch in range(epochs):
start_time = time.time()
train_err, train_batches = 0, 0
for batch in minibatch_iterator(Xs, Ys, batchsize, cache_folder):
inputs, targets = batch
train_err += train_fn(inputs, targets)
train_batches += 1
val_err, val_acc, val_batches = 0, 0, 0
for batch in minibatch_iterator(Xv, Yv, batchsize, cache_folder):
inputs, targets = batch
err, acc = val_fn(inputs, targets)
val_err += err
val_acc += acc
val_batches += 1
train_loss = train_err / train_batches
val_loss = val_err / val_batches
val_acc = val_acc / val_batches * 100
end_time = time.time() - start_time
logger.info("epoch[%d] -- Ls: %.3f | Lv: %.3f | ACCv: %.3f | Ts: %.3f"
% (epoch, train_loss, val_loss, val_acc, end_time))
return predict_proba
def predict(Xt, pred_fn, batchsize=2, cache_folder='cache/'):
cache_folder = os.path.join(cache_folder, 'test/')
logger.info('Predicting on test set...')
pred = []
for batch in testbatch_iterator(Xt, batchsize, cache_folder):
pred.extend(pred_fn(batch))
pred = np.array(pred)
logger.info('pred shape: (%d, %d)' % pred.shape)
return pred
|
Python
| 0
|
@@ -1439,18 +1439,17 @@
epochs=
-10
+2
,%0A
@@ -2789,16 +2789,61 @@
('gb'))%0A
+%0A best_val_loss, best_epoch = None, None%0A%0A
for
@@ -3612,149 +3612,589 @@
-logger.info(%22epoch%5B%25d%5D -- Ls: %25.3f %7C Lv: %25.3f %7C ACCv: %25.3f %7C Ts: %25.3f%22%0A %25 (epoch, train_loss, val_loss, val_acc, end_time)
+if not best_val_loss or val_loss %3C best_val_loss:%0A best_val_loss = val_loss%0A best_epoch = epoch%0A np.savez('out/model.npz', *lasagne.layers.get_all_param_values(net))%0A%0A logger.info(%22epoch%5B%25d%5D -- Ls: %25.3f %7C Lv: %25.3f %7C ACCv: %25.3f %7C Ts: %25.3f%22%0A %25 (epoch, train_loss, val_loss, val_acc, end_time))%0A%0A logger.info(%22loading best model: epoch%5B%25d%5D%22 %25 best_epoch)%0A with np.load('out/model.npz') as f:%0A param_values = %5Bf%5B'arr_%25d' %25 i%5D for i in range(len(f.files))%5D%0A lasagne.layers.set_all_param_values(net, param_values
)%0A%0A
|
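A compact sketch of the checkpointing pattern the diff introduces: track the best validation loss, snapshot parameters whenever it improves, and reload the snapshot once training ends. Plain numpy arrays stand in for Lasagne's parameter values:

import numpy as np

params = [np.zeros(3), np.ones(2)]            # stand-ins for layer weights
best_val_loss = None

for epoch, val_loss in enumerate([0.9, 0.7, 0.8]):
    if best_val_loss is None or val_loss < best_val_loss:
        best_val_loss = val_loss
        np.savez('/tmp/model.npz', *params)   # overwrite with the new best

with np.load('/tmp/model.npz') as f:
    restored = [f['arr_%d' % i] for i in range(len(f.files))]
print(len(restored), best_val_loss)           # 2 0.7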
7bf673eb581e037bb7f06c05b258995ff41002a2
|
add level.intermediate
|
src/c3nav/mapdata/models/level.py
|
src/c3nav/mapdata/models/level.py
|
from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from shapely.geometry import JOIN_STYLE
from shapely.ops import cascaded_union
from c3nav.mapdata.models.base import MapItem
class Level(MapItem):
"""
A map level (-1, 0, 1, 2…)
"""
name = models.SlugField(_('level name'), unique=True, max_length=50,
help_text=_('Usually just an integer (e.g. -1, 0, 1, 2)'))
altitude = models.DecimalField(_('level altitude'), null=True, max_digits=6, decimal_places=2)
intermediate = models.BooleanField(_('intermediate level'))
class Meta:
verbose_name = _('Level')
verbose_name_plural = _('Levels')
default_related_name = 'levels'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@cached_property
def geometries(self):
return LevelGeometries.by_level(self)
def tofilename(self):
return 'levels/%s.json' % self.name
def lower(self):
return Level.objects.filter(altitude__lt=self.altitude).order_by('altitude')
def higher(self):
return Level.objects.filter(altitude__gt=self.altitude).order_by('altitude')
@classmethod
def fromfile(cls, data, file_path):
kwargs = super().fromfile(data, file_path)
if 'altitude' not in data:
raise ValueError('missing altitude.')
if not isinstance(data['altitude'], (int, float)):
raise ValueError('altitude has to be int or float.')
kwargs['altitude'] = data['altitude']
return kwargs
def tofile(self):
result = super().tofile()
result['altitude'] = float(self.altitude)
return result
def __str__(self):
return self.name
class LevelGeometries():
by_level_name = {}
@classmethod
def by_level(cls, level):
return cls.by_level_name.setdefault(level.name, cls(level))
def __init__(self, level):
self.level = level
@cached_property
def raw_rooms(self):
return cascaded_union([room.geometry for room in self.level.rooms.all()])
@cached_property
def buildings(self):
result = cascaded_union([building.geometry for building in self.level.buildings.all()])
if self.level.intermediate:
result = cascaded_union([result, self.raw_rooms])
return result
@cached_property
def rooms(self):
return self.raw_rooms.intersection(self.buildings)
@cached_property
def outsides(self):
return cascaded_union([outside.geometry for outside in self.level.outsides.all()]).difference(self.buildings)
@cached_property
def mapped(self):
return cascaded_union([self.buildings, self.outsides])
@cached_property
def obstacles(self):
return cascaded_union([obstacle.geometry for obstacle in self.level.obstacles.all()]).intersection(self.mapped)
@cached_property
def raw_doors(self):
return cascaded_union([door.geometry for door in self.level.doors.all()]).intersection(self.mapped)
@cached_property
def elevatorlevels(self):
return cascaded_union([elevatorlevel.geometry for elevatorlevel in self.level.elevatorlevels.all()])
@cached_property
def areas(self):
return cascaded_union([self.rooms, self.outsides, self.elevatorlevels])
@cached_property
def holes(self):
return cascaded_union([holes.geometry for holes in self.level.holes.all()]).intersection(self.areas)
@cached_property
def accessible(self):
return self.areas.difference(self.holes).difference(self.obstacles)
@cached_property
def buildings_with_holes(self):
return self.buildings.difference(self.holes)
@cached_property
def areas_and_doors(self):
return cascaded_union([self.areas, self.raw_doors])
@cached_property
def walls(self):
return self.buildings.difference(self.areas_and_doors)
@cached_property
def walls_shadow(self):
return self.walls.buffer(0.2, join_style=JOIN_STYLE.mitre).intersection(self.buildings_with_holes)
@cached_property
def doors(self):
return self.raw_doors.difference(self.areas)
def get_levelconnectors(self, to_level=None):
queryset = self.level.levelconnectors.prefetch_related('levels')
if to_level is not None:
queryset = queryset.filter(levels=to_level)
return cascaded_union([levelconnector.geometry for levelconnector in queryset])
@cached_property
def levelconnectors(self):
return self.get_levelconnectors()
def intermediate_shadows(self, to_level=None):
return self.buildings.difference(self.get_levelconnectors(to_level))
|
Python
| 0.000028
|
@@ -1615,16 +1615,285 @@
tude'%5D%0A%0A
+ if 'intermediate' not in data:%0A raise ValueError('missing intermediate.')%0A%0A if not isinstance(data%5B'intermediate'%5D, bool):%0A raise ValueError('intermediate has to be boolean.')%0A%0A kwargs%5B'intermediate'%5D = data%5B'intermediate'%5D%0A%0A
@@ -2013,16 +2013,67 @@
titude)%0A
+ result%5B'intermediate'%5D = self.intermediate%0A
|
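The diff extends the fromfile/tofile round trip to the new boolean with the same presence-and-type checks used for altitude. That shape is uniform enough to factor into a helper; the require function below is an assumption for illustration, not how c3nav structures it:

def require(data, key, types, type_label):
    if key not in data:
        raise ValueError('missing %s.' % key)
    if not isinstance(data[key], types):
        raise ValueError('%s has to be %s.' % (key, type_label))
    return data[key]

data = {'altitude': 2.5, 'intermediate': False}
kwargs = {
    'altitude': require(data, 'altitude', (int, float), 'int or float'),
    'intermediate': require(data, 'intermediate', bool, 'boolean'),
}
print(kwargs)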
d18b37b8329e156b8573edc6c1bc8a6e4eb6f23a
|
Undo run_tests.py modification in the hopes of making this merge
|
run_tests.py
|
run_tests.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This is our basic test running framework based on Twisted's Trial.
Usage Examples:
# to run all the tests
python run_tests.py
# to run a specific test suite imported here
python run_tests.py NodeConnectionTestCase
# to run a specific test imported here
python run_tests.py NodeConnectionTestCase.test_reboot
# to run some test suites elsewhere
python run_tests.py nova.tests.node_unittest
python run_tests.py nova.tests.node_unittest.NodeConnectionTestCase
Due to our use of multiprocessing we frequently get some ignorable
'Interrupted system call' exceptions after test completion.
"""
import __main__
import os
import sys
from twisted.scripts import trial as trial_script
from nova import datastore
from nova import flags
from nova import twistd
from nova.tests.access_unittest import *
from nova.tests.api_unittest import *
from nova.tests.api import *
from nova.tests.api.rackspace import *
from nova.tests.auth_unittest import *
from nova.tests.cloud_unittest import *
from nova.tests.compute_unittest import *
from nova.tests.flags_unittest import *
from nova.tests.model_unittest import *
from nova.tests.network_unittest import *
from nova.tests.objectstore_unittest import *
from nova.tests.process_unittest import *
from nova.tests.rpc_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.volume_unittest import *
FLAGS = flags.FLAGS
flags.DEFINE_bool('flush_db', True,
'Flush the database before running fake tests')
flags.DEFINE_string('tests_stderr', 'run_tests.err.log',
'Path to where to pipe STDERR during test runs.'
' Default = "run_tests.err.log"')
if __name__ == '__main__':
OptionsClass = twistd.WrapTwistedOptions(trial_script.Options)
config = OptionsClass()
argv = config.parseOptions()
FLAGS.verbose = True
# TODO(termie): these should make a call instead of doing work on import
if FLAGS.fake_tests:
from nova.tests.fake_flags import *
# use db 8 for fake tests
FLAGS.redis_db = 8
if FLAGS.flush_db:
logging.info("Flushing redis datastore")
r = datastore.Redis.instance()
r.flushdb()
else:
from nova.tests.real_flags import *
# Establish redirect for STDERR
sys.stderr.flush()
err = open(FLAGS.tests_stderr, 'w+', 0)
os.dup2(err.fileno(), sys.stderr.fileno())
if len(argv) == 1 and len(config['tests']) == 0:
# If no tests were specified run the ones imported in this file
# NOTE(termie): "tests" is not a flag, just some Trial related stuff
config['tests'].update(['__main__'])
elif len(config['tests']):
# If we specified tests check first whether they are in __main__
for arg in config['tests']:
key = arg.split('.')[0]
if hasattr(__main__, key):
config['tests'].remove(arg)
config['tests'].add('__main__.%s' % arg)
trial_script._initialDebugSetup(config)
trialRunner = trial_script._makeRunner(config)
suite = trial_script._getSuite(config)
if config['until-failure']:
test_result = trialRunner.runUntilFailure(suite)
else:
test_result = trialRunner.run(suite)
if config.tracer:
sys.settrace(None)
results = config.tracer.results()
results.write_results(show_missing=1, summary=False,
coverdir=config.coverdir)
sys.exit(not test_result.wasSuccessful())
|
Python
| 0
|
@@ -1631,18 +1631,19 @@
.tests.a
-pi
+uth
_unittes
@@ -1676,77 +1676,8 @@
.api
- import *%0Afrom nova.tests.api.rackspace import *%0Afrom nova.tests.auth
_uni
|
82b4ea673aefd73384eb442c1769211d55c74c14
|
Update test infrastructure
|
project/settings/test.py
|
project/settings/test.py
|
# Local
from .base import *
# Heroku
ALLOWED_HOSTS = [
'testserver',
]
# Redis
RQ_QUEUES['default']['ASYNC'] = False
# Email
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
|
Python
| 0.000001
|
@@ -190,8 +190,135 @@
ackend'%0A
+%0A# Cloudinary%0ACLOUDINARY_URL = None%0AMEDIA_URL = '/media/'%0ADEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage'%0A
|
e2ef73097ae220be4e52563e4e098aea228f82fa
|
rename polls hook
|
polls/wagtail_hooks.py
|
polls/wagtail_hooks.py
|
from django.conf.urls import url
from polls.admin import QuestionsModelAdmin
from polls.admin_views import QuestionResultsAdminView
from polls.models import PollsIndexPage
from wagtail.wagtailcore import hooks
from wagtail.contrib.modeladmin.options import modeladmin_register
from django.contrib.auth.models import User
@hooks.register('register_admin_urls')
def register_question_results_admin_view_url():
return [
url(r'polls/question/(?P<parent>\d+)/results/$',
QuestionResultsAdminView.as_view(),
name='question-results-admin'),
]
modeladmin_register(QuestionsModelAdmin)
@hooks.register('construct_main_menu')
def show_polls_entries_for_users_have_access(request, menu_items):
if not request.user.is_superuser and not User.objects.filter(
pk=request.user.pk, groups__name='Moderators').exists():
menu_items[:] = [
item for item in menu_items if item.name != 'polls']
@hooks.register('construct_explorer_page_queryset')
def show_main_language_only(parent_page, pages, request):
polls_index_page_pk = PollsIndexPage.objects.descendant_of(
request.site.root_page).first().pk
return pages.exclude(pk=polls_index_page_pk)
|
Python
| 0.000014
|
@@ -1010,31 +1010,29 @@
def
-show_main_language_only
+hide_polls_index_page
(par
|
5fc16267239890acbf6c4d7ab4685c4a2f420360
|
allow empty domain in tests
|
corehq/form_processor/utils/general.py
|
corehq/form_processor/utils/general.py
|
from django.conf import settings
from corehq.toggles import USE_SQL_BACKEND, NAMESPACE_DOMAIN, NEW_EXPORTS, TF_USES_SQLITE_BACKEND
from dimagi.utils.logging import notify_exception
def should_use_sql_backend(domain_name):
from corehq.apps.domain.models import Domain
if settings.UNIT_TESTING:
return _should_use_sql_backend_in_tests(domain_name)
# TODO: remove toggle once all domains have been migrated
toggle_enabled = USE_SQL_BACKEND.enabled(domain_name)
if toggle_enabled:
try:
# migrate domains in toggle
domain = Domain.get_by_name(domain_name)
if not domain.use_sql_backend:
domain.use_sql_backend = True
domain.save()
USE_SQL_BACKEND.set(domain_name, enabled=False, namespace=NAMESPACE_DOMAIN)
except Exception:
notify_exception(None, "Error migrating SQL BACKEND toggle", {
'domain': domain_name
})
return True
return toggle_enabled or Domain.get_by_name(domain_name).use_sql_backend
def _should_use_sql_backend_in_tests(domain_name):
"""The default return value is False unless the ``TESTS_SHOULD_USE_SQL_BACKEND`` setting
has been set or a Domain object with the same name exists."""
assert settings.UNIT_TESTING
from corehq.apps.domain.models import Domain
override = getattr(settings, 'TESTS_SHOULD_USE_SQL_BACKEND', None)
if override is not None:
return override
elif getattr(settings, 'DB_ENABLED', True):
domain = Domain.get_by_name(domain_name)
return domain and domain.use_sql_backend
else:
return False
def use_new_exports(domain_name):
return NEW_EXPORTS.enabled(domain_name) or should_use_sql_backend(domain_name)
def use_sqlite_backend(domain_name):
return TF_USES_SQLITE_BACKEND.enabled(domain_name) or should_use_sql_backend(domain_name)
def is_commcarecase(obj):
from casexml.apps.case.models import CommCareCase
from corehq.form_processor.models import CommCareCaseSQL
return isinstance(obj, (CommCareCase, CommCareCaseSQL))
|
Python
| 0.000001
|
@@ -1498,16 +1498,32 @@
elif
+domain_name and
getattr(
|
ad73d9f8960eea834d1ff9fa73e3b79aa3445b8d
|
return errno to travis
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
def interact(line, stdin, process):
# print line
pass
import unittest
import sys
import os
import subprocess
import argparse
import tmuxp.testsuite
from tmuxp.util import tmux
from tmuxp import t
tmux_path = sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
if tmux_path not in sys.path:
sys.path.insert(0, tmux_path)
from time import sleep
import itertools
parser = argparse.ArgumentParser(description="test framework")
parser.add_argument('--pypid', type=int, required=False)
def main():
t.socket_name = 'hi'
def has_virtualenv():
if os.environ.get('VIRTUAL_ENV'):
return os.environ.get('VIRTUAL_ENV')
else:
            return False
def in_tmux():
if os.environ.get('TMUX'):
return True
else:
return False
tmuxclient = None
def la():
if not in_tmux():
shell_commands = []
if has_virtualenv():
shell_commands.append('source %s/bin/activate' % has_virtualenv())
shell_commands.append('echo wat lol %s' % has_virtualenv())
session_name = 'tmuxp'
t.tmux('new-session', '-d', '-s', session_name)
for shell_command in shell_commands:
t.tmux('send-keys', '-t', session_name, shell_command, '^M')
t.tmux('send-keys', '-R', '-t', session_name, 'python run_tests.py --pypid=%s' % os.getpid(), '^M')
os.environ['pypid'] = str(os.getpid())
#os.execl('/usr/local/bin/tmux', 'tmux', 'attach-session', '-t', session_name)
#t.hotswap(session_name=session_name)
def output(line):
#print(line)
pass
#tmuxclient = t.tmux('-C')
#tmuxclient = subprocess.Popen(['tmux', '-C', '-Lhi', 'attach'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
else:
print has_virtualenv()
print in_tmux()
print os.environ.get('pypid')
args = vars(parser.parse_args())
if 'pypid' in args:
print args['pypid']
# todo create a hook to run after suite / loader to detach
# and killall tmuxp + tmuxp_-prefixed sessions.
#tmux('detach')
#os.kill(args['pypid'], 9)
#t.kill_server()
suites = unittest.TestLoader().discover('tmuxp.testsuite', pattern="*.py")
return unittest.TextTestRunner(verbosity=2).run(suites)
session_name = 'tmuxp'
t.tmux('new-session', '-d', '-s', session_name)
suites = unittest.TestLoader().discover('tmuxp.testsuite', pattern="*.py")
return unittest.TextTestRunner(verbosity=2).run(suites)
if __name__ == '__main__':
return main()
|
Python
| 0.999999
|
@@ -2446,28 +2446,30 @@
re
-turn
+sult =
unittest.Te
@@ -2506,16 +2506,129 @@
suites)%0A
+ if result.wasSuccessful():%0A sys.exit(0)%0A else:%0A sys.exit(1)%0A
sess
@@ -2783,20 +2783,22 @@
)%0A re
-turn
+sult =
unittes
@@ -2838,16 +2838,97 @@
(suites)
+%0A if result.wasSuccessful():%0A sys.exit(0)%0A else:%0A sys.exit(1)
%0A%0Aif __n
@@ -2948,26 +2948,19 @@
ain__':%0A
+
-return
main()%0A
|
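The point of the diff is that CI reads the process exit status rather than the printed summary, so the runner must translate the unittest result into sys.exit. A minimal self-contained version, assuming the tmuxp.testsuite package is importable:

import sys
import unittest

def main():
    suite = unittest.TestLoader().discover('tmuxp.testsuite', pattern='*.py')
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    # Exit 0 on success so CI such as Travis sees a passing build, 1 otherwise.
    sys.exit(0 if result.wasSuccessful() else 1)

if __name__ == '__main__':
    main()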
d72f2a59f0df669ea3ccb223869121afd7a7fe7f
|
update docs
|
common/src/gosa/common/mqtt_connection_state.py
|
common/src/gosa/common/mqtt_connection_state.py
|
# This file is part of the clacks framework.
#
# http://clacks-project.org
#
# Copyright:
# (C) 2010-2012 GONICUS GmbH, Germany, http://www.gonicus.de
#
# License:
# GPL-2: http://www.gnu.org/licenses/gpl-2.0.html
#
# See the LICENSE file in the project's top-level directory for details.
import zope
from lxml import objectify, etree
from zope.interface import implementer, Interface
from gosa.common import Environment
from gosa.common.event import EventMaker
from gosa.common.components.mqtt_handler import MQTTHandler
from gosa.common.handler import IInterfaceHandler
@implementer(IInterfaceHandler)
class MQTTConnectionHandler(MQTTHandler):
"""
Handle MQTT connection states of the participants (backend, proxies, clients).
Clients can announce themselved in a 2-staged manner. First sending 'init' state
and when they are able to handle requests from other clients they tell them by
sending the 'ready' state.
If a clients shuts down, it send the 'leave' state.
backend <-> default backend broker <-> proxy <-> default proxy broker <-> clients
.. NOTE:
Client connections maintained by ClientLeave and ClientAnnounce events
as those events can have additional information about the clients, needed by GOsa.
But the clients also use this handler to be informed about active proxies/backends.
"""
_priority_ = 0
__active_connections = {}
def __init__(self):
self.env = Environment.getInstance()
self.topic = "%s/bus" % self.env.domain
super(MQTTConnectionHandler, self).__init__(client_id_prefix="MQTTConnectionHandler")
self.client_type = self.env.mode
self.e = EventMaker()
if hasattr(self.env, "core_uuid"):
self.client_id = self.env.core_uuid
else:
self.client_id = self.env.uuid
self.init = self.e.Event(self.e.BusClientState(
self.e.Id(self.client_id),
self.e.State('init'),
self.e.Type(self.client_type)
))
self.ready = self.e.Event(self.e.BusClientState(
self.e.Id(self.client_id),
self.e.State('ready'),
self.e.Type(self.client_type)
))
self.goodbye = self.e.Event(self.e.BusClientState(
self.e.Id(self.client_id),
self.e.State('leave'),
self.e.Type(self.client_type)
))
def serve(self):
# set last will
self.will_set(self.topic, self.goodbye, qos=1)
if self.client_type == "backend":
zope.event.subscribers.append(self.__handle_events)
self.wait_for_connection(self.send_init)
else:
self.wait_for_connection(self.send_ready)
def send_init(self):
self.log.info("MQTTConnectionHandler '%s' sending hello (init)" % self.client_type)
self.send_event(self.init, self.topic, qos=1)
def send_ready(self):
self.log.info("MQTTConnectionHandler '%s' sending hello (ready)" % self.client_type)
self.send_event(self.ready, self.topic, qos=1)
def stop(self):
self.log.info("MQTTConnectionHandler sending goodbye")
self.send_event(self.goodbye, self.topic, qos=1)
self.close()
def init_subscriptions(self):
""" add client subscriptions """
self.log.info("MQTTConnectionHandler subscribing to '%s' on '%s'" % (self.topic, self.host))
self.get_client().add_subscription(self.topic, qos=1)
self.get_client().set_subscription_callback(self._handle_message)
def _handle_message(self, topic, message):
if message[0:1] != "{":
# event received
try:
xml = objectify.fromstring(message)
if hasattr(xml, "BusClientState"):
client_id = xml.BusClientState.Id.text
client_type = xml.BusClientState.Type.text
client_state = xml.BusClientState.State.text
zope.event.notify(BusClientAvailability(client_id, client_state, client_type))
if client_state in ["init", "ready"]:
if client_type not in self.__active_connections:
self.__active_connections[client_type] = []
self.__active_connections[client_type].append(client_id)
elif client_state == "leave":
if client_type in self.__active_connections and client_id in self.__active_connections[client_type]:
self.__active_connections[client_type].remove(client_id)
elif hasattr(xml, "ClientPoll"):
# say hello
self.send_event(self.hello, self.topic, qos=1)
except etree.XMLSyntaxError as e:
self.log.error("Message parsing error: %s" % e)
def __handle_events(self, event):
"""
React on object modifications, send ready after index scan is finished
"""
if event.__class__.__name__ == "IndexScanFinished":
self.send_ready()
class IBusClientAvailability(Interface): # pragma: nocover
def __init__(self, obj):
pass
@implementer(IBusClientAvailability)
class BusClientAvailability(object):
def __init__(self, client_id, state, type):
self.client_id = client_id
self.state = state
self.type = type
|
Python
| 0.000001
|
@@ -775,45 +775,89 @@
elve
-d
+s
in
-a 2-
+2
stage
-d manner. First
+s. As soon as they are connected to the%0A MQTT Broker they
send
-ing
+ the
'in
@@ -869,18 +869,15 @@
tate
+.
%0A
-and w
+W
hen
@@ -972,24 +972,216 @@
eady' state.
+%0A Those two states can be send right after each other when the client does need no%0A initialization, but e.g. a backend need a certain amount of time after startup%0A to build the index.
%0A%0A If a c
@@ -1185,17 +1185,16 @@
a client
-s
shuts d
@@ -1201,24 +1201,25 @@
own, it send
+s
the 'leave'
|
a486d9bb6f498391997639b549b51b691490f4fa
|
Update settings.py
|
project_name/settings.py
|
project_name/settings.py
|
# -*- coding: utf-8 -*-
import os
from cartoview.settings import *
PROJECT_DIR = os.path.dirname(os.path.abspath(__file__))
BASE_DIR = os.path.dirname(PROJECT_DIR)
# static settings section
STATICFILES_DIRS += [os.path.join(PROJECT_DIR, "static"), ]
MEDIA_ROOT = os.path.join(BASE_DIR, "uploaded")
MEDIA_URL = "/uploaded/"
LOCAL_MEDIA_URL = "/uploaded/"
STATIC_ROOT = os.path.join(BASE_DIR, "static")
APPS_DIR = os.path.abspath(os.path.join(BASE_DIR, "apps"))
try:
from .local_settings import *
except:
pass
# cartoview setings
TEMPLATES[0]["DIRS"] = CARTOVIEW_TEMPLATE_DIRS + TEMPLATES[0]["DIRS"]
from cartoview import app_manager
from past.builtins import execfile
app_manager_settings = os.path.join(
os.path.dirname(app_manager.__file__), "settings.py")
execfile(os.path.realpath(app_manager_settings))
load_apps(APPS_DIR)
INSTALLED_APPS += CARTOVIEW_APPS
for settings_file in APPS_SETTINGS:
try:
execfile(settings_file)
except Exception as e:
print(e.message)
|
Python
| 0.000001
|
@@ -60,16 +60,49 @@
mport *%0A
+PROJECT_NAME = %22%7B%7Bproject_name%7D%7D%22
%0APROJECT
|
7deae8b534e3a0ce8a05804661ba2270ba549b61
|
Generate no_custom outputs in tests
|
run_tests.py
|
run_tests.py
|
import os
import unittest
from JSOV import generator
class Run_Tests(unittest.TestCase):
INPUT_JSON = "tests/input/sample.json"
INPUT_TEMPLATE = "tests/input/template.jsov"
OUTPUT_HTML = "tests/output/output.html"
OUTPUT_CSS = "tests/output/style.css"
GENERATED_DIR = "tests/generated/"
GENERATED_HTML = "tests/generated/output.html"
GENERATED_CSS = "tests/generated/style.css"
def create_instance(self):
self.visualizer = generator.Generator(self.INPUT_JSON, self.INPUT_TEMPLATE)
def test_input_json_exists(self):
self.assertTrue(os.path.exists(self.INPUT_JSON))
def test_input_jsov_exists(self):
self.assertTrue(os.path.exists(self.INPUT_TEMPLATE))
def test_output_html_exists(self):
self.assertTrue(os.path.exists(self.OUTPUT_HTML))
def test_output_css_exists(self):
self.assertTrue(os.path.exists(self.OUTPUT_CSS))
def test_generated_dir_exists(self):
self.assertTrue(os.path.exists(self.GENERATED_DIR))
if __name__ == "__main__":
unittest.main()
|
Python
| 0.998531
|
@@ -489,16 +489,124 @@
PLATE)%0A%0A
+%09def generate_no_custom(self):%0A%09%09self.visualizer.generate_htmlcss(self.GENERATED_HTML, self.GENERATED_CSS)%0A%0A
%09def tes
|
01bb58dfe82c69763d699b4ad5c637eee9bb7d36
|
remove commit title
|
solvebio/resource/datasetcommit.py
|
solvebio/resource/datasetcommit.py
|
import time
from .apiresource import ListableAPIResource
from .apiresource import CreateableAPIResource
from .apiresource import UpdateableAPIResource
from .solveobject import convert_to_solve_object
from .task import Task
def follow_commits(task, sleep_seconds):
"""Utility used to wait for commits"""
while True:
unfinished_commits = [
c for c in task.dataset_commits
if c.status in ['queued', 'running']
]
if not unfinished_commits:
print("All commits have finished processing")
break
print("{0}/{1} commits have finished processing"
.format(len(unfinished_commits),
len(task.dataset_commits)))
# Prints a status for each one
for commit in unfinished_commits:
commit.follow(loop=False, sleep_seconds=sleep_seconds)
time.sleep(sleep_seconds)
# refresh Task to get fresh dataset commits
task.refresh()
class DatasetCommit(CreateableAPIResource, ListableAPIResource,
UpdateableAPIResource):
"""
DatasetCommits represent a change made to a Dataset.
"""
RESOURCE_VERSION = 2
LIST_FIELDS = (
('id', 'ID'),
('title', 'Title'),
('description', 'Description'),
('status', 'Status'),
('created_at', 'Created'),
)
@property
def dataset(self):
return convert_to_solve_object(self['dataset'], client=self._client)
@property
def parent_object(self):
""" Get the commit objects parent Import or Migration """
from . import types
parent_klass = types.get(self.parent_job_model.split('.')[1])
return parent_klass.retrieve(self.parent_job_id, client=self._client)
def follow(self, loop=True, sleep_seconds=Task.SLEEP_WAIT_DEFAULT):
# Follow unfinished commits
while self.status in ['queued', 'running']:
if self.status == 'running':
print("Commit '{0}' ({4}) is {1}: {2}/{3} records indexed"
.format(self.title,
self.status,
self.records_modified,
self.records_total,
self.id))
else:
print("Commit '{0}' ({1}) is {2}"
.format(self.title,
self.id,
self.status))
# When following a parent DatasetImport we do not want to
# loop for status updates. It will handle its own looping
# so break out of loop and return here.
if not loop:
break
# sleep
time.sleep(sleep_seconds)
# refresh status
self.refresh()
if loop:
print("Commit '{0}' ({1}) is {2}".format(self.title,
self.id,
self.status))
print("View your imported data: "
"https://my.solvebio.com/data/{0}"
.format(self['dataset']['id']))
|
Python
| 0.012209
|
@@ -2007,34 +2007,26 @@
mit
-'%7B0%7D' (%7B4%7D)
+%7B3%7D
is %7B
-1
+0
%7D: %7B
-2%7D/%7B3
+1%7D/%7B2
%7D re
@@ -2079,87 +2079,15 @@
elf.
-title,%0A self.status,%0A
+status,
sel
@@ -2159,38 +2159,8 @@
tal,
-%0A
sel
@@ -2218,138 +2218,35 @@
mit
-'
%7B0%7D
-' (%7B1%7D)
is %7B
-2%7D%22%0A .format(self.title,%0A self.id,%0A
+1%7D%22.format(self.id,
sel
|
0a6e2be8c67265e37ff9600522ded4a861d165a2
|
Fix #54
|
cleverhans/utils_mnist.py
|
cleverhans/utils_mnist.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D
from keras.utils import np_utils
def data_mnist():
"""
Preprocess MNIST dataset
:return:
"""
# These values are specific to MNIST
img_rows = 28
img_cols = 28
nb_classes = 10
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
return X_train, Y_train, X_test, Y_test
def model_mnist(logits=False,input_ph=None, img_rows=28, img_cols=28, nb_filters=64, nb_classes=10):
"""
Defines MNIST model using Keras sequential model
:param logits: If set to False, returns a Keras model, otherwise will also return logits tensor
:param input_ph: The TensorFlow placeholder for the input (needed if returning logits)
:return:
"""
model = Sequential()
model.add(Dropout(0.2, input_shape=(1, img_rows, img_cols)))
model.add(Convolution2D(nb_filters, 8, 8,
subsample=(2, 2),
border_mode="same"
))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters * 2, 6, 6, subsample=(2, 2),
border_mode="valid"))
model.add(Activation('relu'))
model.add(Convolution2D(nb_filters *2, 5, 5, subsample=(1, 1)))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(nb_classes))
if logits:
logits_tensor = model(input_ph)
model.add(Activation('softmax'))
if logits:
return model, logits_tensor
else:
return model
|
Python
| 0.000001
|
@@ -1797,32 +1797,79 @@
bsample=(2, 2),%0A
+ dim_ordering=%22th%22,%0A
@@ -2035,24 +2035,43 @@
2),%0A
+ dim_ordering=%22th%22,
border_mode
@@ -2180,16 +2180,43 @@
e=(1, 1)
+,%0A dim_ordering=%22th%22
))%0A m
|
70b61dd599529009f9cf9631c9ae505dd210c23b
|
Fix flake8 issues with OtsuMultipleThreshold.py
|
tomviz/python/OtsuMultipleThreshold.py
|
tomviz/python/OtsuMultipleThreshold.py
|
def transform_scalars(dataset):
"""This filter performs semi-automatic multithresholding of a data set.
Voxels are automatically classified into a chosen number of classes such that
inter-class variance of the voxel values is minimized. The output is a label
map with one label per voxel class.
"""
try:
import itk
import vtk
from tomviz import utils
except Exception as exc:
print("Could not import necessary module(s) itk, vtk, or tomviz.utils")
print(exc)
#----USER SPECIFIED VARIABLES----#
###NUMBEROFTHRESHOLDS### # Specify number of thresholds between classes
###ENABLEVALLEYEMPHASIS### # Enable valley emphasis.
# Return values
returnValues = None
# Add a try/except around the ITK portion. ITK exceptions are
# passed up to the Python layer, so we can at least report what
# went wrong with the script, e.g., unsupported image type.
try:
# Get the ITK image
itk_image = utils.convert_vtk_to_itk_image(dataset)
itk_input_image_type = type(itk_image)
# OtsuMultipleThresholdsImageFilter's wrapping requires that the input
# and output image types be the same.
# TODO - handle casting of float image types to some sensible integer
# format.
itk_threshold_image_type = itk_input_image_type
# Otsu multiple threshold filter
otsu_filter = itk.OtsuMultipleThresholdsImageFilter[itk_input_image_type, itk_threshold_image_type].New()
otsu_filter.SetNumberOfThresholds(number_of_thresholds)
otsu_filter.SetValleyEmphasis(enable_valley_emphasis);
otsu_filter.SetInput(itk_image)
otsu_filter.Update()
print("Otsu threshold(s): %s" % (otsu_filter.GetThresholds(),))
itk_image_data = otsu_filter.GetOutput()
label_buffer = itk.PyBuffer[itk_threshold_image_type].GetArrayFromImage(itk_image_data)
label_map_data_set = vtk.vtkImageData()
label_map_data_set.CopyStructure(dataset)
utils.set_label_map(label_map_data_set, label_buffer);
# Set up dictionary to return operator results
returnValues = {}
returnValues["label_map"] = label_map_data_set
except Exception as exc:
print("Exception encountered while running OtsuMultipleThreshold")
print(exc)
return returnValues
|
Python
| 0
|
@@ -181,17 +181,17 @@
such
+%0A
that
-%0A
int
@@ -261,18 +261,18 @@
is a
+%0A
label
-%0A
map
@@ -1462,16 +1462,29 @@
eFilter%5B
+%0A
itk_inpu
@@ -1529,16 +1529,16 @@
%5D.New()%0A
-
@@ -1654,17 +1654,16 @@
mphasis)
-;
%0A
@@ -1908,16 +1908,31 @@
ge_type%5D
+ %5C%0A
.GetArra
@@ -2049,32 +2049,32 @@
ucture(dataset)%0A
+
utils.se
@@ -2122,9 +2122,8 @@
fer)
-;
%0A%0A
|
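The hunks above are mechanical PEP 8 repairs: stray semicolons are dropped and over-long lines are split. A sketch of the two wrapping styles the diff applies, reusing the names from the script above (the itk bindings imported earlier must be available for this to run):

# implicit continuation: break inside the subscript brackets
otsu_filter = itk.OtsuMultipleThresholdsImageFilter[
    itk_input_image_type, itk_threshold_image_type].New()

# explicit continuation: a backslash before the chained call
label_buffer = itk.PyBuffer[itk_threshold_image_type] \
    .GetArrayFromImage(itk_image_data)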
47bca8347afd67765756abcb7b7c20c817171697
|
Remove unused import
|
runme.py
|
runme.py
|
import datetime
import os
import sys
from socket import socket, AF_INET, SOCK_STREAM, SOL_SOCKET, SO_REUSEADDR
from arguments import parse
from cases import cases
from httphandler import HTTPRequest
from settings import HONEYPORT, HONEYFOLDER
# TODO unittests
# import signal
# exit -- something to do on SIGINT
# signal.signal(signal.SIGINT, exit)
# TODO implement SIGINT handler
def create_file(message, directory):
if not os.path.exists(HONEYFOLDER+directory):
os.makedirs(HONEYFOLDER+directory)
currtime = str(datetime.datetime.now()).replace(':', ';')
filename = HONEYFOLDER+directory+"/"+currtime
f = open(filename, 'w')
f.write(str(message))
f.close()
def get_honey(path):
global unknown_cases
outputdata = ""
stringfile = ""
msgsize = 0
if path in cases:
respfilename = cases[path]
f = open('responses/'+respfilename)
stringfile = f.read()
f.close()
if respfilename == "webdav.xml":
msgsize = sys.getsizeof(stringfile)
outputdata += 'HTTP/1.1 207 Multi-Status\r\n'
outputdata += 'Content-Type: application/xml; charset="utf-8\r\n'
outputdata += '\r\n'
outputdata += stringfile
else:
msgsize = sys.getsizeof(stringfile)
outputdata += 'HTTP/1.1 200 OK\r\n'
outputdata += 'Server: Apache/1.3.42 (Unix) (Red Hat/Linux)\r\n'
outputdata += 'Content-Type: text/html\r\n'
outputdata += 'Connection: close\r\n'
outputdata += 'Date: ' + str(datetime.datetime.now())
outputdata += 'Content-Length: ' + str(msgsize)
outputdata += '\r\n'
outputdata += stringfile
print ip_addr + " " + path + " gotcha!"
# TODO turn off verbose by args
else:
print ip_addr + " " + path + " not detected..."
if path not in unknown_cases:
unknown_cases.append(path)
with open("cases.txt", "a") as myfile:
myfile.write(path + "\n")
print path + " added to list"
# TODO add to sources, if not
respfilename = cases["zero"]
f = open('responses/'+respfilename)
stringfile = f.read()
f.close()
msgsize = sys.getsizeof(stringfile)
outputdata += 'HTTP/1.1 200 OK\r\n'
outputdata += 'Server: Apache/1.3.42 (Unix) (Red Hat/Linux)\r\n'
outputdata += 'Content-Type: text/html\r\n'
outputdata += 'Connection: close\r\n'
outputdata += 'Date: ' + str(datetime.datetime.now())
outputdata += 'Content-Length: ' + str(msgsize)
outputdata += '\r\n'
outputdata += stringfile
return outputdata
unknown_cases = [line.rstrip('\n') for line in open('cases.txt')]
serverSocket = socket(AF_INET, SOCK_STREAM)
serverSocket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
args = parse()
serverSocket.bind(('', args.port))
serverSocket.listen(1)
print "Serving honey on port %s" % args.port
while True:
connectionSocket, addr = serverSocket.accept()
try:
message = connectionSocket.recv(30*1024)
ip_addr = connectionSocket.getpeername()[0]
create_file(message, ip_addr)
path = ""
request = HTTPRequest(message)
if request.error_code is None:
path = request.path
outputdata = get_honey(path)
else:
path = str(request.error_code) # use non-http parser here
outputdata = get_honey(path)
connectionSocket.send(outputdata)
connectionSocket.close()
except: # rewrite this
# print "Caught exception socket.error : %s" % e
connectionSocket.close()
serverSocket.close() # This line is never achieved, implement in SIGINT?
|
Python
| 0
|
@@ -217,19 +217,8 @@
port
- HONEYPORT,
HON
|
9d3ed4b59ca951fe877986a72aa63092bb536385
|
fix video cache
|
apps/widget/video_cache.py
|
apps/widget/video_cache.py
|
# Universal Subtitles, universalsubtitles.org
#
# Copyright (C) 2010 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.core.cache import cache
from videos.types.base import VideoTypeError
TIMEOUT = 60 * 60 * 24 * 5 # 5 days
def get_video_id(video_url):
cache_key = _video_id_key(video_url)
value = cache.get(cache_key)
if value is not None:
return value
else:
from videos.models import Video
try:
video, create = Video.get_or_create_for_url(video_url)
except VideoTypeError:
return None
video_id = video.video_id
cache.set(cache_key, video_id, TIMEOUT)
return video_id
def _invalidate_cache(video_id, language_code=None):
cache.delete(_video_urls_key(video_id))
cache.delete(_subtitles_dict_key(video_id, language_code))
cache.delete(_subtitles_dict_key(video_id, None))
cache.delete(_subtitles_count_key(video_id))
cache.delete(_video_languages_key(video_id))
# only used while testing.
def invalidate_video_id(video_url):
cache.delete(_video_id_key(video_url))
def on_subtitle_language_save(sender, instance, **kwargs):
_invalidate_cache(instance.video.video_id, instance.language)
def on_subtitle_version_save(sender, instance, **kwargs):
_invalidate_cache(instance.language.video.video_id,
instance.language.language)
def on_video_url_save(sender, instance, **kwargs):
_invalidate_cache(instance.video.video_id)
def _video_id_key(video_url):
return 'video_id_{0}'.format(video_url)
def _video_urls_key(video_id):
return 'widget_video_urls_{0}'.format(video_id)
def _subtitles_dict_key(video_id, language_code, version_no=None):
return 'widget_subtitles_{0}{1}{2}'.format(video_id, language_code, version_no)
def _subtitles_count_key(video_id):
return "subtitle_count_{0}".format(video_id)
def _video_languages_key(video_id):
return "widget_video_languages_{0}".format(video_id)
def get_video_urls(video_id):
cache_key = _video_urls_key(video_id)
value = cache.get(cache_key)
if value is not None:
return value
else:
from videos.models import Video
video_urls = \
[vu.effective_url for vu
in Video.objects.get(video_id=video_id).videourl_set.all()]
cache.set(cache_key, video_urls, TIMEOUT)
return video_urls
def get_subtitles_dict(
video_id, language_code, version_no, subtitles_dict_fn):
cache_key = _subtitles_dict_key(video_id, language_code, version_no)
value = cache.get(cache_key)
if value is not None:
cached_value = value
else:
from videos.models import Video
video = Video.objects.get(video_id=video_id)
video.update_subtitles_fetched(language_code)
version = video.version(version_no, language_code)
if version:
cached_value = subtitles_dict_fn(version)
else:
cached_value = 0
cache.set(cache_key, cached_value, TIMEOUT)
return None if cached_value == 0 else cached_value
def get_subtitle_count(video_id):
cache_key = _subtitles_count_key(video_id)
value = cache.get(cache_key)
if value is not None:
return value
else:
from videos.models import Video
video = Video.objects.get(video_id=video_id)
version = video.latest_version()
return_value = 0 if version is None else version.subtitle_set.count()
cache.set(cache_key, return_value, TIMEOUT)
return cache.get(cache_key)
def get_video_languages(video_id):
cache_key = _video_languages_key(video_id)
value = cache.get(cache_key)
if value is not None:
return value
else:
from videos.models import Video
video = Video.objects.get(video_id=video_id)
translated_languages = video.subtitlelanguage_set.filter(
is_complete=True).filter(is_original=False)
return_value = [(t.language, t.percent_done) for t in translated_languages]
cache.set(cache_key, return_value, TIMEOUT)
return return_value
|
Python
| 0.000003
|
@@ -3839,32 +3839,48 @@
.get(cache_key)%0A
+ print value%0A
if value is
@@ -4199,36 +4199,28 @@
return
-cache.get(cache_key)
+return_value
%0A%0Adef ge
|
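The fix above matters because a cache.set() followed immediately by cache.get() is not guaranteed to return the value just stored (the write can silently fail, or the entry can be evicted between the two calls), so the function could hand back None where callers expect an int. A minimal sketch of the corrected pattern, with the module's globals abstracted into parameters:

def get_subtitle_count(video_id, cache, compute_count, timeout):
    """Return a cached count, computing and storing it on a miss."""
    cache_key = "subtitle_count_{0}".format(video_id)
    value = cache.get(cache_key)
    if value is not None:
        return value
    return_value = compute_count(video_id)
    cache.set(cache_key, return_value, timeout)
    # return the local value, not cache.get(cache_key): a re-read can
    # miss if the write failed or the entry was already evicted
    return return_value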
3e1f330236fdb0af692099f91ee3435d273a7bad
|
Fix import error "No module named six.moves" for plugin sanity job
|
tools/generate-tempest-plugins-list.py
|
tools/generate-tempest-plugins-list.py
|
#! /usr/bin/env python
# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This script is intended to be run as part of a periodic proposal bot
# job in OpenStack infrastructure.
#
# In order to function correctly, the environment in which the
# script runs must have
# * network access to the review.openstack.org Gerrit API
# working directory
# * network access to https://git.openstack.org/cgit
import json
import re
from six.moves import urllib
url = 'https://review.openstack.org/projects/'
# This is what a project looks like
'''
"openstack-attic/akanda": {
"id": "openstack-attic%2Fakanda",
"state": "READ_ONLY"
},
'''
def is_in_openstack_namespace(proj):
return proj.startswith('openstack/')
# Rather than returning a 404 for a nonexistent file, cgit delivers a
# 0-byte response to a GET request. It also does not provide a
# Content-Length in a HEAD response, so the way we tell if a file exists
# is to check the length of the entire GET response body.
def has_tempest_plugin(proj):
try:
r = urllib.request.urlopen(
"https://git.openstack.org/cgit/%s/plain/setup.cfg" % proj)
except urllib.error.HTTPError as err:
if err.code == 404:
return False
p = re.compile('^tempest\.test_plugins', re.M)
if p.findall(r.read().decode('utf-8')):
return True
else:
False
r = urllib.request.urlopen(url)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
# json library won't choke.
projects = sorted(filter(is_in_openstack_namespace, json.loads(r.read()[4:])))
# Retrieve projects having no deb, ui or spec namespace as those namespaces
# do not contain tempest plugins.
projects_list = [i for i in projects if not (i.startswith('openstack/deb-') or
i.endswith('-ui') or
i.endswith('-specs'))]
found_plugins = list(filter(has_tempest_plugin, projects_list))
# Every element of the found_plugins list begins with "openstack/".
# We drop those initial 10 octets when printing the list.
for project in found_plugins:
print(project[10:])
|
Python
| 0.000005
|
@@ -995,36 +995,265 @@
re%0A%0A
-from six.moves import urllib
+try:%0A # For Python 3.0 and later%0A from urllib.error import HTTPError as HTTPError%0A import urllib.request as urllib%0Aexcept ImportError:%0A # Fall back to Python 2's urllib2%0A import urllib2 as urllib%0A from urllib2 import HTTPError as HTTPError
%0A%0A%0Au
@@ -1843,32 +1843,24 @@
r = urllib.
-request.
urlopen(%0A
@@ -1943,21 +1943,8 @@
ept
-urllib.error.
HTTP
@@ -2161,24 +2161,16 @@
urllib.
-request.
urlopen(
|
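The replacement block above is the standard urllib compatibility shim: it binds a single module alias exposing urlopen on both major Python versions, imports HTTPError alongside it, and drops the six dependency entirely. A self-contained sketch of the pattern, with a hypothetical helper to show it in use:

try:
    # Python 3: urlopen lives in urllib.request, HTTPError in urllib.error
    import urllib.request as urllib
    from urllib.error import HTTPError
except ImportError:
    # Python 2: both live in urllib2
    import urllib2 as urllib
    from urllib2 import HTTPError

def exists(url):
    """Return True if a GET succeeds, False on a 404 (hypothetical helper)."""
    try:
        urllib.urlopen(url)
        return True
    except HTTPError as err:
        if err.code == 404:
            return False
        raise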
9507516da333a42e0ab0def6741bbd8755a5b19f
|
Fix submodule imports before running tests.
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
COLOR_G = '\033[0;32m' # Green.
COLOR_R = '\033[0;31m' # Red.
COLOR_NONE = '\033[0;m' # Reset to terminal's foreground color.
def main():
success = True
# Add needed paths to the python path.
tests_dir = os.path.normpath(os.path.join(os.path.dirname(__file__)))
root_dir = tests_dir
appengine_dir = '/usr/local/google_appengine/'
sys.path.insert(0, appengine_dir)
sys.path.insert(0, root_dir)
print 'Running tests...'
test_filenames = sys.argv[1:]
if not test_filenames:
test_filenames = []
for root, unused_dirs, files in os.walk(tests_dir):
# Skip modules directory.
if 'modules' in root:
continue
new_tests = [os.path.join(root, f) for f in files]
new_tests = [name.replace(tests_dir + '/', '') for name in new_tests]
test_filenames.extend(new_tests)
test_filenames = sorted(test_filenames)
for basename in test_filenames:
filename = os.path.join(tests_dir, basename)
if not filename.endswith('_test.py'):
continue
sys.stdout.write('Testing %s\r' % basename)
sys.stdout.flush()
env = os.environ.copy()
env['PYTHONPATH'] = ':'.join([tests_dir, root_dir, appengine_dir,
env.get('PYTHONPATH', '')])
process = subprocess.Popen([sys.executable, filename], env=env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
try:
stdout, stderr = process.communicate()
except KeyboardInterrupt:
process.terminate()
print process.stdout.read()
print process.stderr.read()
sys.exit('Tests terminated.')
# Certain tests output to stderr but correctly pass. For clarity, we hide
# the output unless the test itself fails.
if process.returncode != 0:
msg = [COLOR_R, 'FAILED', COLOR_NONE, ': ', basename]
print ''.join(msg)
print stdout
print stderr
success = False
else:
msg = [COLOR_G, 'SUCCESS', COLOR_NONE, ': ', basename]
print ''.join(msg)
if success:
print 'All tests were successful.'
else:
# Important: this returns a non-zero return code.
sys.exit('One or more tests failed.')
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -782,16 +782,70 @@
color.%0A%0A
+from grow import submodules%0Asubmodules.fix_imports()%0A%0A
%0Adef mai
|
e065515362281039f459e5fa79292957f0435aa7
|
Fix copyright year
|
opentracing_instrumentation/client_hooks/_singleton.py
|
opentracing_instrumentation/client_hooks/_singleton.py
|
# Copyright (c) 2018 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import functools
NOT_CALLED = 1
CALLED = 2
def singleton(func):
"""
This decorator allows you to make sure that a function is called once and
only once. Note that recursive functions will still work.
WARNING: Not thread-safe!!!
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
if wrapper.__call_state__ == CALLED:
return
ret = func(*args, **kwargs)
wrapper.__call_state__ = CALLED
return ret
def reset():
wrapper.__call_state__ = NOT_CALLED
wrapper.reset = reset
reset()
# save original func to be able to patch and restore multiple times from
# unit tests
wrapper.__original_func = func
return wrapper
|
Python
| 0.999997
|
@@ -9,16 +9,21 @@
ght (c)
+2015,
2018 Ube
|
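This row only bumps a copyright year, but the file it touches deserves a usage note: the @singleton decorator guards one-shot patching functions so repeated calls are no-ops until reset() is invoked, which is handy when tests install and remove monkey-patches repeatedly. A usage sketch against the decorator defined in the contents above:

calls = []

@singleton
def install_patches():
    calls.append("patched")

install_patches()
install_patches()          # second call is a no-op
assert calls == ["patched"]

install_patches.reset()    # allow the hook to run again
install_patches()
assert calls == ["patched", "patched"]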
d896082b282d17616573de2bcca4b383420d1e7a
|
Fix a bad import (get_version)
|
python/__init__.py
|
python/__init__.py
|
# -*- coding: UTF-8 -*-
# Copyright (C) 2009 Itaapy, ArsAperta, Pierlis, Talend
# Import from itools
from itools.pkg import get_version
__version__ = get_version()
|
Python
| 0.001665
|
@@ -111,11 +111,12 @@
ols.
-pkg
+core
imp
|
b78ce84f2a36789fc0fbb6b184b5c8d8ebb23234
|
Clarify py.test arguments in run_test.py
|
run_tests.py
|
run_tests.py
|
#!/usr/bin/env python
import sys
import pytest
if __name__ == '__main__':
sys.exit(pytest.main())
|
Python
| 0.000015
|
@@ -68,16 +68,372 @@
ain__':%0A
+ # show output results from every test function%0A args = %5B'-v'%5D%0A # show the message output for skipped and expected failure tests%0A args.append('-rxs')%0A # compute coverage stats for bluesky%0A args.extend(%5B'--cov', 'bluesky'%5D)%0A # call pytest and exit with the return code from pytest so that%0A # travis will fail correctly if tests fail%0A
sys.
@@ -449,11 +449,15 @@
st.main(
+args
))%0A
|
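Applied to the three-line original, the diff yields the script below (reconstructed from the hunks; the --cov flag assumes the pytest-cov plugin is installed):

#!/usr/bin/env python
import sys
import pytest

if __name__ == '__main__':
    # show output results from every test function
    args = ['-v']
    # show the message output for skipped and expected failure tests
    args.append('-rxs')
    # compute coverage stats for bluesky
    args.extend(['--cov', 'bluesky'])
    # call pytest and exit with the return code from pytest so that
    # travis will fail correctly if tests fail
    sys.exit(pytest.main(args))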
b42112d6286b22db65cf71885d92f77a6fa91e06
|
update scribbler and table configs
|
twirl.py
|
twirl.py
|
#!/usr/bin/env python
# Tai Sakuma <sakuma@cern.ch>
import os, sys
import argparse
import ROOT
import AlphaTwirl
import Framework
import Scribbler
ROOT.gROOT.SetBatch(1)
##__________________________________________________________________||
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--input", help = "the path to the input file")
parser.add_argument("-p", "--process", default = 1, type = int, help = "number of processes to run in parallel")
parser.add_argument('-o', '--outdir', default = os.path.join('tbl', 'out'))
parser.add_argument('-q', '--quiet', action = 'store_true', default = False, help = 'quiet mode')
parser.add_argument('--force', action = 'store_true', default = False, help = 'recreate all output files')
args = parser.parse_args()
##__________________________________________________________________||
def main():
reader_collector_pairs = [ ]
#
# configure scribblers
#
NullCollector = AlphaTwirl.EventReader.NullCollector
reader_collector_pairs.extend([
(Scribbler.EventAuxiliary(), NullCollector()),
(Scribbler.MET(), NullCollector()),
# (Scribbler.Scratch(), NullCollector()),
])
#
# configure tables
#
Binning = AlphaTwirl.Binning.Binning
Echo = AlphaTwirl.Binning.Echo
Round = AlphaTwirl.Binning.Round
RoundLog = AlphaTwirl.Binning.RoundLog
Combine = AlphaTwirl.Binning.Combine
echo = Echo(nextFunc = None)
tblcfg = [
dict(branchNames = ('run', ), binnings = (echo, )),
dict(branchNames = ('lumi', ), binnings = (echo, )),
dict(branchNames = ('eventId', ), binnings = (echo, )),
dict(branchNames = ('pfMet', ), binnings = (Round(10, 0), )),
]
# complete table configs
tableConfigCompleter = AlphaTwirl.Configure.TableConfigCompleter(
defaultCountsClass = AlphaTwirl.Counter.Counts,
defaultOutDir = args.outdir
)
tblcfg = [tableConfigCompleter.complete(c) for c in tblcfg]
# do not recreate tables that already exist unless the force option is used
if not args.force:
tblcfg = [c for c in tblcfg if c['outFile'] and not os.path.exists(c['outFilePath'])]
reader_collector_pairs.extend(
[AlphaTwirl.Configure.build_counter_collector_pair(c) for c in tblcfg]
)
#
# configure data sets
#
dataset = Framework.Dataset('root3', [args.input])
#
# run
#
fw = Framework.Framework(quiet = args.quiet, process = args.process)
fw.run(
dataset = dataset,
reader_collector_pairs = reader_collector_pairs
)
##__________________________________________________________________||
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -1121,32 +1121,87 @@
llCollector()),%0A
+ (Scribbler.HFPreRecHit(), NullCollector()),%0A
# (Scrib
@@ -1784,24 +1784,539 @@
10, 0), )),%0A
+ dict(%0A branchNames = ('hfrechit_QIE10_energy', ),%0A binnings = (Round(0.1, 0), ),%0A indices = ('(*)', ),%0A outColumnNames = ('energy', ),%0A%0A ),%0A dict(%0A branchNames = ('hfrechit_ieta', 'hfrechit_iphi', 'hfrechit_QIE10_index', 'hfrechit_QIE10_energy'),%0A binnings = (echo, echo, echo, Round(0.1, 0)),%0A indices = ('(*)', '%5C%5C1', '%5C%5C1', '%5C%5C1'),%0A outColumnNames = ('ieta', 'iphi', 'idxQIE10', 'energy'),%0A%0A ),%0A
%5D%0A%0A #
|
bc9112cc5532a08f8b577935cb7dd7b912743ac3
|
remove superfluous import
|
test/test_extract_value.py
|
test/test_extract_value.py
|
# Copyright Hugh Perkins 2016
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This needs CUDA_HOME defined, pointing to eg /usr/local/cuda-7.5
"""
import subprocess
import numpy as np
import pyopencl as cl
import pyopencl.tools
import pytest
import os
from os import path
gpu_idx = int(os.environ.get('TARGET_GPU', 0))
mf = cl.mem_flags
clang_path = 'clang++-3.8'
def mangle(name, param_types):
mangled = '_Z%s%s' % (len(name), name)
for param in param_types:
if param.replace(' ', '') == 'float*':
mangled += 'Pf'
elif param.replace(' ', '') == 'int*':
mangled += 'Pi'
elif param.replace(' ', '') == 'int':
mangled += 'i'
elif param.replace(' ', '') == 'float':
mangled += 'f'
elif param.endswith('*'):
# assume pointer to struct
param = param.replace(' ', '').replace('*', '')
mangled += 'P%s%s' % (len(param), param)
else:
raise Exception('not implemented %s' % param)
return mangled
@pytest.fixture(scope='module')
def context():
platforms = cl.get_platforms()
i = 0
ctx = None
for platform in platforms:
gpu_devices = platform.get_devices(device_type=cl.device_type.GPU)
if gpu_idx < i + len(gpu_devices):
ctx = cl.Context(devices=[gpu_devices[gpu_idx - i]])
break
i += len(gpu_devices)
if ctx is None:
raise Exception('unable to find gpu at index %s' % gpu_idx)
print('context', ctx)
# ctx = cl.create_some_context()
return ctx
@pytest.fixture(scope='module')
def queue(context):
q = cl.CommandQueue(context)
return q
@pytest.fixture(scope='module')
def q(queue):
return queue
@pytest.fixture(scope='module')
def ctx(context):
return context
@pytest.fixture(scope='module')
def extract_value_cl():
# lets check it's compileable ll first, using llvm
# cu_filepath = 'test/extract_value.cu'
# ll_filepath = 'test/generated/extract_value.ll'
ll_filepath = 'test/extract_value.ll'
cl_filepath = 'test/generated/extract_value.cl'
# cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda-7.5')
# print(subprocess.check_output([
# clang_path,
# '-I%s/include' % cuda_home,
# '-include', 'include/fake_funcs.h',
# cu_filepath,
# '--cuda-device-only',
# '-emit-llvm', '-S',
# '-o', ll_filepath
# ]).decode('utf-8'))
print(subprocess.check_output([
clang_path,
'-c', ll_filepath,
'-O3',
'-o', '/tmp/~foo'
]).decode('utf-8'))
if not path.isdir('test/generated'):
os.makedirs('test/generated')
print(subprocess.check_output([
'build/ir-to-opencl',
'--debug',
ll_filepath,
cl_filepath
]).decode('utf-8'))
return cl_filepath
@pytest.fixture(scope='module')
def extract_value(context, extract_value_cl):
with open(extract_value_cl, 'r') as f:
sourcecode = f.read()
prog = cl.Program(context, sourcecode).build()
return prog
@pytest.fixture
def int_data():
np.random.seed(123)
int_data = np.random.randint(1024, size=(1024,), dtype=np.int32)
return int_data
@pytest.fixture
def float_data():
np.random.seed(124)
float_data = np.random.randn(1024).astype(np.float32)
return float_data
@pytest.fixture
def int_data_gpu(int_data, ctx):
int_data_gpu = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=int_data)
return int_data_gpu
@pytest.fixture
def float_data_gpu(float_data, ctx):
float_data_gpu = cl.Buffer(ctx, mf.READ_WRITE | mf.COPY_HOST_PTR, hostbuf=float_data)
return float_data_gpu
def test_program_compiles(extract_value):
pass
def test_copy_float(extract_value, q, float_data, float_data_gpu):
extract_value.__getattr__(mangle('test_floats', ['float *']))(q, (32,), (32,), float_data_gpu)
cl.enqueue_copy(q, float_data, float_data_gpu)
q.finish()
assert float_data[0] == float_data[1]
|
Python
| 0.998454
|
@@ -685,30 +685,8 @@
cl%0A
-import pyopencl.tools%0A
impo
|
6b89fab7d7ac30a04bcac063a247b1e2a03b4ac7
|
Use get_rpc_transport instead of get_transport
|
barbican/queue/__init__.py
|
barbican/queue/__init__.py
|
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Queue objects for Barbican
"""
import oslo_messaging as messaging
from oslo_messaging.notify import dispatcher as notify_dispatcher
from oslo_messaging.notify import listener
from oslo_messaging.rpc import dispatcher
from barbican.common import config
from barbican.common import exception
# Constant at one place if this needs to be changed later
KS_NOTIFICATIONS_GRP_NAME = config.KS_NOTIFICATIONS_GRP_NAME
CONF = config.CONF
TRANSPORT = None
IS_SERVER_SIDE = True
ALLOWED_EXMODS = [
exception.__name__,
]
def get_allowed_exmods():
return ALLOWED_EXMODS
def init(conf, is_server_side=True):
global TRANSPORT, IS_SERVER_SIDE
exmods = get_allowed_exmods()
IS_SERVER_SIDE = is_server_side
TRANSPORT = messaging.get_transport(conf, allowed_remote_exmods=exmods)
def is_server_side():
return IS_SERVER_SIDE
def cleanup():
global TRANSPORT
TRANSPORT.cleanup()
TRANSPORT = None
def get_target():
return messaging.Target(topic=CONF.queue.topic,
namespace=CONF.queue.namespace,
version=CONF.queue.version,
server=CONF.queue.server_name)
def get_client(target=None, version_cap=None, serializer=None):
if not CONF.queue.enable:
return None
queue_target = target or get_target()
return messaging.RPCClient(TRANSPORT,
target=queue_target,
version_cap=version_cap,
serializer=serializer)
def get_server(target, endpoints, serializer=None):
access_policy = dispatcher.DefaultRPCAccessPolicy
return messaging.get_rpc_server(TRANSPORT,
target,
endpoints,
executor='eventlet',
serializer=serializer,
access_policy=access_policy)
def get_notification_target():
conf_opts = getattr(CONF, KS_NOTIFICATIONS_GRP_NAME)
return messaging.Target(exchange=conf_opts.control_exchange,
topic=conf_opts.topic,
version=conf_opts.version,
fanout=True)
def get_notification_server(targets, endpoints, serializer=None):
"""Retrieve notification server
    This notification server uses the same transport configuration as other
    barbican functionality, such as async order processing.

    The assumption is that the messaging infrastructure is shared (the same)
    among different barbican features.
"""
allow_requeue = getattr(getattr(CONF, KS_NOTIFICATIONS_GRP_NAME),
'allow_requeue')
TRANSPORT._require_driver_features(requeue=allow_requeue)
dispatcher = notify_dispatcher.NotificationDispatcher(endpoints,
serializer)
# we don't want blocking executor so use eventlet as executor choice
return listener.NotificationServer(TRANSPORT, targets, dispatcher,
executor='eventlet',
allow_requeue=allow_requeue)
|
Python
| 0.00005
|
@@ -1335,16 +1335,20 @@
ing.get_
+rpc_
transpor
|
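oslo.messaging split transport construction into get_rpc_transport() and get_notification_transport() and deprecated the generic get_transport(), so the one-word change above moves RPC setup onto the dedicated constructor. The init() function after the diff, relying on the module globals defined in the contents above:

import oslo_messaging as messaging

def init(conf, is_server_side=True):
    global TRANSPORT, IS_SERVER_SIDE
    exmods = get_allowed_exmods()
    IS_SERVER_SIDE = is_server_side
    # RPC-specific transport; the generic get_transport() is deprecated
    TRANSPORT = messaging.get_rpc_transport(conf,
                                            allowed_remote_exmods=exmods)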
d134e9c461af2c9b67673aa97fc15a302dcbc58c
|
Add comments
|
beetsplug/smartplaylist.py
|
beetsplug/smartplaylist.py
|
from __future__ import print_function
from beets.plugins import BeetsPlugin
from beets import config, ui
from beets.util import normpath, syspath
import os
database_changed = False
library = None
def update_playlists(lib):
print("Updating smart playlists...")
playlists = config['smartplaylist']['playlists'].get(list)
playlist_dir = config['smartplaylist']['playlist_dir'].get(unicode)
relative_to = config['smartplaylist']['relative_to'].get()
if relative_to:
relative_to = normpath(relative_to)
for playlist in playlists:
items = lib.items(playlist['query'])
if relative_to:
paths = [os.path.relpath(item.path, relative_to) for item in items]
else:
paths = [item.path for item in items]
basename = playlist['name'].encode('utf8')
m3u_path = os.path.join(playlist_dir, basename)
with open(syspath(m3u_path), 'w') as f:
for path in paths:
f.write(path + '\n')
print("... Done")
class SmartPlaylistPlugin(BeetsPlugin):
def __init__(self):
super(SmartPlaylistPlugin, self).__init__()
self.config.add({
'relative_to': None,
'playlists': []
})
def commands(self):
def update(lib, opts, args):
update_playlists(lib)
spl_update = ui.Subcommand('spl_update',
help='update the smart playlists')
spl_update.func = update
return [spl_update]
@SmartPlaylistPlugin.listen('database_change')
def handle_change(lib):
global library
global database_changed
library = lib
database_changed = True
@SmartPlaylistPlugin.listen('cli_exit')
def update():
if database_changed:
update_playlists(library)
|
Python
| 0
|
@@ -1,12 +1,732 @@
+# This file is part of beets.%0A# Copyright 2013, Dang Mai %3Ccontact@dangmai.net%3E.%0A#%0A# Permission is hereby granted, free of charge, to any person obtaining%0A# a copy of this software and associated documentation files (the%0A# %22Software%22), to deal in the Software without restriction, including%0A# without limitation the rights to use, copy, modify, merge, publish,%0A# distribute, sublicense, and/or sell copies of the Software, and to%0A# permit persons to whom the Software is furnished to do so, subject to%0A# the following conditions:%0A#%0A# The above copyright notice and this permission notice shall be%0A# included in all copies or substantial portions of the Software.%0A%0A%22%22%22Generates smart playlists based on beets queries.%0A%22%22%22%0A
from __futur
@@ -871,16 +871,125 @@
ort os%0A%0A
+# Global variables so that smartplaylist can detect database changes and run%0A# only once before beets exits.%0A
database
|
178834a747be8b9a60fb36dc95513305cf10851d
|
Fix cache busting (#285)
|
runserver.py
|
runserver.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import logging
import time
import re
import requests
# Currently supported pgoapi
pgoapi_version = "1.1.6"
# Moved here so logger is configured at load time
logging.basicConfig(format='%(asctime)s [%(threadName)16s][%(module)14s][%(levelname)8s] %(message)s')
log = logging.getLogger()
# Make sure pogom/pgoapi is actually removed if it is an empty directory
# This is a leftover directory from the time pgoapi was embedded in PokemonGo-Map
# The empty directory will cause problems with `import pgoapi` so it needs to go
oldpgoapiPath = os.path.join(os.path.dirname(__file__), "pogom/pgoapi")
if os.path.isdir(oldpgoapiPath):
log.info("I found %s, but its no longer used. Going to remove it...", oldpgoapiPath)
shutil.rmtree(oldpgoapiPath)
log.info("Done!")
# Assert pgoapi is installed
try:
import pgoapi
except ImportError:
log.critical("It seems `pgoapi` is not installed. You must run pip install -r requirements.txt again")
sys.exit(1)
# Assert pgoapi >= 1.1.6 is installed
from distutils.version import StrictVersion
if not hasattr(pgoapi, "__version__") or StrictVersion(pgoapi.__version__) < StrictVersion(pgoapi_version):
log.critical("It seems `pgoapi` is not up-to-date. You must run pip install -r requirements.txt again")
sys.exit(1)
from threading import Thread, Event
from queue import Queue
from flask_cors import CORS
from flask.ext import cache_bust
from pogom import config
from pogom.app import Pogom
from pogom.utils import get_args, insert_mock_data, get_encryption_lib_path
from pogom.search import search_overseer_thread, fake_search_loop
from pogom.models import init_database, create_tables, drop_tables, Pokemon, Pokestop, Gym
from pgoapi import utilities as util
if __name__ == '__main__':
# Check if we have the proper encryption library file and get its path
encryption_lib_path = get_encryption_lib_path()
if encryption_lib_path is "":
sys.exit(1)
args = get_args()
if args.debug:
log.setLevel(logging.DEBUG);
else:
log.setLevel(logging.INFO);
# Let's not forget to run Grunt / Only needed when running with webserver
if not args.no_server:
if not os.path.exists(os.path.join(os.path.dirname(__file__), 'static/dist')):
log.critical('Missing front-end assets (static/dist) -- please run "npm install && npm run build" before starting the server');
sys.exit();
# These are very noisey, let's shush them up a bit
logging.getLogger('peewee').setLevel(logging.INFO)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('pgoapi.pgoapi').setLevel(logging.WARNING)
logging.getLogger('pgoapi.rpc_api').setLevel(logging.INFO)
logging.getLogger('werkzeug').setLevel(logging.ERROR)
config['parse_pokemon'] = not args.no_pokemon
config['parse_pokestops'] = not args.no_pokestops
config['parse_gyms'] = not args.no_gyms
# Turn these back up if debugging
if args.debug:
logging.getLogger('requests').setLevel(logging.DEBUG)
logging.getLogger('pgoapi').setLevel(logging.DEBUG)
logging.getLogger('rpc_api').setLevel(logging.DEBUG)
# use lat/lng directly if matches such a pattern
prog = re.compile("^(\-?\d+\.\d+),?\s?(\-?\d+\.\d+)$")
res = prog.match(args.location)
if res:
log.debug('Using coordinates from CLI directly')
position = (float(res.group(1)), float(res.group(2)), 0)
else:
log.debug('Looking up coordinates in API')
position = util.get_pos_by_name(args.location)
# Use the latitude and longitude to get the local altitude from Google
try:
url = 'https://maps.googleapis.com/maps/api/elevation/json?locations={},{}'.format(
str(position[0]), str(position[1]))
altitude = requests.get(url).json()[u'results'][0][u'elevation']
log.debug('Local altitude is: %sm', altitude)
position = (position[0], position[1], altitude)
except (requests.exceptions.RequestException, IndexError, KeyError):
log.error('Unable to retrieve altitude from Google APIs; setting to 0')
if not any(position):
log.error('Could not get a position by name, aborting')
sys.exit()
log.info('Parsed location is: %.4f/%.4f/%.4f (lat/lng/alt)',
position[0], position[1], position[2])
if args.no_pokemon:
log.info('Parsing of Pokemon disabled')
if args.no_pokestops:
log.info('Parsing of Pokestops disabled')
if args.no_gyms:
log.info('Parsing of Gyms disabled')
config['LOCALE'] = args.locale
config['CHINA'] = args.china
app = Pogom(__name__)
db = init_database(app)
if args.clear_db:
log.info('Clearing database')
if args.db_type == 'mysql':
drop_tables(db)
elif os.path.isfile(args.db):
os.remove(args.db)
create_tables(db)
app.set_current_location(position);
# Control the search status (running or not) across threads
pause_bit = Event()
pause_bit.clear()
# Setup the location tracking queue and push the first location on
new_location_queue = Queue()
new_location_queue.put(position)
if not args.only_server:
# Gather the pokemons!
if not args.mock:
log.debug('Starting a real search thread')
search_thread = Thread(target=search_overseer_thread, args=(args, new_location_queue, pause_bit, encryption_lib_path))
else:
log.debug('Starting a fake search thread')
insert_mock_data(position)
search_thread = Thread(target=fake_search_loop)
search_thread.daemon = True
search_thread.name = 'search_thread'
search_thread.start()
if args.cors:
CORS(app);
# No more stale JS
cache_bust.init_cache_busting(app)
app.set_search_control(pause_bit)
app.set_location_queue(new_location_queue)
config['ROOT_PATH'] = app.root_path
config['GMAPS_KEY'] = args.gmaps_key
if args.no_server:
# This loop allows for ctrl-c interrupts to work since flask won't be holding the program open
while search_thread.is_alive():
time.sleep(60)
else:
app.run(threaded=True, use_reloader=False, debug=args.debug, host=args.host, port=args.port)
|
Python
| 0
|
@@ -1455,19 +1455,26 @@
om flask
-.ex
+_cache_bus
t import
@@ -1474,16 +1474,21 @@
import
+init_
cache_bu
@@ -1489,16 +1489,19 @@
che_bust
+ing
%0A%0Afrom p
@@ -5918,19 +5918,8 @@
-cache_bust.
init
|
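The flask.ext namespace was a lazy-import shim that Flask deprecated and later removed, so `from flask.ext import cache_bust` stopped resolving; the fix imports the extension package directly and calls its init function. A minimal sketch of the corrected wiring, assuming the package providing init_cache_busting (as shown in the diff) is installed:

from flask import Flask
from flask_cache_bust import init_cache_busting

app = Flask(__name__)
# rewrite static asset URLs with a content hash so browsers
# never serve stale JavaScript after a deploy
init_cache_busting(app)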
d043eef098be68690b9d6cd5790b667cdb2d825b
|
Add comments about security issue
|
runserver.py
|
runserver.py
|
from wKRApp import app
app.secret_key = "my precious" # 2 security flaws, need to sort out
app.run(debug=True)
|
Python
| 0
|
@@ -20,38 +20,8 @@
app%0A
-app.secret_key = %22my precious%22
# 2
@@ -53,16 +53,164 @@
sort out
+%0A # %09%091. the key should be randomy generated%0A # %09%092. the key should be set in a config file that is then imported in.%0Aapp.secret_key = %22my precious%22
%0Aapp.run
|
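The comments added by the diff name two flaws: the key is a guessable literal, and it lives in source control. A common remedy, sketched here as a suggestion rather than what this project ultimately shipped, is to read the key from deployment configuration and fall back to a random one:

import os
from wKRApp import app  # the app object imported above

# 1. generate the key randomly instead of hard-coding a guessable literal
# 2. read it from deployment config / environment rather than the repo;
#    fall back to a per-process random key only for local development
app.secret_key = os.environ.get("SECRET_KEY") or os.urandom(24)

app.run(debug=True)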
f56d1c7502e5499c37dce690a5d13f3e099baa77
|
Save neighbourhoods figure manually
|
bin/plot_neighbourhoods.py
|
bin/plot_neighbourhoods.py
|
"""plot_neighbourhoods.py
Plot the neighbourhoods of all classes for the city specified as an input with the following color codes
* Black: where the class is over-represented (with 99% CI)
* Light grey: where the class is 'normally' represented
* White: where the class is under-represented
"""
import sys
import math
import csv
import fiona
from descartes import PolygonPatch
from shapely.geometry import shape
from matplotlib import pylab as plt
#
# Parameters
#
## Read city from input
city = sys.argv[1]
## Colors
colours = {'over': 'black',
'norm': 'grey',
'under': 'white'}
#
# Import data
#
## Blockgroups borders
blocks = {}
with fiona.open('data/shp/msa/%s/blockgroups.shp'%city, 'r', 'ESRI Shapefile') as source:
for f in source:
blocks[f['properties']['BKGPIDFP00']] = shape(f['geometry'])
## List of MSA
msa = {}
with open('data/names/msa.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
msa[rows[0]] = rows[1]
## Classes
classes = {}
with open('extr/classes/msa_average/classes.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
classes[rows[0]] =[int(r) for r in rows[1:]]
## Representation values
rep_vals = {}
with open('extr/representation/classes/msa/%s_values.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
classes_list = reader.next()[1:]
for rows in reader:
rep_vals[rows[0]] = {cl:float(r) for cl,r in zip(classes_list,
rows[1:])}
## Representation variance
rep_var = {}
with open('extr/representation/classes/msa/%s_variance.csv'%city, 'r') as source:
reader = csv.reader(source, delimiter='\t')
classes_list = reader.next()[1:]
for rows in reader:
rep_var[rows[0]] = {cl:float(r) for cl,r in zip(classes_list,
rows[1:])}
#
# Transform representation values and variance into list of areal units
#
representation = {cl:{} for cl in classes} # cl:{bckgp:over, under, or norm}
for bg in rep_vals:
for cl in classes:
rep = rep_vals[bg][cl]-1
std = math.sqrt(rep_var[bg][cl])
## if within 2.57 sigma or nan, mark as normal
if abs(rep) <= 2.57*std or math.isnan(rep):
representation[cl][bg] = 'norm'
## else it is over-represented or under
else:
if rep < 0:
representation[cl][bg] = 'under'
else:
representation[cl][bg] = 'over'
#
# Plot
#
fig = plt.figure()
for i,cl in enumerate(classes):
if i==0:
ax = fig.add_subplot(1,len(classes),i+1)
else:
ax = fig.add_subplot(1,len(classes),i+1, sharex=ax, sharey=ax)
for bg in representation[cl]:
color = colours[representation[cl][bg]]
if blocks[bg].geom_type=="Polygon":
patch = PolygonPatch(blocks[bg], fc=color, ec='None', alpha=1, zorder=1)
ax.add_patch(patch)
else:
for t in blocks[bg]:
patch = PolygonPatch(t, fc=color, ec='None', alpha=1, zorder=1)
ax.add_patch(patch)
ax.relim()
ax.axis('off')
ax.autoscale_view(True,True,True)
ax.set_title(r"$%s$"%cl,fontsize=25)
plt.savefig('figures/paper/%s_neighbourhoods.png'%msa[city].replace(" ","").replace(",", ""),
bbox_inches='tight')
plt.show()
|
Python
| 0
|
@@ -3360,16 +3360,17 @@
ze=25)%0A%0A
+#
plt.save
@@ -3455,17 +3455,17 @@
%22, %22%22),%0A
-
+#
|
db4b77ee5be099cf0ac751956d010777e1ff6640
|
Add DefinitionNotFoundError
|
UM/Settings/SettingsError.py
|
UM/Settings/SettingsError.py
|
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
class SettingsError(Exception):
pass
class InvalidFileError(SettingsError):
def __init__(self, path):
super().__init__("File {0} is an invalid settings file".format(path))
class InvalidVersionError(SettingsError):
def __init__(self, path):
super().__init__("Invalid version for file {0}".format(path))
|
Python
| 0.000078
|
@@ -409,24 +409,186 @@
file %7B0%7D%22.format(path))%0A
+%0Aclass DefinitionNotFoundError(SettingsError):%0A def __init__(self, type_id):%0A super().__init__(%22Could not find machine definition %7B0%7D%22.format(type_id))%0A
|
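The new exception gives callers a dedicated type to catch when a machine definition lookup fails, instead of a bare SettingsError. A usage sketch against the class added above, with the registry and lookup function invented for illustration:

_definitions = {}  # hypothetical registry of machine definitions

def get_definition(type_id):
    try:
        return _definitions[type_id]
    except KeyError:
        raise DefinitionNotFoundError(type_id)

try:
    get_definition("ultimaker2")
except DefinitionNotFoundError as err:
    print(err)  # -> Could not find machine definition ultimaker2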