| commit (string, len 40) | subject (string, len 1-3.25k) | old_file (string, len 4-311) | new_file (string, len 4-311) | old_contents (string, len 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, len 0-7.82k) |
|---|---|---|---|---|---|---|---|
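Each row below is shown flattened, with the multi-line cells (`old_contents` and `diff`) spilled across lines. The `diff` cells hold git-style hunks whose bodies are percent-encoded: `%0A` is a newline, `%22` a double quote, `%5B`/`%5D` square brackets. A minimal sketch for recovering readable hunk text, assuming each cell is loaded as a single string; `urllib.parse.unquote` is the standard-library decoder:

```python
from urllib.parse import unquote

def decode_diff(encoded: str) -> str:
    """Percent-decode a `diff` cell into readable hunk text."""
    # unquote (not unquote_plus) is deliberate: '+' is the diff
    # add-marker and must be preserved, not turned into a space.
    return unquote(encoded)

# Segments taken verbatim from the records below:
print(decode_diff("+from tensor2tensor.models import text_cnn%0A"))
# +from tensor2tensor.models import text_cnn

print(decode_diff("self.log.info(%22Testing getrpcinfo...%22)"))
# self.log.info("Testing getrpcinfo...")
```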
b94bfcbc579af9e08c4ac42466cd0c33a6d8a529
|
add textCNN model to __init__.py (#1421)
|
tensor2tensor/models/__init__.py
|
tensor2tensor/models/__init__.py
|
# coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models defined in T2T. Imports here force registration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
# pylint: disable=unused-import
from tensor2tensor.layers import modalities # pylint: disable=g-import-not-at-top
from tensor2tensor.models import basic
from tensor2tensor.models import bytenet
from tensor2tensor.models import distillation
from tensor2tensor.models import image_transformer
from tensor2tensor.models import image_transformer_2d
from tensor2tensor.models import lstm
from tensor2tensor.models import mtf_image_transformer
from tensor2tensor.models import mtf_resnet
from tensor2tensor.models import mtf_transformer
from tensor2tensor.models import mtf_transformer2
from tensor2tensor.models import neural_gpu
from tensor2tensor.models import resnet
from tensor2tensor.models import revnet
from tensor2tensor.models import shake_shake
from tensor2tensor.models import slicenet
from tensor2tensor.models import transformer
from tensor2tensor.models import vanilla_gan
from tensor2tensor.models import xception
from tensor2tensor.models.research import adafactor_experiments
from tensor2tensor.models.research import aligned
from tensor2tensor.models.research import attention_lm
from tensor2tensor.models.research import attention_lm_moe
from tensor2tensor.models.research import autoencoders
from tensor2tensor.models.research import cycle_gan
from tensor2tensor.models.research import gene_expression
from tensor2tensor.models.research import glow
from tensor2tensor.models.research import lm_experiments
from tensor2tensor.models.research import moe_experiments
from tensor2tensor.models.research import multiquery_paper
from tensor2tensor.models.research import rl
from tensor2tensor.models.research import similarity_transformer
from tensor2tensor.models.research import super_lm
from tensor2tensor.models.research import transformer_moe
from tensor2tensor.models.research import transformer_nat
from tensor2tensor.models.research import transformer_parallel
from tensor2tensor.models.research import transformer_revnet
from tensor2tensor.models.research import transformer_sketch
from tensor2tensor.models.research import transformer_symshard
from tensor2tensor.models.research import transformer_vae
from tensor2tensor.models.research import universal_transformer
from tensor2tensor.models.research import vqa_attention
from tensor2tensor.models.research import vqa_recurrent_self_attention
from tensor2tensor.models.research import vqa_self_attention
from tensor2tensor.models.video import basic_deterministic
from tensor2tensor.models.video import basic_recurrent
from tensor2tensor.models.video import basic_stochastic
from tensor2tensor.models.video import emily
from tensor2tensor.models.video import epva
from tensor2tensor.models.video import next_frame_glow
from tensor2tensor.models.video import savp
from tensor2tensor.models.video import sv2p
from tensor2tensor.models.video import svg_lp
from tensor2tensor.utils import registry
# pylint: enable=unused-import
def model(name):
return registry.model(name)
|
Python
| 0.000139
|
@@ -1573,24 +1573,66 @@
rt slicenet%0A
+from tensor2tensor.models import text_cnn%0A
from tensor2
|
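A note on the hunk headers: they appear to count characters of the cell contents, not lines. In the record above, `@@ -1573,24 +1573,66 @@` covers 24 characters of context on the old side and 24 + 42 = 66 on the new side once the inserted import is counted. A quick check of that reading (the leading-space markers on context lines seem to have been stripped in this dump, so the segments are taken verbatim):

```python
from urllib.parse import unquote

# Segments of the first record's hunk, percent-decoded:
ctx_before = unquote("rt slicenet%0A")                                     # tail of the slicenet import line
added      = unquote("+from tensor2tensor.models import text_cnn%0A")[1:]  # drop the '+' marker
ctx_after  = "from tensor2"                                                # head of the next import line

assert len(ctx_before) + len(ctx_after) == 24               # old-side length in '@@ -1573,24 ...'
assert len(ctx_before) + len(added) + len(ctx_after) == 66  # new-side length in '... +1573,66 @@'
```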
251a91c1bf245b3674c2612149382a0f1e18dc98
|
Add tests for getrpcinfo
|
test/functional/interface_rpc.py
|
test/functional/interface_rpc.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class RPCInterfaceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getbestblockhash", "id": 3},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def run_test(self):
self.test_batch_request()
if __name__ == '__main__':
RPCInterfaceTest().main()
|
Python
| 0
|
@@ -365,16 +365,46 @@
rt_equal
+, assert_greater_than_or_equal
%0A%0Aclass
@@ -540,16 +540,353 @@
= True%0A%0A
+ def test_getrpcinfo(self):%0A self.log.info(%22Testing getrpcinfo...%22)%0A%0A info = self.nodes%5B0%5D.getrpcinfo()%0A assert_equal(len(info%5B'active_commands'%5D), 1)%0A%0A command = info%5B'active_commands'%5D%5B0%5D%0A assert_equal(command%5B'method'%5D, 'getrpcinfo')%0A assert_greater_than_or_equal(command%5B'duration'%5D, 0)%0A%0A
def
@@ -1825,32 +1825,63 @@
run_test(self):%0A
+ self.test_getrpcinfo()%0A
self.tes
|
46c09fd75c6f45d68cd722cd3a12b88d04257083
|
Add tests for getrpcinfo
|
test/functional/interface_rpc.py
|
test/functional/interface_rpc.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Tests some generic aspects of the RPC interface."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class RPCInterfaceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def test_batch_request(self):
self.log.info("Testing basic JSON-RPC batch request...")
results = self.nodes[0].batch([
# A basic request that will work fine.
{"method": "getblockcount", "id": 1},
# Request that will fail. The whole batch request should still
# work fine.
{"method": "invalidmethod", "id": 2},
# Another call that should succeed.
{"method": "getbestblockhash", "id": 3},
])
result_by_id = {}
for res in results:
result_by_id[res["id"]] = res
assert_equal(result_by_id[1]['error'], None)
assert_equal(result_by_id[1]['result'], 0)
assert_equal(result_by_id[2]['error']['code'], -32601)
assert_equal(result_by_id[2]['result'], None)
assert_equal(result_by_id[3]['error'], None)
assert result_by_id[3]['result'] is not None
def run_test(self):
self.test_batch_request()
if __name__ == '__main__':
RPCInterfaceTest().main()
|
Python
| 0
|
@@ -365,16 +365,46 @@
rt_equal
+, assert_greater_than_or_equal
%0A%0Aclass
@@ -540,16 +540,353 @@
= True%0A%0A
+ def test_getrpcinfo(self):%0A self.log.info(%22Testing getrpcinfo...%22)%0A%0A info = self.nodes%5B0%5D.getrpcinfo()%0A assert_equal(len(info%5B'active_commands'%5D), 1)%0A%0A command = info%5B'active_commands'%5D%5B0%5D%0A assert_equal(command%5B'method'%5D, 'getrpcinfo')%0A assert_greater_than_or_equal(command%5B'duration'%5D, 0)%0A%0A
def
@@ -1825,32 +1825,63 @@
run_test(self):%0A
+ self.test_getrpcinfo()%0A
self.tes
|
d8dc3f2f2972aa03781f4ff4044ac40fbf6b05ab
|
modify p2p_feefilter test to catch rounding error
|
test/functional/p2p_feefilter.py
|
test/functional/p2p_feefilter.py
|
#!/usr/bin/env python3
# Copyright (c) 2016-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test processing of feefilter messages."""
from decimal import Decimal
import time
from test_framework.messages import msg_feefilter
from test_framework.mininode import mininode_lock, P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import sync_blocks, sync_mempools
def hashToHex(hash):
return format(hash, '064x')
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
for x in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
time.sleep(1)
return False
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
def clear_invs(self):
with mininode_lock:
self.txinvs = []
class FeeFilterTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
node1 = self.nodes[1]
node0 = self.nodes[0]
# Get out of IBD
node1.generate(1)
sync_blocks(self.nodes)
self.nodes[0].add_p2p_connection(TestP2PConn())
# Test that invs are received for all txs at feerate of 20 sat/byte
node1.settxfee(Decimal("0.00020000"))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Set a filter of 15 sat/byte
self.nodes[0].p2p.send_and_ping(msg_feefilter(15000))
# Test that txs are still being received (paying 20 sat/byte)
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Change tx fee rate to 10 sat/byte and test they are no longer received
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
# we can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
# Remove fee filter and check that txs are received again
self.nodes[0].p2p.send_and_ping(msg_feefilter(0))
txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
assert(allInvsMatch(txids, self.nodes[0].p2p))
self.nodes[0].p2p.clear_invs()
if __name__ == '__main__':
FeeFilterTest().main()
|
Python
| 0
|
@@ -1332,16 +1332,411 @@
odes = 2
+%0A # We lower the various required feerates for this test%0A # to catch a corner-case where feefilter used to slightly undercut%0A # mempool and wallet feerate calculation based on GetFee%0A # rounding down 3 places, leading to stranded transactions.%0A # See issue #16499%0A self.extra_args = %5B%5B%22-minrelaytxfee=0.00000100%22, %22-mintxfee=0.00000100%22%5D%5D*self.num_nodes
%0A%0A de
@@ -2069,16 +2069,35 @@
eceived
+by test connection
for all
@@ -2102,16 +2102,26 @@
l txs at
+%0A #
feerate
@@ -2124,18 +2124,18 @@
rate of
+.
2
-0
sat/byt
@@ -2165,35 +2165,35 @@
e(Decimal(%220.000
-2
00
+2
00%22))%0A tx
@@ -2385,16 +2385,17 @@
lter of
+.
15 sat/b
@@ -2389,32 +2389,51 @@
of .15 sat/byte
+ on test connection
%0A self.no
@@ -2474,18 +2474,16 @@
lter(150
-00
))%0A%0A
@@ -2531,16 +2531,35 @@
ved
+by test connection
(paying
20 s
@@ -2554,18 +2554,19 @@
(paying
-20
+.15
sat/byt
@@ -2568,16 +2568,62 @@
t/byte)%0A
+ node1.settxfee(Decimal(%220.00000150%22))%0A
@@ -2823,18 +2823,18 @@
rate to
+.
1
-0
sat/byt
@@ -2872,16 +2872,49 @@
eceived%0A
+ # by the test connection%0A
@@ -2942,19 +2942,19 @@
l(%220.000
-1
00
+1
00%22))%0A
|
e3aceabfe36c57f439c5db39f41ce00f079c71ed
|
Add test for too-large wallet output groups
|
test/functional/wallet_groups.py
|
test/functional/wallet_groups.py
|
#!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test wallet group functionality."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
def assert_approx(v, vexp, vspan=0.00001):
if v < vexp - vspan:
raise AssertionError("%s < [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
if v > vexp + vspan:
raise AssertionError("%s > [%s..%s]" % (str(v), str(vexp - vspan), str(vexp + vspan)))
class WalletGroupTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], [], ['-avoidpartialspends']]
def run_test (self):
# Mine some coins
self.nodes[0].generate(110)
# Get some addresses from the two nodes
addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
[self.nodes[0].sendtoaddress(addr, 1.0) for addr in addrs]
[self.nodes[0].sendtoaddress(addr, 0.5) for addr in addrs]
self.nodes[0].generate(1)
self.sync_all()
# For each node, send 0.2 coins back to 0;
# - node[1] should pick one 0.5 UTXO and leave the rest
# - node[2] should pick one (1.0 + 0.5) UTXO group corresponding to a
# given address, and leave the rest
txid1 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx1 = self.nodes[1].getrawtransaction(txid1, True)
# txid1 should have 1 input and 2 outputs
assert_equal(1, len(tx1["vin"]))
assert_equal(2, len(tx1["vout"]))
# one output should be 0.2, the other should be ~0.3
v = [vout["value"] for vout in tx1["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 0.3, 0.0001)
txid2 = self.nodes[2].sendtoaddress(self.nodes[0].getnewaddress(), 0.2)
tx2 = self.nodes[2].getrawtransaction(txid2, True)
# txid2 should have 2 inputs and 2 outputs
assert_equal(2, len(tx2["vin"]))
assert_equal(2, len(tx2["vout"]))
# one output should be 0.2, the other should be ~1.3
v = [vout["value"] for vout in tx2["vout"]]
v.sort()
assert_approx(v[0], 0.2)
assert_approx(v[1], 1.3, 0.0001)
if __name__ == '__main__':
WalletGroupTest().main ()
|
Python
| 0
|
@@ -305,16 +305,116 @@
amework%0A
+from test_framework.mininode import FromHex, ToHex%0Afrom test_framework.messages import CTransaction%0A
from tes
@@ -2677,16 +2677,1131 @@
.0001)%0A%0A
+ # Empty out node2's wallet%0A self.nodes%5B2%5D.sendtoaddress(address=self.nodes%5B0%5D.getnewaddress(), amount=self.nodes%5B2%5D.getbalance(), subtractfeefromamount=True)%0A self.sync_all()%0A self.nodes%5B0%5D.generate(1)%0A%0A # Fill node2's wallet with 10000 outputs corresponding to the same%0A # scriptPubKey%0A for i in range(5):%0A raw_tx = self.nodes%5B0%5D.createrawtransaction(%5B%7B%22txid%22:%220%22*64, %22vout%22:0%7D%5D, %5B%7Baddr2%5B0%5D: 0.05%7D%5D)%0A tx = FromHex(CTransaction(), raw_tx)%0A tx.vin = %5B%5D%0A tx.vout = %5Btx.vout%5B0%5D%5D * 2000%0A funded_tx = self.nodes%5B0%5D.fundrawtransaction(ToHex(tx))%0A signed_tx = self.nodes%5B0%5D.signrawtransactionwithwallet(funded_tx%5B'hex'%5D)%0A self.nodes%5B0%5D.sendrawtransaction(signed_tx%5B'hex'%5D)%0A self.nodes%5B0%5D.generate(1)%0A%0A self.sync_all()%0A%0A # Check that we can create a transaction that only requires ~100 of our%0A # utxos, without pulling in all outputs and creating a transaction that%0A # is way too big.%0A assert self.nodes%5B2%5D.sendtoaddress(address=addr2%5B0%5D, amount=5)%0A%0A
if __nam
|
28e78b51ff7f198a21b3d46d66d8ab6399d96a53
|
increase led brightness
|
led_controller.py
|
led_controller.py
|
#!/usr/bin/env python
import time
import _rpi_ws281x as ws
class Led:
def __init__(self):
# LED configuration
self.LED_CHANNEL = 0
self.LED_COUNT = 64 # LEDs to light
self.LED_FREQ_HZ = 800000 # Frequency of LED signal (800khz | 400khz)
self.LED_DMA_NUM = 5 # DMA channel to use (0 - 14)
self.LED_GPIO = 18 # Pin connected to the signal line (PWM)
self.LED_BRIGHTNESS = 128 # 0 is dark, 255 is hella bright
self.LED_INVERT = 0 # 1 inverts LED signal
# Define colors to be used (unsigned 32-bit int value
self.DOT_COLORS = [
0x100010 # purple
]
# Create struct from LED configuration
self.leds = ws.new_ws2811_t()
# Init all channels to off
for channum in range(2):
self.channel = ws.ws2811_channel_get(self.leds, channum)
ws.ws2811_channel_t_count_set(self.channel, 0)
ws.ws2811_channel_t_gpionum_set(self.channel, 0)
ws.ws2811_channel_t_invert_set(self.channel, 0)
ws.ws2811_channel_t_brightness_set(self.channel, 0)
self.channel = ws.ws2811_channel_get(self.leds, self.LED_CHANNEL)
ws.ws2811_channel_t_count_set(self.channel, self.LED_COUNT)
ws.ws2811_channel_t_gpionum_set(self.channel, self.LED_GPIO)
ws.ws2811_channel_t_invert_set(self.channel, self.LED_INVERT)
ws.ws2811_channel_t_brightness_set(self.channel, self.LED_BRIGHTNESS)
ws.ws2811_t_freq_set(self.leds, self.LED_FREQ_HZ)
ws.ws2811_t_dmanum_set(self.leds, self.LED_DMA_NUM)
# Initialize library with LED configuration.
self.resp = ws.ws2811_init(self.leds)
if self.resp != 0:
raise RuntimeError('ws2811_init failed with code {0}'.format(self.resp))
def __del__(self):
# set all lights to off
for i in range(self.LED_COUNT):
ws.ws2811_led_set(self.channel, i, 0x000000)
# Ensure ws2811_fini is called before the program quits.
ws.ws2811_fini(self.leds)
# Example of calling delete function to clean up structure memory. Isn't
# strictly necessary at the end of the program execution here, but is good practice.
ws.delete_ws2811_t(self.leds)
def do_light(self, matrix):
if len(matrix) > self.LED_COUNT:
raise RuntimeError('Matrix size mismatch error. Matrix must be 8x8')
# Wrap following code in a try/finally to ensure cleanup functions are called
# after library is initialized.
offset = 0
color = self.DOT_COLORS[0]
for i in range(len(matrix)):
#i is the led number; not matrix number
# Set the LED color buffer value.
#i/8%2==1
if (i / 8) % 2 == 1:
matrix_val = matrix[(7 - (i % 8)) + int(i / 8) * 8]
else:
matrix_val = matrix[i]
if matrix_val == 1:
ws.ws2811_led_set(self.channel, i, color)
else:
ws.ws2811_led_set(self.channel, i, 0x000000)
# Send the LED color data to the hardware.
self.resp = ws.ws2811_render(self.leds)
if self.resp != 0:
raise RuntimeError('ws2811_render failed with code {0}'.format(self.resp))
|
Python
| 0.000001
|
@@ -412,11 +412,11 @@
%09=
-128
+200
%09%09#
|
35cf00b5f05f4b1df8b40b7edc3aac76534c8903
|
enable some reduction tests
|
test/python/tests/test_reduce.py
|
test/python/tests/test_reduce.py
|
import util
class tes1t_reduce_views:
""" Test reduction of all kind of views"""
def init(self):
for cmd, ndim in util.gen_random_arrays("R", 4, dtype="np.float32"):
cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
for i in range(ndim):
yield (cmd, i)
for i in range(ndim):
yield (cmd, -i)
def test_reduce(self, (cmd, axis)):
cmd += "res = M.add.reduce(a, axis=%d)" % axis
return cmd
class tes1t_reduce_sum:
""" Test reduction of sum() and prod()"""
def init(self):
for cmd, ndim in util.gen_random_arrays("R", 3, dtype="np.float32"):
cmd = "R = bh.random.RandomState(42); a = %s; " % cmd
for op in ["sum", "prod"]:
yield (cmd, op)
def test_func(self, (cmd, op)):
cmd += "res = M.%s(a)" % op
return cmd
def test_method(self, (cmd, op)):
cmd += "res = a.%s()" % op
return cmd
class test_reduce_primitives:
def init(self):
for op in ["add", "multiply", "minimum", "maximum"]:
yield (op, "np.float64")
for op in ["bitwise_or", "bitwise_xor"]:
yield (op, "np.uint64")
for op in ["add", "logical_or", "logical_and", "logical_xor"]:
yield (op, "np.bool")
def test_vector(self, (op, dtype)):
cmd = "R = bh.random.RandomState(42); a = R.random(10, dtype=%s, bohrium=BH); " % dtype
cmd += "res = M.%s.reduce(a)" % op
return cmd
|
Python
| 0.000036
|
@@ -8,33 +8,32 @@
util%0A%0A%0Aclass tes
-1
t_reduce_views:%0A
@@ -499,17 +499,16 @@
lass tes
-1
t_reduce
|
b2266a2640d542fa6f9734fa9565a7521d06f1b0
|
Bump again
|
bulbs/__init__.py
|
bulbs/__init__.py
|
__version__ = "0.11.3"
|
Python
| 0
|
@@ -17,7 +17,7 @@
.11.
-3
+4
%22%0A
|
04118d072a65df7df2b0feba87797c7228bdcc84
|
Fix app registry not ready
|
cacheops/utils.py
|
cacheops/utils.py
|
# -*- coding: utf-8 -*-
import re
import json
import inspect
from funcy import memoize, compose, wraps, any, any_fn, select_values, make_lookuper
from funcy.py3 import lmapcat
from .cross import md5hex
from django.apps import apps
from django.db import models
from django.http import HttpRequest
from django.db.migrations.recorder import MigrationRecorder
from .conf import model_profile
# NOTE: we don't serialize this fields since their values could be very long
# and one should not filter by their equality anyway.
NOT_SERIALIZED_FIELDS = (
models.FileField,
models.TextField, # One should not filter by long text equality
models.BinaryField,
)
def model_family(model):
"""
Returns a list of all proxy models, including subclasess, superclassses and siblings.
"""
def class_tree(cls):
return [cls] + lmapcat(class_tree, cls.__subclasses__())
# NOTE: we also list multitable submodels here, we just don't care.
# Cacheops doesn't support them anyway.
return class_tree(model._meta.concrete_model)
@memoize
def family_has_profile(cls):
return any(model_profile, model_family(cls))
@make_lookuper
def table_to_model():
d = {m._meta.db_table: m for m in apps.get_models(include_auto_created=True)}
d['django_migrations'] = MigrationRecorder.Migration
return d
class MonkeyProxy(object):
pass
def monkey_mix(cls, mixin):
"""
Mixes a mixin into existing class.
Does not use actual multi-inheritance mixins, just monkey patches methods.
Mixin methods can call copies of original ones stored in `_no_monkey` proxy:
class SomeMixin(object):
def do_smth(self, arg):
... do smth else before
self._no_monkey.do_smth(self, arg)
... do smth else after
"""
assert not hasattr(cls, '_no_monkey'), 'Multiple monkey mix not supported'
cls._no_monkey = MonkeyProxy()
test = any_fn(inspect.isfunction, inspect.ismethoddescriptor)
methods = select_values(test, mixin.__dict__)
for name, method in methods.items():
if hasattr(cls, name):
setattr(cls._no_monkey, name, getattr(cls, name))
setattr(cls, name, method)
@memoize
def stamp_fields(model):
"""
Returns serialized description of model fields.
"""
stamp = str(sorted((f.name, f.attname, f.db_column, f.__class__) for f in model._meta.fields))
return md5hex(stamp)
### Cache keys calculation
def obj_key(obj):
if isinstance(obj, models.Model):
return '%s.%s.%s' % (obj._meta.app_label, obj._meta.model_name, obj.pk)
elif inspect.isfunction(obj):
factors = [obj.__module__, obj.__name__]
# Really useful to ignore this while code still in development
if hasattr(obj, '__code__') and not obj.__globals__.get('CACHEOPS_DEBUG'):
factors.append(obj.__code__.co_firstlineno)
return factors
else:
return str(obj)
def func_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key based on func and arguments
"""
factors = [func, args, kwargs, extra]
return md5hex(json.dumps(factors, sort_keys=True, default=obj_key))
def view_cache_key(func, args, kwargs, extra=None):
"""
Calculate cache key for view func.
Use url instead of not properly serializable request argument.
"""
if hasattr(args[0], 'build_absolute_uri'):
uri = args[0].build_absolute_uri()
else:
uri = args[0]
return 'v:' + func_cache_key(func, args[1:], kwargs, extra=(uri, extra))
def cached_view_fab(_cached):
def force_render(response):
if hasattr(response, 'render') and callable(response.render):
response.render()
return response
def cached_view(*dargs, **dkwargs):
def decorator(func):
dkwargs['key_func'] = view_cache_key
cached_func = _cached(*dargs, **dkwargs)(compose(force_render, func))
@wraps(func)
def wrapper(request, *args, **kwargs):
assert isinstance(request, HttpRequest), \
"A view should be passed with HttpRequest as first argument"
if request.method not in ('GET', 'HEAD'):
return func(request, *args, **kwargs)
return cached_func(request, *args, **kwargs)
if hasattr(cached_func, 'invalidate'):
wrapper.invalidate = cached_func.invalidate
wrapper.key = cached_func.key
return wrapper
return decorator
return cached_view
### Whitespace handling for template tags
from django.utils.safestring import mark_safe
NEWLINE_BETWEEN_TAGS = mark_safe('>\n<')
SPACE_BETWEEN_TAGS = mark_safe('> <')
def carefully_strip_whitespace(text):
text = re.sub(r'>\s*\n\s*<', NEWLINE_BETWEEN_TAGS, text)
text = re.sub(r'>\s{2,}<', SPACE_BETWEEN_TAGS, text)
return text
|
Python
| 0.000001
|
@@ -293,68 +293,8 @@
uest
-%0Afrom django.db.migrations.recorder import MigrationRecorder
%0A%0Afr
@@ -1210,16 +1210,80 @@
=True)%7D%0A
+ from django.db.migrations.recorder import MigrationRecorder%0A
d%5B'd
|
aee41bac296eece9c30565c5824db9a019833ee0
|
Add decorator docs
|
calm/decorator.py
|
calm/decorator.py
|
def produces(resource_type):
def decor(func):
if getattr(func, 'handler_def', None):
func.handler_def.produces = resource_type
else:
func.produces = resource_type
return func
return decor
def consumes(resource_type):
def decor(func):
if getattr(func, 'handler_def', None):
func.handler_def.consumes = resource_type
else:
func.consumes = resource_type
return func
return decor
|
Python
| 0.000001
|
@@ -1,303 +1,1415 @@
-%0A%0Adef produces(resource_type):%0A def decor(func):%0A if getattr(func, 'handler_def', None):%0A func.handler_def.produces = resource_type%0A else:%0A func.produces = resource_type%0A%0A return func%0A%0A return decor%0A%0A%0Adef consumes(resource_type):%0A def decor(func):
+%22%22%22%0AThis module defines general decorators to define the Calm Application.%0A%22%22%22%0Afrom calm.resource import Resource%0Afrom calm.ex import DefinitionError%0A%0A%0Adef produces(resource_type):%0A %22%22%22Decorator to specify what kind of Resource the handler produces.%22%22%22%0A if not isinstance(resource_type, Resource):%0A raise DefinitionError('@produces value should be of type Resource.')%0A%0A def decor(func):%0A %22%22%22%0A The function wrapper.%0A%0A It checks whether the function is already defined as a Calm handler or%0A not and sets the appropriate attribute based on that. This is done in%0A order to not enforce a particular order for the decorators.%0A %22%22%22%0A if getattr(func, 'handler_def', None):%0A func.handler_def.produces = resource_type%0A else:%0A func.produces = resource_type%0A%0A return func%0A%0A return decor%0A%0A%0Adef consumes(resource_type):%0A %22%22%22Decorator to specify what kind of Resource the handler consumes.%22%22%22%0A if not isinstance(resource_type, Resource):%0A raise DefinitionError('@consumes value should be of type Resource.')%0A%0A def decor(func):%0A %22%22%22%0A The function wrapper.%0A%0A It checks whether the function is already defined as a Calm handler or%0A not and sets the appropriate attribute based on that. This is done in%0A order to not enforce a particular order for the decorators.%0A %22%22%22
%0A
|
6f0b75f0561563926afc37dca8451f886e2e2d4f
|
Handle unicode data like cdbdump
|
cdblib/cdbdump.py
|
cdblib/cdbdump.py
|
from __future__ import print_function
import argparse
import sys
import six
import cdblib
def cdbdump(parsed_args, **kwargs):
# Read binary data from stdin by default
stdin = kwargs.get('stdin')
if stdin is None:
stdin = sys.stdin if six.PY2 else sys.stdin.buffer
# Print text data to stdout by default
stdout = kwargs.get('stdout', sys.stdout)
encoding = kwargs.get('encoding', sys.getdefaultencoding())
# Consume stdin and parse the cdb file
reader_cls = cdblib.Reader64 if vars(parsed_args)['64'] else cdblib.Reader
data = stdin.read()
reader = reader_cls(data)
# Dump the file's contents to the ouput stream
for key, value in reader.iteritems():
item = '+{:d},{:d}:{:s}->{:s}'.format(
len(key),
len(value),
key.decode(encoding),
value.decode(encoding)
)
print(item, file=stdout)
# Print final newline
print()
def main(args=None):
args = sys.argv[1:] if (args is None) else args
parser = argparse.ArgumentParser(
description=(
"Python version of djb's cdbdump. "
"Supports standard 32-bit cdb files as well as 64-bit variants."
)
)
parser.add_argument(
'-64', action='store_true', help='Use non-standard 64-bit file offsets'
)
parsed_args = parser.parse_args(args)
cdbdump(parsed_args)
if __name__ == '__main__':
main()
|
Python
| 0.999719
|
@@ -362,85 +362,94 @@
out'
-, sys.stdout)%0A encoding = kwargs.get('encoding', sys.getdefaultencoding())
+)%0A if stdout is None:%0A stdout = sys.stdout if six.PY2 else sys.stdout.buffer
%0A%0A
@@ -735,39 +735,27 @@
m =
-'+%7B:d%7D,%7B:d%7D:%7B:s%7D-%3E%7B:s%7D'.format(
+(%0A b'+',
%0A
@@ -767,16 +767,20 @@
+str(
len(key)
,%0A
@@ -779,144 +779,227 @@
key)
-,%0A len(value),%0A key.decode(encoding),%0A value.decode(encoding)%0A )%0A print(item, file=stdout
+).encode('ascii'),%0A b',',%0A str(len(value)).encode('ascii'),%0A b':',%0A key,%0A b'-%3E',%0A value,%0A b'%5Cn',%0A )%0A stdout.write(b''.join(item)
)%0A%0A
@@ -1031,14 +1031,26 @@
-print(
+stdout.write(b'%5Cn'
)%0A%0A%0A
|
9e22b82b9f5848ae3bfc8def66fe7b3d23c8f5b8
|
Change Alfred date of posting to be iso8601 compatible.
|
jobs/spiders/alfred.py
|
jobs/spiders/alfred.py
|
import json
import urlparse
import scrapy
from jobs.items import JobsItem
from jobs.spiders.visir import decode_date_string
class AlfredSpider(scrapy.Spider):
name = "alfred"
start_urls = ['https://api.alfred.is/api/v3/web/open/jobs?cat=0&limit=100&page=0']
def parse(self, response):
# we're using an api rather than scraping a website so we need to grok the json response
content = json.loads(response.text)
# each job under the 'data' key refers to companies listed in the `included` key, so to make
# it easy to get at the data we make a dict keyed to the id of the company
included_data = {entry['id']: entry for entry in content['included']}
for job in content['data']:
job_id = job['id']
company_id = job['relationships']['brand']['data']['id']
item = JobsItem()
item['spider'] = self.name
item['company'] = included_data[company_id]['attributes']['name']
item['url'] = urlparse.urljoin('https://alfred.is/starf/', job_id)
api_url = urlparse.urljoin('https://api.alfred.is/api/v3/web/open/jobs/', job_id)
request = scrapy.Request(api_url, callback=self.parse_specific_job)
request.meta['item'] = item
yield request
def parse_specific_job(self, response):
content = json.loads(response.text)
job = content['data']['attributes']
item = response.meta['item']
item['title'] = job['title']
item['posted'] = job['start']
item['deadline'] = decode_date_string(job['deadline'])
yield item
|
Python
| 0
|
@@ -22,16 +22,39 @@
lparse%0A%0A
+import dateutil.parser%0A
import s
@@ -96,58 +96,8 @@
tem%0A
-from jobs.spiders.visir import decode_date_string%0A
%0A%0Acl
@@ -1553,25 +1553,28 @@
= d
-ecode_date_string
+ateutil.parser.parse
(job
@@ -1586,16 +1586,28 @@
dline'%5D)
+.isoformat()
%0A
|
4b1ba931091448b4e5d980cb0695b4a8aa85b459
|
Use markets.ft.com instead of fixer.io
|
bumblebee/modules/currency.py
|
bumblebee/modules/currency.py
|
# -*- coding: UTF-8 -*-
# pylint: disable=C0111,R0903
"""Displays currency exchange rates. Currently, displays currency between GBP and USD/EUR only.
Requires the following python packages:
* requests
Parameters:
* currency.interval: Interval in minutes between updates, default is 1.
* currency.source: Source currency (defaults to "GBP")
* currency.destination: Comma-separated list of destination currencies (defaults to "USD,EUR")
* currency.sourceformat: String format for source formatting; Defaults to "{}: {}" and has two variables,
the base symbol and the rate list
* currency.destinationdelimiter: Delimiter used for separating individual rates (defaults to "|")
Note: source and destination names right now must correspond to the names used by the API of http://fixer.io
"""
import bumblebee.input
import bumblebee.output
import bumblebee.engine
import json
import time
try:
import requests
from requests.exceptions import RequestException
except ImportError:
pass
SYMBOL = {
"GBP": u"£", "EUR": u"€", "USD": u"$"
}
class Module(bumblebee.engine.Module):
def __init__(self, engine, config):
super(Module, self).__init__(engine, config,
bumblebee.output.Widget(full_text=self.price)
)
self._data = {}
self._interval = int(self.parameter("interval", 1))
self._base = self.parameter("source", "GBP")
self._symbols = self.parameter("destination", "USD,EUR")
self._nextcheck = 0
def price(self, widget):
if self._data == {}:
return "?"
rates = []
for sym in self._data["rates"]:
rates.append(u"{}{}".format(self._data["rates"][sym], SYMBOL[sym] if sym in SYMBOL else sym))
basefmt = u"{}".format(self.parameter("sourceformat", "{}: {}"))
ratefmt = u"{}".format(self.parameter("destinationdelimiter", "|"))
return basefmt.format(SYMBOL[self._base] if self._base in SYMBOL else self._base, ratefmt.join(rates))
def update(self, widgets):
timestamp = int(time.time())
if self._nextcheck < int(time.time()):
self._data = {}
self._nextcheck = int(time.time()) + self._interval*60
url = "http://api.fixer.io/latest?symbols={}&base={}".format(self._symbols, self._base)
try:
self._data = json.loads(requests.get(url).text)
except Exception:
pass
# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
|
Python
| 0
|
@@ -824,19 +824,26 @@
http
+s
://
-fixer.io
+markets.ft.com
%0A%22%22%22
@@ -1103,16 +1103,114 @@
u%22$%22%0A%7D%0A%0A
+API_URL = %22https://markets.ft.com/data/currencies/ajax/conversion?baseCurrency=%7B%7D&comparison=%7B%7D%22%0A%0A
class Mo
@@ -1598,24 +1598,35 @@
, %22USD,EUR%22)
+.split(%22,%22)
%0A sel
@@ -1755,24 +1755,30 @@
for sym
+, rate
in self._da
@@ -1779,25 +1779,24 @@
lf._data
-%5B%22rates%22%5D
+.items()
:%0A
@@ -1833,32 +1833,12 @@
mat(
-self._data%5B%22rates%22%5D%5Bsym%5D
+rate
, SY
@@ -2364,177 +2364,249 @@
-url = %22http://api.fixer.io/latest?symbols=%7B%7D&base=%7B%7D%22.format(self._symbols, self._base)%0A try:%0A self._data = json.loads(requests.get(url).text)%0A
+for symbol in self._symbols:%0A url = API_URL.format(self._base, symbol)%0A try:%0A response = requests.get(url).json()%0A self._data%5Bsymbol%5D = response%5B'data'%5D%5B'exchangeRate'%5D%0A
@@ -2631,16 +2631,20 @@
eption:%0A
+
|
210c2cf58c246c3733542b8fee7c3eb9fe5d860d
|
bump version
|
callisto/delivery/__init__.py
|
callisto/delivery/__init__.py
|
__version__ = '0.4.0'
|
Python
| 0
|
@@ -12,11 +12,11 @@
= '0.4.
-0
+1
'%0A
|
b0245dc4e109a6e381a84a9d3f56ad77425185b8
|
add missing :
|
cavedb/docgen_entrance_csv.py
|
cavedb/docgen_entrance_csv.py
|
# Copyright 2016-2017 Brian Masney <masneyb@onstation.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import cavedb.docgen_common
import cavedb.utils
class EntranceCsv(cavedb.docgen_common.Common):
def __init__(self, filename, download_url):
cavedb.docgen_common.Common.__init__(self)
self.filename = filename
self.download_url = download_url
self.csvfile = None
self.csvwriter = None
def open(self, all_regions_gis_hash):
cavedb.docgen_common.create_base_directory(self.filename)
self.csvfile = open(self.filename, 'w')
self.csvwriter = csv.writer(self.csvfile, delimiter=',')
self.csvwriter.writerow(['internal_id', 'locid', 'survey_id', 'name', 'alternate_names',
'type', 'coord_acquision', 'wgs84_lon', 'wgs84_lat',
'nad83_utmzone', 'nad83_utmeast', 'nad83_utmnorth', 'elevation',
'region', 'county', 'quad', 'length', 'depth', 'length_based_on',
'significant', 'access enum', 'access descr', 'todo enum',
'todo descr', 'source', 'gislbl_pri'])
def feature_entrance(self, feature, entrance, coordinates):
name = cavedb.docgen_common.get_entrance_name(feature, entrance)
if feature.is_significant:
gislbl_pri = 10
elif feature.feature_type == 'FRO':
gislbl_pri = 6
else:
gislbl_pri = 8
lat_lon_wgs84 = coordinates.get_lon_lat_wgs84()
utm_nad83 = coordinates.get_utm_nad83()
if feature.survey_id:
survey_id = feature.survey_county.survey_short_name + feature.survey_id
else
survey_id = ""
self.csvwriter.writerow([feature.id, entrance.id, survey_id,
name, feature.alternate_names, feature.feature_type,
entrance.coord_acquision, lat_lon_wgs84[0],
lat_lon_wgs84[1], entrance.utmzone, utm_nad83[0], utm_nad83[1],
entrance.elevation_ft, feature.bulletin_region.region_name,
feature.survey_county.county_name,
entrance.quad.quad_name if entrance.quad else '',
feature.length_ft, feature.depth_ft, feature.length_based_on,
feature.is_significant, feature.access_enum, feature.access_descr,
feature.todo_enum, feature.todo_descr, feature.source,
gislbl_pri])
def close(self):
self.csvfile.close()
def create_html_download_urls(self):
return self.create_url(self.download_url, 'Spreadsheet (CSV)', self.filename)
def create_for_bulletin(bulletin):
return EntranceCsv(get_bulletin_csv_filename(bulletin.id), \
'bulletin/%s/csv' % (bulletin.id))
def create_for_global():
return EntranceCsv(get_global_csv_filename(), None)
def get_bulletin_csv_filename(bulletin_id):
return '%s/csv/bulletin_%s.csv' % (cavedb.utils.get_output_base_dir(bulletin_id), bulletin_id)
def get_global_csv_filename():
return '%s/csv/all.csv' % (cavedb.utils.get_global_output_base_dir())
|
Python
| 0.000027
|
@@ -2253,16 +2253,17 @@
else
+:
%0A
|
623ce2d8624a1a04156a35ae762d29a19fbc7b52
|
fix broken docstring
|
ceph_deploy/util/templates.py
|
ceph_deploy/util/templates.py
|
ceph_repo = """
[ceph]
name=Ceph packages for $basearch
baseurl={repo_url}/$basearch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-noarch]
name=Ceph noarch packages
baseurl={repo_url}/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
[ceph-source]
name=Ceph source packages
baseurl={repo_url}/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey={gpg_url}
"""
def custom_repo(**kw):
"""
Repo files need special care in that a whole line should not be present
if there is no value for it. Because we were using `format()` we could
not conditionally add a line for a repo file. So the end result would
contain a key with a missing value (say if we were passing `None`).
For example, it could look like::
[ceph repo]
name= ceph repo
proxy=
gpgcheck=
Which breaks. This function allows us to conditionally add lines,
preserving an order and be more careful.
Previously, and for historical purposes, this is how the template used
to look::
custom_repo = """
[{repo_name}]
name={name}
baseurl={baseurl}
enabled={enabled}
gpgcheck={gpgcheck}
type={_type}
gpgkey={gpgkey}
proxy={proxy}
"""
"""
lines = []
# by using tuples (vs a dict) we preserve the order of what we want to
# return, like starting with a [repo name]
tmpl = (
('reponame', '[%s]'),
('baseurl', 'baseurl=%s'),
('enabled', 'enabled=%s'),
('gpgcheck', 'gpgcheck=%s'),
('_type', 'type=%s'),
('gpgkey', 'gpgkey=%s'),
('proxy', 'proxy=%s'),
)
for line in tmpl:
tmpl_key, tmpl_value = line # key values from tmpl
# ensure that there is an actual value (not None nor empty string)
if tmpl_key in kw and kw.get(tmpl_key) not in (None, ''):
lines.append(tmpl_value % kw.get(tmpl_key))
return '\n'.join(lines)
|
Python
| 0
|
@@ -1042,20 +1042,16 @@
m_repo =
- %22%22%22
%0A
@@ -1235,20 +1235,8 @@
oxy%7D
-%0A %22%22%22
%0A%0A
|
f4b8246aead0657e0f997773efed5fbc2147cce7
|
add '# noqa' to imports to make flake8 happy
|
ceph_medic/remote/__init__.py
|
ceph_medic/remote/__init__.py
|
import mon
import osd
import common
import functions
import commands
|
Python
| 0
|
@@ -3,16 +3,24 @@
port mon
+ # noqa
%0Aimport
@@ -22,16 +22,24 @@
port osd
+ # noqa
%0Aimport
@@ -44,16 +44,24 @@
t common
+ # noqa
%0Aimport
@@ -69,16 +69,24 @@
unctions
+ # noqa
%0Aimport
@@ -93,9 +93,17 @@
commands
+ # noqa
%0A
|
69a9ba2fed2ab95f15b0292b796e317df00446a9
|
Use type hint in _chainerx.py
|
chainer/backends/_chainerx.py
|
chainer/backends/_chainerx.py
|
import numpy
import chainer
from chainer import _backend
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
import chainerx
class ChainerxDevice(_backend.Device):
"""Device for ChainerX backend"""
xp = chainerx
supported_array_types = (chainerx.ndarray,)
__hash__ = _backend.Device.__hash__
def __init__(self, device):
# type: (chainerx.Device) -> None
assert isinstance(device, chainerx.Device)
super(ChainerxDevice, self).__init__()
self.device = device # type: chainerx.Device
@staticmethod
def from_array(array):
if isinstance(array, chainerx.ndarray) and array.device is not None:
return ChainerxDevice(array.device)
return None
@staticmethod
def from_fallback_device(device):
"""Returns a :class:`~chainer.backend.ChainerxDevice` corresponding \
to the fallback device.
.. seealso::
:data:`~chainer.backend.ChainerxDevice.fallback_device`
"""
assert isinstance(device, _backend.Device)
if isinstance(device, _cpu.CpuDevice):
return ChainerxDevice(chainerx.get_device('native', 0))
if isinstance(device, cuda.GpuDevice):
return ChainerxDevice(
chainerx.get_device('cuda', device.device.id))
raise RuntimeError(
'Only CPU or GPU devices are allowed. '
'Actual: {}'.format(device))
@property
def name(self):
return self.device.name
@property
def fallback_device(self):
"""Fallback device.
A fallback device is either a :class:`~chainer.backend.CpuDevice` or
a :class:`~chainer.backend.GpuDevice` which shares the same physical
device with the original ChainerX device.
For example, the fallback device of ``native:0`` ChainerX device is
:class:`~chainer.backend.CpuDevice`. The fallback device of ``cuda:1``
ChainerX device is :class:`~chainer.backend.GpuDevice` with device ID
1.
"""
backend_name = self.device.backend.name
if backend_name == 'native':
return _cpu.CpuDevice()
if backend_name == 'cuda':
return cuda.GpuDevice.from_device_id(self.device.index)
raise RuntimeError(
'Only \'native\' or \'cuda\' devices have corresponding fallback '
'devices. Actual: {}'.format(backend_name))
def __eq__(self, other):
return (
isinstance(other, ChainerxDevice)
and other.device == self.device)
def __repr__(self):
return '<{} {}>'.format(
self.__class__.__name__, self.device.name)
def create_context(self):
# Returns a context that sets the default device.
return chainerx.using_device(self.device)
def send_array(self, array):
device = self.device
if isinstance(array, chainerx.ndarray):
if array.device is device:
return array
return array.to_device(device)
return _array_to_chainerx(array, device)
def use(self):
chainerx.set_default_device(self.device)
def is_array_supported(self, array):
return (
isinstance(array, chainerx.ndarray)
and self.device == array.device)
def to_chx(array):
"""Converts an array or arrays to ChainerX.
Destination ChainerX devices are chosen according to the types of input
arrays.
"""
return _backend._convert_arrays(array, _array_to_chainerx)
def from_chx(array):
"""Converts an array or arrays from ChainerX to NumPy or CuPy ones.
Destination array types are chosen such that no copies occur.
"""
return _backend._convert_arrays(array, _array_from_chainerx)
def _get_chainerx_device(device_spec):
# Returns chainerx.Device
if isinstance(device_spec, chainerx.Device):
return device_spec
return chainerx.get_device(device_spec)
def _array_to_chainerx(array, device=None):
# If device is None, appropriate device is chosen according to the input
# arrays.
assert device is None or isinstance(device, chainerx.Device)
if array is None:
return None
if array.dtype not in chainerx.all_dtypes:
raise TypeError(
'Dtype {} is not supported in ChainerX.'.format(array.dtype.name))
if isinstance(array, chainerx.ndarray):
if device is None:
return array
if device is array.device:
return array
return array.to_device(device)
if isinstance(array, numpy.ndarray):
if device is None:
device = chainerx.get_device('native', 0)
return chainerx.array(array, device=device, copy=False)
if isinstance(array, cuda.ndarray):
if device is None:
device = chainerx.get_device('cuda', array.device.id)
elif device.backend.name != 'cuda':
# cupy to non-cuda backend
# TODO(niboshi): Remove conversion to numpy when both CuPy and
# ChainerX support the array interface.
array = _cpu._to_cpu(array)
return chainerx.array(array, device=device, copy=False)
elif device.index != array.device.id:
# cupy to cuda backend but different device
array = cuda.to_gpu(array, device=device.index)
# cupy to cuda backend with the same device
return chainerx._core._fromrawpointer(
array.data.mem.ptr,
array.shape,
array.dtype,
array.strides,
device,
array.data.ptr - array.data.mem.ptr,
array)
if isinstance(array, intel64.mdarray):
return _array_to_chainerx(numpy.array(array), device)
if numpy.isscalar(array):
return chainerx.asarray(array)
raise TypeError(
'Array cannot be converted into chainerx.ndarray'
'\nActual type: {0}.'.format(type(array)))
def _array_from_chainerx(array):
if array is None:
return None
if not isinstance(array, chainerx.ndarray):
if isinstance(array, chainer.get_array_types()):
return array
raise TypeError(
'Tried to convert to a non-ChainerX array from an invalid type: '
'{}'.format(type(array)))
backend_name = array.device.backend.name
if backend_name == 'native':
return _cpu._to_cpu(array)
if backend_name == 'cuda':
return cuda.to_gpu(array, array.device.index)
raise ValueError(
'Only ChainerX arrays with native or cuda backends can be converted '
'to non-ChainerX arrays.\nActual: {0}.'.format(backend_name))
|
Python
| 0.000001
|
@@ -394,28 +394,11 @@
vice
-):%0A # type: (
+: '
chai
@@ -408,16 +408,17 @@
x.Device
+'
) -%3E Non
@@ -414,25 +414,25 @@
ce') -%3E None
-%0A
+:
%0A ass
|
20e8ef6bd68100a70b9d50013630ff71d8b7ec94
|
Support wildcard matches on coverage/junit results
|
changes/artifacts/__init__.py
|
changes/artifacts/__init__.py
|
from __future__ import absolute_import, print_function
from .manager import Manager
from .coverage import CoverageHandler
from .xunit import XunitHandler
manager = Manager()
manager.register(CoverageHandler, ['coverage.xml'])
manager.register(XunitHandler, ['xunit.xml', 'junit.xml'])
|
Python
| 0
|
@@ -219,16 +219,34 @@
age.xml'
+, '*.coverage.xml'
%5D)%0Amanag
@@ -292,15 +292,45 @@
'junit.xml'
+, '*.xunit.xml', '*.junit.xml'
%5D)%0A
|
3109b98d8f8befdb927828908bd213000cf9ae51
|
Handle empty result for previous runs/test failures in origin finder
|
changes/utils/originfinder.py
|
changes/utils/originfinder.py
|
from __future__ import absolute_import
from collections import defaultdict
from changes.constants import Result, Status
from changes.models import Job, TestGroup
def first(key, iterable):
for x in iterable:
if key(x):
return x
return None
def find_failure_origins(job, test_failures):
"""
Attempt to find originating causes of failures.
Returns a mapping of {TestGroup.name_sha: Job}.
"""
project = job.project
# find any existing failures in the previous runs
# to do this we first need to find the last passing job
last_pass = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.status == Status.finished,
Job.result == Result.passed,
Job.id != job.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc()).first()
if last_pass is None:
return {}
# We have to query all runs between job and last_pass, but we only
# care about runs where the suite failed. Because we're paranoid about
# performance, we limit this to 100 results.
previous_runs = Job.query.filter(
Job.project == project,
Job.date_created <= job.date_created,
Job.date_created >= last_pass.date_created,
Job.status == Status.finished,
Job.result.in_([Result.failed, Result.passed]),
Job.id != job.id,
Job.id != last_pass.id,
Job.patch == None, # NOQA
).order_by(Job.date_created.desc())[:100]
# we now have a list of previous_runs so let's find all test failures in
# these runs
queryset = TestGroup.query.filter(
TestGroup.job_id.in_(b.id for b in previous_runs),
TestGroup.result == Result.failed,
TestGroup.num_leaves == 0,
TestGroup.name_sha.in_(t.name_sha for t in test_failures),
)
previous_test_failures = defaultdict(set)
for t in queryset:
previous_test_failures[t.job_id].add(t.name_sha)
failures_at_job = dict()
searching = set(t for t in test_failures)
last_checked_run = job
for p_job in previous_runs:
p_job_failures = previous_test_failures[p_job.id]
# we have to copy the set as it might change size during iteration
for f_test in list(searching):
if f_test.name_sha not in p_job_failures:
failures_at_job[f_test] = last_checked_run
searching.remove(f_test)
last_checked_run = p_job
for f_test in searching:
failures_at_job[f_test] = last_checked_run
return failures_at_job
|
Python
| 0
|
@@ -460,16 +460,61 @@
roject%0A%0A
+ if not test_failures:%0A return %7B%7D%0A%0A
# fi
@@ -1560,16 +1560,61 @@
%5B:100%5D%0A%0A
+ if not previous_runs:%0A return %7B%7D%0A%0A
# we
|
0bf6441863433575aebcbd0b238d27d95830c015
|
Fix .iob converter (closes #3620)
|
spacy/cli/converters/iob2json.py
|
spacy/cli/converters/iob2json.py
|
# coding: utf8
from __future__ import unicode_literals
import re
from ...gold import iob_to_biluo
from ...util import minibatch
def iob2json(input_data, n_sents=10, *args, **kwargs):
"""
Convert IOB files into JSON format for use with train cli.
"""
docs = []
for group in minibatch(docs, n_sents):
group = list(group)
first = group.pop(0)
to_extend = first["paragraphs"][0]["sentences"]
for sent in group[1:]:
to_extend.extend(sent["paragraphs"][0]["sentences"])
docs.append(first)
return docs
def read_iob(raw_sents):
sentences = []
for line in raw_sents:
if not line.strip():
continue
# tokens = [t.split("|") for t in line.split()]
tokens = [re.split("[^\w\-]", line.strip())]
if len(tokens[0]) == 3:
words, pos, iob = zip(*tokens)
elif len(tokens[0]) == 2:
words, iob = zip(*tokens)
pos = ["-"] * len(words)
else:
raise ValueError(
"The iob/iob2 file is not formatted correctly. Try checking whitespace and delimiters."
)
biluo = iob_to_biluo(iob)
sentences.append(
[
{"orth": w, "tag": p, "ner": ent}
for (w, p, ent) in zip(words, pos, biluo)
]
)
sentences = [{"tokens": sent} for sent in sentences]
paragraphs = [{"sentences": [sent]} for sent in sentences]
docs = [{"id": 0, "paragraphs": [para]} for para in paragraphs]
return docs
|
Python
| 0
|
@@ -58,16 +58,50 @@
mport re
+%0Afrom cytoolz import partition_all
%0A%0Afrom .
@@ -131,38 +131,8 @@
luo%0A
-from ...util import minibatch%0A
%0A%0Ade
@@ -271,295 +271,98 @@
-docs = %5B%5D%0A for group in minibatch(docs, n_sents):%0A group =
+sentences = read_iob(input_data.sp
li
-s
t(
-group
+%22%5Cn%22)
)%0A
- first = group.pop(0)%0A to_extend = first%5B%22paragraphs%22%5D%5B0%5D%5B%22sentences%22%5D%0A for sent in group%5B1:%5D:%0A to_extend.extend(sent%5B%22paragraphs%22%5D%5B0%5D%5B%22sentences%22%5D)%0A docs.append(first
+docs = merge_sentences(sentences, n_sents
)%0A
@@ -477,16 +477,16 @@
trip():%0A
+
@@ -502,64 +502,8 @@
nue%0A
- # tokens = %5Bt.split(%22%7C%22) for t in line.split()%5D%0A
@@ -1291,24 +1291,381 @@
graphs%5D%0A return docs%0A
+%0A%0Adef merge_sentences(docs, n_sents):%0A merged = %5B%5D%0A for group in partition_all(n_sents, docs):%0A group = list(group)%0A first = group.pop(0)%0A to_extend = first%5B%22paragraphs%22%5D%5B0%5D%5B%22sentences%22%5D%0A for sent in group%5B1:%5D:%0A to_extend.extend(sent%5B%22paragraphs%22%5D%5B0%5D%5B%22sentences%22%5D)%0A merged.append(first)%0A return merged%0A
|
f190916a828ab4b8ecf16cc6a82ebf3cf8f821e1
|
Add a test for executing specs with tags
|
spec/execution_with_tags_spec.py
|
spec/execution_with_tags_spec.py
|
from mamba import description, before, context, it
from doublex import Spy
from expects import expect, be_true, be_false
from mamba import reporter, runnable
from mamba.example import Example
from mamba.example_group import ExampleGroup
from spec.object_mother import an_example_group
TAGS = ['any_tag']
with description('Example execution using tags') as self:
with before.each:
self.reporter = Spy(reporter.Reporter)
self.example_group = an_example_group()
self.example = Example(lambda x: x, parent=self.example_group,
tags=TAGS)
self.other_example = Example(lambda x: x, parent=self.example_group)
with context('when tag is included in example tags'):
with it('executes example'):
self.example.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example.was_run).to(be_true)
with context('when tag is not included in example tags'):
with it('does not execute example'):
self.other_example.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.other_example.was_run).to(be_false)
with context('when tag is included in example_group tags'):
with it('executes children'):
self.example_group = ExampleGroup('any example_group', tags=TAGS)
self.example = Example(lambda x: x)
self.other_example = Example(lambda x: x)
self.example_group.append(self.example)
self.example_group.append(self.other_example)
self.example_group.execute(self.reporter,
runnable.ExecutionContext(),
tags=TAGS)
expect(self.example.was_run).to(be_true)
expect(self.other_example.was_run).to(be_true)
|
Python
| 0
|
@@ -489,32 +489,42 @@
self.example
+_with_tags
= Example(lambd
@@ -522,32 +522,73 @@
ple(lambda x: x,
+%0A
parent=self.exa
@@ -599,16 +599,26 @@
_group,%0A
+
@@ -844,24 +844,34 @@
self.example
+_with_tags
.execute(sel
@@ -874,32 +874,42 @@
(self.reporter,%0A
+
@@ -979,32 +979,42 @@
+
tags=TAGS)%0A%0A
@@ -1032,32 +1032,42 @@
ect(self.example
+_with_tags
.was_run).to(be_
|
cb1912234a058dd95c20cd765771552e76224c7a
|
fix c3 settings
|
chipyprj/chipyprj/settings.py
|
chipyprj/chipyprj/settings.py
|
"""
Django settings for chipyprj project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'f0&n*v0r5yi8j*aylxzdre*4l1oa#+bvbjrow_nx$lylati!yd'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'chipyapp'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'chipyprj.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'chipyprj.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'mentorship',
'USER': 'mentorship',
'PASSWORD': 'mentorship123',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = BASE_DIR+'/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
|
Python
| 0.000002
|
@@ -2791,16 +2791,21 @@
'/static
+-root
/'%0A%0ASTAT
|
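The change above resolves a clash between STATIC_ROOT and STATICFILES_DIRS: with both pointing at BASE_DIR/static, Django's staticfiles checks raise ImproperlyConfigured, since collectstatic would otherwise copy the directory into itself. A minimal sketch of the corrected layout (a settings fragment; the directory names are the ones used in the record):

import os

BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

STATIC_URL = '/static/'

# Destination that `collectstatic` copies into; served by the web server.
STATIC_ROOT = os.path.join(BASE_DIR, 'static-root')

# Extra source directories searched for static files during development.
# These must not contain STATIC_ROOT itself.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)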
aca08449ad9b382b71a191d23874c31b83d4c44b
|
Fix retrieval of value
|
spectral_cube/tests/test_dask.py
|
spectral_cube/tests/test_dask.py
|
# Tests specific to the dask class
import pytest
from numpy.testing import assert_allclose
from astropy.tests.helper import assert_quantity_allclose
from astropy import units as u
from spectral_cube import DaskSpectralCube
from .test_casafuncs import make_casa_testimage
try:
import casatools
from casatools import image
CASA_INSTALLED = True
except ImportError:
try:
from taskinit import ia as image
CASA_INSTALLED = True
except ImportError:
CASA_INSTALLED = False
class Array:
args = None
kwargs = None
def compute(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
def test_scheduler(data_adv):
cube = DaskSpectralCube.read(data_adv)
fake_array = Array()
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'synchronous'}
with cube.use_dask_scheduler('threads'):
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'synchronous'}
cube.use_dask_scheduler('threads')
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
with cube.use_dask_scheduler('processes', num_workers=4):
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'processes', 'num_workers': 4}
cube._compute(fake_array)
assert fake_array.kwargs == {'scheduler': 'threads'}
def test_save_to_tmp_dir(data_adv):
pytest.importorskip('zarr')
cube = DaskSpectralCube.read(data_adv)
cube_new = cube.sigma_clip_spectrally(3, save_to_tmp_dir=True)
# The following test won't necessarily always work in future since the name
# is not really guaranteed, but this is pragmatic enough for now
assert cube_new._data.name.startswith('from-zarr')
def test_rechunk(data_adv):
cube = DaskSpectralCube.read(data_adv)
assert cube._data.chunksize == (4, 3, 2)
cube_new = cube.rechunk(chunks=(1, 2, 3))
# note last element is 2 because the chunk size we asked for
    # is larger than the cube - this is fine and deliberate in this test
assert cube_new._data.chunksize == (1, 2, 2)
def test_statistics(data_adv):
cube = DaskSpectralCube.read(data_adv)
stats = cube.statistics()
assert_quantity_allclose(stats['npts'], 24)
assert_quantity_allclose(stats['mean'], 0.4941651776136591 * u.K)
assert_quantity_allclose(stats['sigma'], 0.3021908870982011 * u.K)
assert_quantity_allclose(stats['sum'], 11.85996426272782 * u.K)
assert_quantity_allclose(stats['sumsq'], 7.961125988022091 * u.K ** 2)
assert_quantity_allclose(stats['min'], 0.0363300285196364 * u.K)
assert_quantity_allclose(stats['max'], 0.9662900439556562 * u.K)
assert_quantity_allclose(stats['rms'], 0.5759458158839716 * u.K)
@pytest.mark.skipif(not CASA_INSTALLED, reason='Requires CASA to be installed')
def test_statistics_consistency_casa(data_adv, tmp_path):
# Similar to test_statistics but compares to CASA directly.
cube = DaskSpectralCube.read(data_adv)
stats = cube.statistics()
make_casa_testimage(data_adv, tmp_path / 'casa.image')
ia = casatools.image()
ia.open(str(tmp_path / 'casa.image'))
stats_casa = ia.statistics()
ia.close()
for key in stats:
assert_allclose(stats[key].value, stats_casa[key])
|
Python
| 0.000008
|
@@ -3331,24 +3331,153 @@
ts:%0A
+if isinstance(stats%5Bkey%5D, u.Quantity):%0A value = stats%5Bkey%5D.value%0A else:%0A value = stats%5Bkey%5D%0A
assert_allcl
@@ -3480,27 +3480,16 @@
llclose(
-stats%5Bkey%5D.
value, s
|
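The fix above is needed because cube.statistics() mixes plain numbers with astropy Quantity objects, while the CASA dictionary holds bare floats, so assert_allclose chokes on the unit wrapper. A self-contained sketch of the unwrap-before-compare pattern (assumes astropy and numpy are installed):

from astropy import units as u
from numpy.testing import assert_allclose

def assert_stats_match(stats, reference):
    """Compare per-key statistics, stripping astropy units first."""
    for key in stats:
        if isinstance(stats[key], u.Quantity):
            value = stats[key].value  # bare number, unit dropped
        else:
            value = stats[key]
        assert_allclose(value, reference[key])

# Mixed dictionary: 'npts' is plain, 'mean' carries a unit.
assert_stats_match({'npts': 24, 'mean': 0.5 * u.K},
                   {'npts': 24, 'mean': 0.5})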
4cea85888411aef213fc69a80efdff5e2e25e8c6
|
Extend error message
|
sqlobject/tests/test_datetime.py
|
sqlobject/tests/test_datetime.py
|
import py.test
from sqlobject import *
from sqlobject.tests.dbtest import *
########################################
## Date/time columns
########################################
from sqlobject import col
col.default_datetime_implementation = DATETIME_IMPLEMENTATION
from datetime import datetime, date, time
class DateTime1(SQLObject):
col1 = DateTimeCol()
col2 = DateCol()
col3 = TimeCol()
def test_dateTime():
setupClass(DateTime1)
_now = datetime.now()
dt1 = DateTime1(col1=_now, col2=_now, col3=_now.time())
assert isinstance(dt1.col1, datetime)
assert dt1.col1.year == _now.year
assert dt1.col1.month == _now.month
assert dt1.col1.day == _now.day
assert dt1.col1.hour == _now.hour
assert dt1.col1.minute == _now.minute
assert dt1.col1.second == _now.second
assert isinstance(dt1.col2, date)
assert not isinstance(dt1.col2, datetime)
assert dt1.col2.year == _now.year
assert dt1.col2.month == _now.month
assert dt1.col2.day == _now.day
assert isinstance(dt1.col3, time)
assert dt1.col3.hour == _now.hour
assert dt1.col3.minute == _now.minute
assert dt1.col3.second == _now.second
def test_microseconds():
connection = getConnection()
if hasattr(connection, 'can_use_microseconds') and \
not connection.can_use_microseconds():
py.test.skip("The database doesn't support microseconds; microseconds are supported by MariaDB since version 5.3.0 and by MySQL since version 5.6.4.")
setupClass(DateTime1)
_now = datetime.now()
dt1 = DateTime1(col1=_now, col2=_now, col3=_now.time())
assert dt1.col1.microsecond == _now.microsecond
assert dt1.col3.microsecond == _now.microsecond
if mxdatetime_available:
col.default_datetime_implementation = MXDATETIME_IMPLEMENTATION
from mx.DateTime import now, Time
dateFormat = None # use default
connection = getConnection()
if connection.dbName == "sqlite":
if connection.using_sqlite2:
# mxDateTime sends and PySQLite2 returns full date/time for dates
dateFormat = "%Y-%m-%d %H:%M:%S.%f"
class DateTime2(SQLObject):
col1 = DateTimeCol()
col2 = DateCol(dateFormat=dateFormat)
col3 = TimeCol()
def test_mxDateTime():
setupClass(DateTime2)
_now = now()
dt2 = DateTime2(col1=_now, col2=_now, col3=Time(_now.hour, _now.minute, _now.second))
assert isinstance(dt2.col1, col.DateTimeType)
assert dt2.col1.year == _now.year
assert dt2.col1.month == _now.month
assert dt2.col1.day == _now.day
assert dt2.col1.hour == _now.hour
assert dt2.col1.minute == _now.minute
assert dt2.col1.second == int(_now.second)
assert isinstance(dt2.col2, col.DateTimeType)
assert dt2.col2.year == _now.year
assert dt2.col2.month == _now.month
assert dt2.col2.day == _now.day
if getConnection().dbName == "sqlite":
assert dt2.col2.hour == _now.hour
assert dt2.col2.minute == _now.minute
assert dt2.col2.second == int(_now.second)
else:
assert dt2.col2.hour == 0
assert dt2.col2.minute == 0
assert dt2.col2.second == 0
assert isinstance(dt2.col3, (col.DateTimeType, col.TimeType))
assert dt2.col3.hour == _now.hour
assert dt2.col3.minute == _now.minute
assert dt2.col3.second == int(_now.second)
|
Python
| 0.000002
|
@@ -1361,16 +1361,29 @@
st.skip(
+%0A
%22The dat
@@ -1418,16 +1418,31 @@
econds;
+%22%0A %22
microsec
@@ -1490,21 +1490,33 @@
on 5.3.0
- and
+, %22%0A %22
by MySQL
@@ -1535,16 +1535,66 @@
on 5.6.4
+, %22%0A %22by MSSQL since MS SQL Server 2008
.%22)%0A%0A
|
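The diff above reflows the long skip message using adjacent string literals, which the Python compiler joins into a single string, and appends an MSSQL clause. A tiny demonstration of the idiom:

message = (
    "The database doesn't support microseconds; "
    "microseconds are supported by MariaDB since version 5.3.0, "
    "by MySQL since version 5.6.4, "
    "by MSSQL since MS SQL Server 2008."
)
# Adjacent literals fuse at compile time: one string, no newlines.
assert "\n" not in message
assert message.endswith("MS SQL Server 2008.")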
01e69c34ca8a7873e5fc990508464393fa3184bd
|
Implement port accessors
|
stoqdrivers/devices/base.py
|
stoqdrivers/devices/base.py
|
# -*- Mode: Python; coding: iso-8859-1 -*-
# vi:si:et:sw=4:sts=4:ts=4
##
## Stoqdrivers
## Copyright (C) 2006 Async Open Source <http://www.async.com.br>
## All rights reserved
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
## USA.
##
## Author(s): Henrique Romano <henrique@async.com.br>
##
"""
stoqdrivers/devices/base.py:
Generic base class implementation for all devices.
"""
import gobject
from stoqdrivers.log import Logger
from stoqdrivers.configparser import StoqdriversConfig
from stoqdrivers.exceptions import CriticalError, ConfigError
from stoqdrivers.constants import PRINTER_DEVICE, SCALE_DEVICE
from stoqdrivers.translation import stoqdrivers_gettext
from stoqdrivers.devices.serialbase import SerialPort
_ = lambda msg: stoqdrivers_gettext(msg)
class BaseDevice(Logger):
""" Base class for all device interfaces, responsible for instantiate
the device driver itself based on the brand and model specified or in
the configuration file.
"""
log_domain = "stoqdrivers"
typename_translate_dict = {PRINTER_DEVICE: "Printer",
SCALE_DEVICE: "Scale"}
# Subclasses must define these attributes
device_dirname = None
required_interfaces = None
device_type = None
def __init__(self, brand=None, model=None, device=None, config_file=None,
port=None, consts=None):
Logger.__init__(self)
if not self.device_dirname:
raise ValueError("Subclasses must define the "
"`device_dirname' attribute")
elif self.device_type is None:
raise ValueError("device_type must be defined")
self.brand = brand
self.device = device
self.model = model
self._port = port
self._driver_constants = consts
self._load_configuration(config_file)
def _load_configuration(self, config_file):
section_name = BaseDevice.typename_translate_dict[self.device_type]
if not self.model or not self.brand or (not self.device and not self._port):
self.config = StoqdriversConfig(config_file)
if not self.config.has_section(section_name):
raise ConfigError(_("There is no section named `%s'!")
% section_name)
self.brand = self.config.get_option("brand", section_name)
self.device = self.config.get_option("device", section_name)
self.model = self.config.get_option("model", section_name)
name = "stoqdrivers.devices.%s.%s.%s" % (self.device_dirname,
self.brand, self.model)
try:
module = __import__(name, None, None, 'stoqdevices')
except ImportError, reason:
raise CriticalError("Could not load driver %s %s: %s"
% (self.brand.capitalize(),
self.model.upper(), reason))
class_name = self.model
driver_class = getattr(module, class_name, None)
if not driver_class:
raise CriticalError("Device driver at %s needs a class called %s"
% (name, class_name))
if not self._port:
self._port = SerialPort(self.device)
self._driver = driver_class(self._port, consts=self._driver_constants)
self.debug(("Config data: brand=%s,device=%s,model=%s\n"
% (self.brand, self.device, self.model)))
self.check_interfaces()
def get_model_name(self):
return self._driver.model_name
def check_interfaces(self):
""" This method must be implemented in subclass and must ensure that the
driver implements a valid interface for the current operation state.
"""
raise NotImplementedError
def notify_read(self, func):
""" This function can be called when the callsite must know when data
is coming from the serial port. It is necessary that a gobject main
loop is already running before calling this method.
"""
gobject.io_add_watch(self._driver.fd, gobject.IO_IN,
lambda fd, cond: func(self, cond))
def get_driver(self):
""" Get the internal driver, this is normally not needed to be able
to print or use the driver.
@returns: the driver
"""
return self._driver
|
Python
| 0.000001
|
@@ -4887,16 +4887,147 @@
cond))%0A%0A
+ def set_port(self, port):%0A self._driver.set_port(port)%0A%0A def get_port(self):%0A return self._driver.get_port()%0A%0A
def
|
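The port accessors added above store nothing on BaseDevice; they forward to the wrapped driver so it stays the single source of truth for the open port. A minimal, self-contained sketch of that delegation shape (FakeDriver is an illustrative stand-in, not a stoqdrivers class):

class FakeDriver:
    """Stand-in for a concrete serial-device driver."""
    def __init__(self, port):
        self._port = port

    def set_port(self, port):
        self._port = port

    def get_port(self):
        return self._port

class Device:
    """Facade owning a driver; port access is delegated, not duplicated."""
    def __init__(self, driver):
        self._driver = driver

    def set_port(self, port):
        self._driver.set_port(port)

    def get_port(self):
        return self._driver.get_port()

device = Device(FakeDriver('/dev/ttyS0'))
device.set_port('/dev/ttyS1')
assert device.get_port() == '/dev/ttyS1'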
04686df17ae26f86484965365d12039161d8ee2d
|
Add comment
|
synapse/handlers/profile.py
|
synapse/handlers/profile.py
|
# -*- coding: utf-8 -*-
# Copyright 2014-2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import defer
from synapse.api.errors import SynapseError, AuthError, CodeMessageException
from synapse.types import UserID, Requester
from synapse.util import unwrapFirstError
from ._base import BaseHandler
import logging
logger = logging.getLogger(__name__)
def changed_presencelike_data(distributor, user, state):
return distributor.fire("changed_presencelike_data", user, state)
def collect_presencelike_data(distributor, user, content):
return distributor.fire("collect_presencelike_data", user, content)
class ProfileHandler(BaseHandler):
def __init__(self, hs):
super(ProfileHandler, self).__init__(hs)
self.federation = hs.get_replication_layer()
self.federation.register_query_handler(
"profile", self.on_profile_query
)
distributor = hs.get_distributor()
self.distributor = distributor
distributor.observe("registered_user", self.registered_user)
distributor.observe(
"collect_presencelike_data", self.collect_presencelike_data
)
def registered_user(self, user):
return self.store.create_profile(user.localpart)
@defer.inlineCallbacks
def get_displayname(self, target_user):
if self.hs.is_mine(target_user):
displayname = yield self.store.get_profile_displayname(
target_user.localpart
)
defer.returnValue(displayname)
else:
try:
result = yield self.federation.make_query(
destination=target_user.domain,
query_type="profile",
args={
"user_id": target_user.to_string(),
"field": "displayname",
}
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get displayname")
raise
except:
logger.exception("Failed to get displayname")
else:
defer.returnValue(result["displayname"])
@defer.inlineCallbacks
def set_displayname(self, target_user, auth_user, new_displayname):
"""target_user is the user whose displayname is to be changed;
auth_user is the user attempting to make this change."""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
if target_user != auth_user:
raise AuthError(400, "Cannot set another user's displayname")
if new_displayname == '':
new_displayname = None
yield self.store.set_profile_displayname(
target_user.localpart, new_displayname
)
yield changed_presencelike_data(self.distributor, target_user, {
"displayname": new_displayname,
})
yield self._update_join_states(target_user)
@defer.inlineCallbacks
def get_avatar_url(self, target_user):
if self.hs.is_mine(target_user):
avatar_url = yield self.store.get_profile_avatar_url(
target_user.localpart
)
defer.returnValue(avatar_url)
else:
try:
result = yield self.federation.make_query(
destination=target_user.domain,
query_type="profile",
args={
"user_id": target_user.to_string(),
"field": "avatar_url",
}
)
except CodeMessageException as e:
if e.code != 404:
logger.exception("Failed to get avatar_url")
raise
except:
logger.exception("Failed to get avatar_url")
defer.returnValue(result["avatar_url"])
@defer.inlineCallbacks
def set_avatar_url(self, target_user, auth_user, new_avatar_url):
"""target_user is the user whose avatar_url is to be changed;
auth_user is the user attempting to make this change."""
if not self.hs.is_mine(target_user):
raise SynapseError(400, "User is not hosted on this Home Server")
if target_user != auth_user:
raise AuthError(400, "Cannot set another user's avatar_url")
yield self.store.set_profile_avatar_url(
target_user.localpart, new_avatar_url
)
yield changed_presencelike_data(self.distributor, target_user, {
"avatar_url": new_avatar_url,
})
yield self._update_join_states(target_user)
@defer.inlineCallbacks
def collect_presencelike_data(self, user, state):
if not self.hs.is_mine(user):
defer.returnValue(None)
(displayname, avatar_url) = yield defer.gatherResults(
[
self.store.get_profile_displayname(user.localpart),
self.store.get_profile_avatar_url(user.localpart),
],
consumeErrors=True
).addErrback(unwrapFirstError)
state["displayname"] = displayname
state["avatar_url"] = avatar_url
defer.returnValue(None)
@defer.inlineCallbacks
def on_profile_query(self, args):
user = UserID.from_string(args["user_id"])
if not self.hs.is_mine(user):
raise SynapseError(400, "User is not hosted on this Home Server")
just_field = args.get("field", None)
response = {}
if just_field is None or just_field == "displayname":
response["displayname"] = yield self.store.get_profile_displayname(
user.localpart
)
if just_field is None or just_field == "avatar_url":
response["avatar_url"] = yield self.store.get_profile_avatar_url(
user.localpart
)
defer.returnValue(response)
@defer.inlineCallbacks
def _update_join_states(self, user):
if not self.hs.is_mine(user):
return
self.ratelimit(user.to_string())
joins = yield self.store.get_rooms_for_user(
user.to_string(),
)
for j in joins:
handler = self.hs.get_handlers().room_member_handler
try:
# Assume the user isn't a guest because we don't let guests set
# profile or avatar data.
requester = Requester(user, "", False)
yield handler.update_membership(
requester,
user,
j.room_id,
"join", # We treat a profile update like a join.
ratelimit=False,
)
except Exception as e:
logger.warn(
"Failed to update join event for room %s - %s",
j.room_id, str(e.message)
)
|
Python
| 0
|
@@ -7340,16 +7340,64 @@
t=False,
+ # Try to hide that these events aren't atomic.
%0A
|
887bba729cd4f4f7391ac6f08ab7601976bcd1ca
|
Update __init__.py
|
templated_email/__init__.py
|
templated_email/__init__.py
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from templated_email.backends.vanilla_django import TemplateBackend
import warnings
warnings.filterwarnings('error', 'django.utils.importlib')
try:
# Django <= 1.7
from django.utils.importlib import import_module
except:
# Django >= 1.8
from importlib import import_module
import six
def get_connection(backend=None, template_prefix=None, template_suffix=None,
fail_silently=False, **kwargs):
"""Load a templated e-mail backend and return an instance of it.
If backend is None (default) settings.TEMPLATED_EMAIL_BACKEND is used.
Both fail_silently and other keyword arguments are used in the
constructor of the backend.
"""
# This method is mostly a copy of the backend loader present in
# django.core.mail.get_connection
klass_path = backend or getattr(settings, 'TEMPLATED_EMAIL_BACKEND',
TemplateBackend)
if isinstance(klass_path, six.string_types):
try:
            # First check if the class name is omitted and we have a module path in settings
mod = import_module(klass_path)
klass_name = 'TemplateBackend'
except ImportError as e:
# Fallback to class name
try:
mod_name, klass_name = klass_path.rsplit('.', 1)
mod = import_module(mod_name)
except ImportError as e:
raise ImproperlyConfigured(
('Error importing templated email backend module %s: "%s"'
% (mod_name, e)))
try:
klass = getattr(mod, klass_name)
except AttributeError:
raise ImproperlyConfigured(('Module "%s" does not define a '
'"%s" class' % (mod_name, klass_name)))
else:
klass = klass_path
return klass(fail_silently=fail_silently, template_prefix=template_prefix,
template_suffix=template_suffix, **kwargs)
def get_templated_mail(template_name, context, from_email=None, to=None,
cc=None, bcc=None, headers=None,
template_prefix=None, template_suffix=None,
template_dir=None, file_extension=None):
"""Returns a templated EmailMessage instance without a connection using
the django templating backend."""
template_prefix = template_prefix or template_dir
template_suffix = template_suffix or file_extension
templater = TemplateBackend(template_prefix=template_prefix,
template_suffix=template_suffix)
return templater.get_email_message(template_name, context,
from_email=from_email, to=to,
cc=cc, bcc=bcc, headers=headers,
template_prefix=template_prefix,
template_suffix=template_suffix)
def send_templated_mail(template_name, from_email, recipient_list, context,
cc=None, bcc=None, fail_silently=False, connection=None,
headers=None, template_prefix=None,
template_suffix=None, **kwargs):
"""Easy wrapper for sending a templated email to a recipient list.
Final behaviour of sending depends on the currently selected engine.
See BackendClass.send.__doc__
"""
connection = connection or get_connection(template_prefix=template_prefix,
template_suffix=template_suffix)
return connection.send(template_name, from_email, recipient_list, context,
cc=cc, bcc=bcc, fail_silently=fail_silently,
headers=headers, **kwargs)
|
Python
| 0.000072
|
@@ -151,16 +151,27 @@
ackend%0A%0A
+import six%0A
import w
@@ -389,20 +389,8 @@
le%0A%0A
-import six%0A%0A
%0Adef
|
d57fb3ca8c1f4329c8ac90cb785b27123d98aee5
|
Bump the version to 0.3.1
|
backlog/__init__.py
|
backlog/__init__.py
|
"""A Simple Note Manager"""
from __future__ import absolute_import
from backlog.backlog import Backlog
__version__ = '0.3.0'
|
Python
| 0.999999
|
@@ -117,11 +117,11 @@
= '0.3.
-0
+1
'%0A
|
eef0663675b741d32f399bdbca1a95b943a1fb68
|
Create a script that uses an autoencoder to extract bow
|
bag-of-words/bow.py
|
bag-of-words/bow.py
|
#!/usr/bin/env python
"""This module contains function and classes relevant with the computation of
a bag of words model. At start we suppose that suitable descriptors for our
dataset are already extracted. Subsequently we procceed to the second step of
quantization, in this step we use a clustering algorithm such as Kmeas to
create our visual vocabulary. At the end in the final step we represent all our
features according to the previously caclulated vocabulary.
"""
import numpy as np
from sklearn.cluster import KMeans
class Encoding:
"""This class is responsible for computing a Bag of Words model"""
def __init__(self, n_codewords, iterations, clusterer=KMeans):
"""Initialize the class instance.
Parameters:
-----------
n_codewords: int
The number of clusters to be created. Each cluster's
centroid corresponds to a codeword.
iterations: int
The maximum number of iterations performed by the
clusterer.
clusterer: callable
                A callable that, given the number of clusters, returns a
                clusterer implementing the fit and predict methods.
"""
self.n_codewords = n_codewords
self.iterations = iterations
self._clusterer = clusterer(
n_clusters=n_codewords,
max_iter=iterations
)
def fit(self, data):
"""Build a visual dictionary for the Bag of Words model.
        Apply a clustering algorithm to the data (the default is KMeans) in
        order to create a suitable visual vocabulary. If KMeans is chosen,
        every centroid corresponds to a visual codeword of our vocabulary.
Parameters:
-----------
data: array_like
            Array of datapoints used to create the visual vocabulary.
"""
# Compute clustering
self._clusterer.fit(data)
def encode(self, data, density):
"""Encode a list of data using the learnt Bag of Words model
Parameters:
-----------
data: array_like
List of data points that will be encoded using the already
computed Bag of Words model
"""
# If there are no features for a specific video return a zero array
if len(data) == 0:
return np.zeros(self.n_codewords)
        # Represent the datapoints as a histogram. When the bins argument is
        # a sequence it is interpreted as bin edges, which is why we add 1.
        # We also subtract 0.5 so that each bin's label sits in the middle
        # of its corresponding bin.
hist, edges = np.histogram(
self._clusterer.predict(data),
bins=np.arange(self.n_codewords + 1) - .5,
density=density
)
return hist
|
Python
| 0.000001
|
@@ -1459,16 +1459,274 @@
)%0A%0A
+ @property%0A def centroids(self):%0A %22%22%22The centroids of the encoding%22%22%22%0A return self._clusterer.cluster_centers_.copy()%0A%0A @centroids.setter%0A def centroids(self, centroids):%0A self._clusterer.cluster_centers_ = centroids.copy()%0A%0A
def
|
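The property pair added above copies the centroid array in both directions, so callers can persist or restore a trained vocabulary without aliasing the KMeans internals. A condensed sketch of the same accessors:

import numpy as np
from sklearn.cluster import KMeans

class Encoding:
    def __init__(self, n_codewords=4):
        self._clusterer = KMeans(n_clusters=n_codewords)

    @property
    def centroids(self):
        """The centroids of the encoding, as a defensive copy."""
        return self._clusterer.cluster_centers_.copy()

    @centroids.setter
    def centroids(self, centroids):
        # Copy on assignment too, so later mutation by the caller
        # cannot silently change the fitted vocabulary.
        self._clusterer.cluster_centers_ = centroids.copy()

enc = Encoding()
enc.centroids = np.zeros((4, 2))   # e.g. restore a persisted vocabulary
assert enc.centroids.shape == (4, 2)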
f33de839f7eaf4c983a542ccfb82f2f7d304c905
|
remove debug
|
bd_elk/ip/common.py
|
bd_elk/ip/common.py
|
from elasticsearch_dsl import DocType
from bd_elk.common_es import CommonEs
from ultis.commons import ComFunc
class CommonIp(DocType, CommonEs):
# todo should use pandas to do json format
"""
    Common base class for IP-based data
"""
    # The designated type (src, dst, ...)
_type = None
@classmethod
def get_total_stats(cls, **kwargs):
"""
get ip stats
:return:
"""
cache_key = 'ip-stats-{0}'.format(cls._type)
json_res = ComFunc.cache(cache_key, update=True)
if not json_res:
s = cls.search().extra(size=0)
s.aggs.bucket('ip_terms', 'terms', field='ip.keyword')
s.aggs['ip_terms'].metric('flows_per_ip', 'sum', field='flows')
s.aggs['ip_terms'].metric('bytes_per_ip', 'sum', field='bytes')
s.aggs['ip_terms'].metric('packets_per_ip', 'sum', field='packets')
cls.debug_query(s)
response = s.execute()
json_res = []
for stats in response.aggregations.ip_terms.buckets:
json_res.append({
'ip': stats.key,
'flows': ComFunc.number_convert(
stats.flows_per_ip.value, 'k'
),
'packets': ComFunc.number_convert(
stats.packets_per_ip.value, 'k'
),
'bytes': ComFunc.bytes_convert(
stats.bytes_per_ip.value, 'mb'
)
})
ComFunc.cache(cache_key, data=json_res)
return json_res
@classmethod
def get_ip_date_history(cls, **kwargs):
"""
        Read a given IP's data over a time range
:param kwargs:
:return:
"""
ip_str = kwargs.get('ip')
_interval = kwargs.get('interval', '1h')
cache_key = 'date-record-{0}-{1}'.format(
ip_str, cls._type
)
json_res = ComFunc.cache(cache_key)
if not json_res:
s = cls.search().query("match", ip=ip_str).extra(size=0)
s.aggs.bucket(
'ip_per_hour', 'date_histogram', field='@timestamp',
interval=_interval, time_zone=cls.time_zone
)
s.aggs['ip_per_hour'].metric(
'flows_per_hour', 'avg', field='flows'
)
s.aggs['ip_per_hour'].metric(
'bytes_per_hour', 'avg', field='bytes'
)
s.aggs['ip_per_hour'].metric(
'packets_per_hour', 'avg', field='packets'
)
# cls.debug_query(s)
response = s.execute()
json_res = []
for dt in response.aggregations.ip_per_hour.buckets:
datetime = dt.key_as_string
json_res.append({
'datetime': datetime,
'flows': ComFunc.number_convert(
dt.flows_per_hour.value, 'k'
),
'packets': ComFunc.number_convert(
dt.packets_per_hour.value, 'k'
),
'bytes': ComFunc.bytes_convert(
dt.bytes_per_hour.value, 'mb'
)
})
ComFunc.cache(cache_key, data=json_res)
return json_res
@classmethod
def get_top_date_history(cls, **kwargs):
"""
        Read the average data flow of the top-7 IPs per time interval
:param kwargs:
:return:
"""
        # Fetch data at one-hour intervals by default
_interval = kwargs.get('interval', '1h')
cache_key = 'all-ip-date-record-{0}'.format(cls._type)
json_res = ComFunc.cache(cache_key)
if not json_res:
s = cls.search().extra(size=0)
s.aggs.bucket(
'ips', 'terms', field='ip.keyword', size=7,
order={"avg_flow": "desc"}
)
s.aggs['ips'].metric('avg_flow', 'avg', field='flows')
s.aggs['ips'].bucket(
'date_avg_flow', 'date_histogram', field='@timestamp',
time_zone=cls.time_zone, interval=_interval
)
s.aggs['ips']['date_avg_flow'].metric(
'ip_avg_flow', 'avg', field='flows'
)
# cls.debug_query(s)
response = s.execute()
json_res = {}
for dt in response.aggregations.ips.buckets:
_ip = dt.key
json_res[_ip] = []
for date_flow in dt.date_avg_flow.buckets:
json_res[_ip].append({
'avg_flow': ComFunc.number_convert(
date_flow.ip_avg_flow.value
),
'datetime': date_flow.key_as_string
})
ComFunc.cache(cache_key, data=json_res)
return json_res
|
Python
| 0.000002
|
@@ -501,21 +501,8 @@
_key
-, update=True
)%0D%0A%0D
@@ -888,16 +888,18 @@
+ #
cls.deb
|
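The bug above was the update=True flag: it forced a cache miss on every call, so the Elasticsearch aggregation reran each time; dropping it (alongside silencing the debug query) restores read-through behaviour. A generic sketch of that pattern (ComFunc.cache's real signature is project-specific; this only models the read-through idea):

_CACHE = {}

def cached(key, compute):
    """Read-through cache: reuse a stored value, else compute and store."""
    if key in _CACHE:            # the update=True flag skipped this branch
        return _CACHE[key]
    value = compute()
    _CACHE[key] = value
    return value

calls = []
def expensive_aggregation():
    calls.append(1)
    return {'flows': 42}

assert cached('ip-stats-src', expensive_aggregation) == {'flows': 42}
assert cached('ip-stats-src', expensive_aggregation) == {'flows': 42}
assert len(calls) == 1           # second call served from the cache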
ffe61046f8fa2052a7ef75bae7da4b5aaafc24ef
|
check for label in label info from MB (fixes #254)
|
beets/autotag/mb.py
|
beets/autotag/mb.py
|
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Searches for albums in the MusicBrainz database.
"""
import logging
from . import musicbrainz3
import beets.autotag.hooks
import beets
SEARCH_LIMIT = 5
VARIOUS_ARTISTS_ID = '89ad4ac3-39f7-470e-963a-56509c546377'
musicbrainz3._useragent = 'beets/%s' % beets.__version__
class ServerBusyError(Exception): pass
class BadResponseError(Exception): pass
log = logging.getLogger('beets')
# We hard-code IDs for artists that can't easily be searched for.
SPECIAL_CASE_ARTISTS = {
'!!!': 'f26c72d3-e52c-467b-b651-679c73d8e1a7',
}
RELEASE_INCLUDES = ['artists', 'media', 'recordings', 'release-groups',
'labels', 'artist-credits']
TRACK_INCLUDES = ['artists']
def _adapt_criteria(criteria):
"""Special-case artists in a criteria dictionary before it is passed
to the MusicBrainz search server. The dictionary supplied is
mutated; nothing is returned.
"""
if 'artist' in criteria:
for artist, artist_id in SPECIAL_CASE_ARTISTS.items():
if criteria['artist'] == artist:
criteria['arid'] = artist_id
del criteria['artist']
break
def track_info(recording):
"""Translates a MusicBrainz recording result dictionary into a beets
``TrackInfo`` object.
"""
info = beets.autotag.hooks.TrackInfo(recording['title'],
recording['id'])
if 'artist-credit' in recording: # XXX: when is this not included?
artist = recording['artist-credit'][0]['artist']
info.artist = artist['name']
info.artist_id = artist['id']
if recording.get('length'):
info.length = int(recording['length'])/(1000.0)
return info
def _set_date_str(info, date_str):
"""Given a (possibly partial) YYYY-MM-DD string and an AlbumInfo
object, set the object's release date fields appropriately.
"""
if date_str:
date_parts = date_str.split('-')
for key in ('year', 'month', 'day'):
if date_parts:
setattr(info, key, int(date_parts.pop(0)))
def album_info(release):
"""Takes a MusicBrainz release result dictionary and returns a beets
AlbumInfo object containing the interesting data about that release.
"""
# Basic info.
artist = release['artist-credit'][0]['artist']
tracks = []
for medium in release['medium-list']:
tracks.extend(i['recording'] for i in medium['track-list'])
info = beets.autotag.hooks.AlbumInfo(
release['title'],
release['id'],
artist['name'],
artist['id'],
[track_info(track) for track in tracks],
)
info.va = info.artist_id == VARIOUS_ARTISTS_ID
if 'asin' in release:
info.asin = release['asin']
# Release type not always populated.
if 'type' in release['release-group']:
reltype = release['release-group']['type']
if reltype:
info.albumtype = reltype.lower()
# Release date.
if 'first-release-date' in release['release-group']:
# Try earliest release date for the entire group first.
_set_date_str(info, release['release-group']['first-release-date'])
elif 'date' in release:
# Fall back to release-specific date.
_set_date_str(info, release['date'])
# Label name.
if release.get('label-info-list'):
label = release['label-info-list'][0]['label']['name']
if label != '[no label]':
info.label = label
return info
def match_album(artist, album, tracks=None, limit=SEARCH_LIMIT):
"""Searches for a single album ("release" in MusicBrainz parlance)
and returns an iterator over AlbumInfo objects.
The query consists of an artist name, an album name, and,
optionally, a number of tracks on the album.
"""
# Build search criteria.
criteria = {'release': album}
if artist is not None:
criteria['artist'] = artist
else:
# Various Artists search.
criteria['arid'] = VARIOUS_ARTISTS_ID
if tracks is not None:
criteria['tracks'] = str(tracks)
_adapt_criteria(criteria)
res = musicbrainz3.release_search(limit=limit, **criteria)
for release in res['release-list']:
# The search result is missing some data (namely, the tracks),
# so we just use the ID and fetch the rest of the information.
yield album_for_id(release['id'])
def match_track(artist, title, limit=SEARCH_LIMIT):
"""Searches for a single track and returns an iterable of TrackInfo
objects.
"""
criteria = {
'artist': artist,
'recording': title,
}
_adapt_criteria(criteria)
res = musicbrainz3.recording_search(limit=limit, **criteria)
for recording in res['recording-list']:
yield track_info(recording)
def album_for_id(albumid):
"""Fetches an album by its MusicBrainz ID and returns an AlbumInfo
object or None if the album is not found.
"""
try:
res = musicbrainz3.get_release_by_id(albumid, RELEASE_INCLUDES)
except musicbrainz3.ResponseError:
log.debug('Album ID match failed.')
return None
return album_info(res['release'])
def track_for_id(trackid):
"""Fetches a track by its MusicBrainz ID. Returns a TrackInfo object
or None if no track is found.
"""
try:
res = musicbrainz3.get_recording_by_id(trackid, TRACK_INCLUDES)
except musicbrainz3.ResponseError:
log.debug('Track ID match failed.')
return None
return track_info(res['recording'])
|
Python
| 0
|
@@ -3999,16 +3999,21 @@
label
+_info
= relea
@@ -4036,16 +4036,79 @@
ist'%5D%5B0%5D
+%0A if label_info%5B'label'%5D:%0A label = label_info
%5B'label'
@@ -4109,32 +4109,36 @@
label'%5D%5B'name'%5D%0A
+
if label
@@ -4155,16 +4155,20 @@
abel%5D':%0A
+
|
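The guard added above handles MusicBrainz releases whose label-info entry exists (for instance carrying only a catalog number) but has no usable 'label' value. A hedged sketch of the lookup, using .get() as an equivalent defensive variant; the dict shapes mirror the response format used in the record:

def extract_label(release):
    """Return the release's label name, or None if absent or placeholder."""
    if release.get('label-info-list'):
        label_info = release['label-info-list'][0]
        if label_info.get('label'):          # entry may lack a label dict
            label = label_info['label']['name']
            if label != '[no label]':
                return label
    return None

assert extract_label(
    {'label-info-list': [{'label': {'name': 'Sub Pop'}}]}) == 'Sub Pop'
assert extract_label(
    {'label-info-list': [{'catalog-number': 'X-1'}]}) is None
assert extract_label({}) is None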
dee0b795e38118813da9a04fe725a8a40ccbf44b
|
Fix bug that ignore to completed tasks over 50 on daily report
|
chalicelib/api.py
|
chalicelib/api.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from owlmixin.owlcollections import TList
from typing import Text
import json
import uuid
from pytz import utc
import requests
from datetime import datetime
from chalicelib.models import *
from fn import _
USERNAME_DEFAULT = "TINA"
TOGGL_API_URL = 'https://www.toggl.com/api/v8'
TOGGL_REPORT_API_URL = 'https://www.toggl.com/reports/api/v2'
TODOIST_API_URL = 'https://todoist.com/API/v7'
def fetch_reports(workspace_id, since, api_token):
# type: (int, datetime, Text) -> TList[TogglApiReport]
path = '/details?workspace_id={}&since={}&user_agent=tina'.format(
workspace_id, since.strftime('%Y-%m-%d')
)
return TogglApiDetail.from_dict(
access_toggl(path, api_token, True).json()
).data
def access_toggl(path, api_token, is_report=False):
url = TOGGL_REPORT_API_URL if is_report else TOGGL_API_URL
return requests.get(url + path, auth=(api_token, 'api_token'))
def notify_slack(message, config):
# type: (Text, Config) -> any
payload = {
"text": message,
"username": config.slack.username or USERNAME_DEFAULT,
"icon_emoji": config.slack.icon_emoji,
"icon_url": config.slack.icon_url,
"channel": config.slack.channel,
"link_names": 1
}
r = requests.post(config.slack.url, data=json.dumps(payload, ensure_ascii=False).encode('utf8'))
# for debug
print(r.status_code)
print(r.content)
return r
def fetch_uncompleted_tasks(todoist_token):
# type: (Text) -> TList[TodoistApiTask]
items = requests.get(TODOIST_API_URL + '/sync', data={
"token": todoist_token,
"sync_token": "*",
"resource_types": '["items"]'
}).json()['items']
return TodoistApiTask.from_dicts(items).reject(_.checked)
def fetch_completed_tasks(todoist_token, since):
return TList(requests.get(TODOIST_API_URL + '/completed/get_all', data={
"token": todoist_token,
"since": since.astimezone(utc).strftime('%Y-%m-%dT%H:%M'),
"limit": 50
}).json()['items'])
def fetch_activities(todoist_token, object_event_types, since):
return requests.get(TODOIST_API_URL + '/activity/get', data={
"token": todoist_token,
"since": since.astimezone(utc).strftime('%Y-%m-%dT%H:%M'),
"object_event_types": object_event_types,
"limit": 100
}).json()
def add_reminder(todoist_token, item_id, remind_time):
commands = [{
"type": "reminder_add",
"uuid": str(uuid.uuid4()),
"temp_id": str(uuid.uuid4()),
"args": {
"item_id": item_id,
"service": "push",
"due_date_utc": remind_time.astimezone(utc).strftime('%Y-%m-%dT%H:%M')
}
}]
r = requests.get(TODOIST_API_URL + '/sync', data={
"token": todoist_token,
"commands": json.dumps(commands)
})
return r.ok
def update_day_orders(todoist_token, ids):
commands = [{
"type": "item_update_day_orders",
"uuid": str(uuid.uuid4()),
"args": {
"ids_to_orders": {x: i+1 for i, x in enumerate(ids)},
}
}]
r = requests.get(TODOIST_API_URL + '/sync', data={
"token": todoist_token,
"commands": json.dumps(commands)
})
return r.ok
|
Python
| 0
|
@@ -1870,22 +1870,112 @@
e):%0A
-return
+def fetch_all():%0A offset = 0%0A max_limit = 50%0A%0A while True:%0A rs =
TList(r
@@ -2025,32 +2025,40 @@
et_all', data=%7B%0A
+
%22token%22:
@@ -2073,32 +2073,40 @@
_token,%0A
+
+
%22since%22: since.a
@@ -2160,42 +2160,261 @@
-%22limit%22: 50%0A %7D).json()%5B'items'%5D
+ %22offset%22: offset,%0A %22limit%22: max_limit%0A %7D).json()%5B'items'%5D)%0A yield rs%0A%0A if len(rs) != max_limit:%0A break%0A offset = offset + max_limit%0A%0A return TList(fetch_all()).flatten(
)%0A%0A%0A
|
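The rewrite above works around the Todoist completed/get_all page cap: the old code asked for limit=50 once and silently dropped everything past the first page. The fix pages with an offset until a short page arrives, then flattens. A self-contained sketch of the same loop against a stubbed fetch:

def fetch_completed_tasks(fetch_page, max_limit=50):
    """Collect every completed task by paging until a short page arrives."""
    def fetch_all():
        offset = 0
        while True:
            page = fetch_page(offset=offset, limit=max_limit)
            yield page
            if len(page) != max_limit:   # short page marks the end
                break
            offset += max_limit
    return [task for page in fetch_all() for task in page]

TASKS = list(range(120))                 # fake API holding 120 tasks

def fake_page(offset, limit):
    return TASKS[offset:offset + limit]

assert fetch_completed_tasks(fake_page) == TASKS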
176a7571a2151db466d83db20f3f5efe1dc8dce9
|
fix king me
|
checkers/board.py
|
checkers/board.py
|
import enums
from tkinter import *
from piece import Piece
root = Tk()
board = [[[0 for x in range(8)]for y in range(8)] for z in range(2)]
global_x = -1
global_y = -1
def compress(x,y):
board[1][y][x] = board[1][global_y][global_x]
board[1][global_y][global_x] = 0
create_piece(y,x)
board[0][global_y][global_x].delete("all")
def jump_logic(x, y, w,z):
if(0 < global_y + y and global_y + y < 8 and 0 < global_x + x and global_x + x < 8 and board[1][global_y + y][global_x+x] !=0 and board[1][global_y][global_x].color != board[1][global_y + y][global_x+x].color):
board[0][global_y + y][global_x+x].delete("all")
board[1][global_y + y][global_x+x] = 0;
return compress(w,z)
else:
if( 0 < global_y - y and global_y - y < 8 and 0 < global_x + x and global_x + x < 8 and board[1][global_y -y][global_x+x] !=0 and board[1][global_y][global_x].color != board[1][global_y - y][global_x+x].color):
board[0][global_y -y][global_x+x].delete("all")
board[1][global_y -y][global_x+x] = 0;
return compress(w,z)
def valid_move(x, y):
global global_y
global global_x
if(board[1][global_y][global_x].type): #if its a king
if(abs(global_x-x) == 1 and abs(global_y-y) == 1):
return compress(x,y)
elif(abs(global_x-x) == 2 and abs(global_y-y) == 2):
            if(global_x-x == -2): # if it's on the right
return jump_logic(1,1,x,y)
elif(global_x - x == 2):
return jump_logic(-1,-1,x,y)
else:
if(abs(global_x-x) == 1 and global_y-y == (1 if board[1][global_y][global_x].color == "red" else -1)):
return compress(x,y)
elif(abs(global_x-x) == 2 and global_y-y == (2 if board[1][global_y][global_x].color == "red" else -2)):
            if(global_x-x == -2): # if it's on the right
return jump_logic(1,-1 if board[1][global_y][global_x].color == "red" else 1,x,y)
else:
return jump_logic(-1,-1 if board[1][global_y][global_x].color == "red" else 1,x,y)
def click(event):
global global_y
global global_x
x = int((root.winfo_pointerx() - root.winfo_rootx())/80)
y = int((root.winfo_pointery() - root.winfo_rooty())/80)
if(board[0][y][x]["background"] == "black"):
if(global_x == -1 and global_y == -1): #no piece is selected
if(board[1][y][x] != 0): #if there is a piece there
board[0][y][x]["background"] = "blue"
global_x = x
global_y = y
else:
if(board[1][y][x] != 0):
board[0][global_y][global_x]["background"] = "black"
board[0][y][x]["background"] = "blue"
global_x = x
global_y = y
else:
board[0][global_y][global_x]["background"] = "black"
if(valid_move(x,y)):
if(y == 0 or y== 7 and not board[1][y][x].type):
board[1][y][x].king_me()
board[0][y][x].delete("all")
create_piece(y,x)
board[1][global_y][global_x] = 0
global_x = -1
global_y = -1
def create_piece(r,c):
color = board[1][r][c].color
board[0][r][c].create_oval(5, 5, 75, 75, fill="white")
board[0][r][c].create_oval(7, 7, 73, 73, fill=color)
board[0][r][c].create_oval(10, 10, 70, 70, fill="white")
board[0][r][c].create_oval(12, 12, 68, 68, fill=color)
if(board[1][r][c].type):
board[0][r][c].create_oval(17, 17, 63, 63, fill="white")
board[0][r][c].create_oval(19, 19, 61, 61, fill=color)
for r in range(8):
for c in range(8):
if(((r % 2 == 0) and (c % 2 == 0)) or ((r % 2 == 1) and (c % 2 == 1))):
board[0][r][c] = Canvas(root, bg="red", height=80, width=80, bd=0, highlightthickness=0, relief='ridge')
board[0][r][c].grid(row = r,column = c)
else:
board[0][r][c] = Canvas(root, bg="black", height=80, width=80, bd=0, highlightthickness=0, relief='ridge')
board[0][r][c].grid(row = r,column = c)
if(r < 3):
board[1][r][c] = Piece(enums.Type.PAWN, enums.Type.BLACK)
create_piece(r,c)
elif(r > 4):
board[1][r][c] = Piece(enums.Type.PAWN, enums.Type.RED)
create_piece(r,c)
root.bind("<Button-1>", click)
root.resizable(width=False, height=False)
root.mainloop()
|
Python
| 0.000001
|
@@ -337,16 +337,32 @@
(%22all%22)%0A
+ return True%0A
def jump
|
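The one-line fix above matters because click() tests `if(valid_move(x,y))` before kinging: valid_move returns whatever compress returns, and a Python function without an explicit return yields None, so the promotion branch never ran. A stripped-down sketch of the truthiness chain (the board model here is simplified to a dict):

def compress(board, src, dst):
    """Move a piece and report success so callers can branch on it."""
    board[dst] = board[src]
    board[src] = 0
    return True                    # the fix: without this, None is returned

def valid_move(board, src, dst):
    return compress(board, src, dst)   # only a move that happened is truthy

board = {(5, 2): 'red', (0, 3): 0}
promoted = False
if valid_move(board, (5, 2), (0, 3)):  # piece reached the back row (row 0)
    promoted = True                    # king_me() would run here
assert promoted and board[(0, 3)] == 'red'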
0f782215e58eba53b72667bffde667f4d03a0d4a
|
Update version.
|
client/version.py
|
client/version.py
|
__version__ = '0.1.9'
|
Python
| 0
|
@@ -14,9 +14,9 @@
'0.
-1.9
+2.0
'%0A
|
6749060a7546b7dee3c6e643c7dfad4db7934061
|
package for release
|
cliez/__init__.py
|
cliez/__init__.py
|
# -*- coding: utf-8 -*-
version = "1.6.9"
version_info = (1, 6, 9)
|
Python
| 0
|
@@ -33,17 +33,18 @@
= %221.6.
-9
+10
%22%0Aversio
@@ -63,9 +63,10 @@
6,
-9
+10
)%0A%0A%0A
|
9b77b62a687359ff9a7f35f50b13ec78b6cb8ae8
|
Clean up recursive data handling
|
cliff/complete.py
|
cliff/complete.py
|
"""Bash completion for the CLI.
"""
import logging
from cliff import command
class CompleteDictionary:
"""dictionary for bash completion
"""
def __init__(self):
self._dictionary = {}
def add_command(self, command, actions):
optstr = ' '.join(opt for action in actions
for opt in action.option_strings)
dicto = self._dictionary
for subcmd in command[:-1]:
dicto = dicto.setdefault(subcmd, {})
dicto[command[-1]] = optstr
def get_commands(self):
return ' '.join(k for k in sorted(self._dictionary.keys()))
def _get_data_recurse(self, dictionary, path):
ray = []
keys = sorted(dictionary.keys())
for cmd in keys:
if path == "":
name = cmd
else:
name = path + "_" + cmd
value = dictionary[cmd]
if isinstance(value, str):
ray.append((name, value))
else:
cmdlist = ' '.join(sorted(value.keys()))
ray.append((name, cmdlist))
ray += self._get_data_recurse(value, name)
return ray
def get_data(self):
return sorted(self._get_data_recurse(self._dictionary, ""))
class CompleteShellBase(object):
"""base class for bash completion generation
"""
def __init__(self, name, output):
self.name = str(name)
self.output = output
def write(self, cmdo, data):
self.output.write(self.get_header())
self.output.write(" cmds='{0}'\n".format(cmdo))
for datum in data:
self.output.write(' cmds_{0}=\'{1}\'\n'.format(*datum))
self.output.write(self.get_trailer())
class CompleteNoCode(CompleteShellBase):
"""completion with no code
"""
def __init__(self, name, output):
super(CompleteNoCode, self).__init__(name, output)
def get_header(self):
return ''
def get_trailer(self):
return ''
class CompleteBash(CompleteShellBase):
"""completion for bash
"""
def __init__(self, name, output):
super(CompleteBash, self).__init__(name, output)
def get_header(self):
return ('_' + self.name + """()
{
local cur prev words
COMPREPLY=()
_get_comp_words_by_ref -n : cur prev words
# Command data:
""")
def get_trailer(self):
return ("""
cmd=""
words[0]=""
completed="${cmds}"
for var in "${words[@]:1}"
do
if [[ ${var} == -* ]] ; then
break
fi
if [ -z "${cmd}" ] ; then
proposed="${var}"
else
proposed="${cmd}_${var}"
fi
local i="cmds_${proposed}"
local comp="${!i}"
if [ -z "${comp}" ] ; then
break
fi
if [[ ${comp} == -* ]] ; then
if [[ ${cur} != -* ]] ; then
completed=""
break
fi
fi
cmd="${proposed}"
completed="${comp}"
done
if [ -z "${completed}" ] ; then
COMPREPLY=( $( compgen -f -- "$cur" ) $( compgen -d -- "$cur" ) )
else
COMPREPLY=( $(compgen -W "${completed}" -- ${cur}) )
fi
return 0
}
complete -F _""" + self.name + ' ' + self.name + '\n')
class CompleteCommand(command.Command):
"""print bash completion command
"""
log = logging.getLogger(__name__ + '.CompleteCommand')
def get_parser(self, prog_name):
parser = super(CompleteCommand, self).get_parser(prog_name)
parser.add_argument(
"--name",
default=None,
metavar='<command_name>',
help="Command name to support with command completion"
)
parser.add_argument(
"--shell",
default='bash',
metavar='<shell>',
choices=['bash', 'none'],
help="Shell being used. Use none for data only (default: bash)"
)
return parser
def get_actions(self, command):
the_cmd = self.app.command_manager.find_command(command)
cmd_factory, cmd_name, search_args = the_cmd
cmd = cmd_factory(self.app, search_args)
if self.app.interactive_mode:
full_name = (cmd_name)
else:
full_name = (' '.join([self.app.NAME, cmd_name]))
cmd_parser = cmd.get_parser(full_name)
return cmd_parser._get_optional_actions()
def take_action(self, parsed_args):
self.log.debug('take_action(%s)' % parsed_args)
if parsed_args.name:
name = parsed_args.name
else:
name = self.app.NAME
if parsed_args.shell == "none":
shell = CompleteNoCode(name, self.app.stdout)
else:
shell = CompleteBash(name, self.app.stdout)
dicto = CompleteDictionary()
for cmd in self.app.command_manager:
command = cmd[0].split()
dicto.add_command(command, self.get_actions(command))
shell.write(dicto.get_commands(), dicto.get_data())
return 0
|
Python
| 0.000319
|
@@ -47,16 +47,27 @@
ogging%0A%0A
+import six%0A
from cli
@@ -773,84 +773,8 @@
-if path == %22%22:%0A name = cmd%0A else:%0A
name
@@ -788,16 +788,33 @@
+ %22_%22 +
+ cmd if path else
cmd%0A
@@ -880,18 +880,31 @@
value, s
-tr
+ix.string_types
):%0A
|
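Two idioms carry the cleanup above: six.string_types makes the leaf test match both str and unicode under Python 2, and a conditional expression replaces the four-line prefix branch. A short sketch (assumes six is installed):

import six

def qualified_name(path, cmd):
    # One-line replacement for the old `if path == "" ... else ...` branch.
    return path + "_" + cmd if path else cmd

def is_leaf(value):
    # six.string_types is (str, unicode) on Python 2, (str,) on Python 3.
    return isinstance(value, six.string_types)

assert qualified_name("", "server") == "server"
assert qualified_name("server", "list") == "server_list"
assert is_leaf("--verbose --quiet")
assert not is_leaf({"list": "--all"})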
27c7270a170a8eb3c2720390ab6e95d6bf16f8e3
|
fix option name to driver (#24)
|
cligj/__init__.py
|
cligj/__init__.py
|
# cligj
# Shared arguments and options.
import click
from .features import normalize_feature_inputs
# Arguments.
# Multiple input files.
files_in_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS...")
# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS... OUTPUT")
# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
'features',
nargs=-1,
callback=normalize_feature_inputs,
metavar="FEATURES...")
# Options.
verbose_opt = click.option(
'--verbose', '-v',
count=True,
help="Increase verbosity.")
quiet_opt = click.option(
'--quiet', '-q',
count=True,
help="Decrease verbosity.")
# Format driver option.
format_opt = click.option(
'-f', '--format', '--driver',
default='GTiff',
help="Output format driver")
# JSON formatting options.
indent_opt = click.option(
'--indent',
type=int,
default=None,
help="Indentation level for JSON output")
compact_opt = click.option(
'--compact/--not-compact',
default=False,
help="Use compact separators (',', ':').")
# Coordinate precision option.
precision_opt = click.option(
'--precision',
type=int,
default=-1,
help="Decimal precision of coordinates.")
# Geographic (default), projected, or Mercator switch.
projection_geographic_opt = click.option(
'--geographic',
'projection',
flag_value='geographic',
default=True,
help="Output in geographic coordinates (the default).")
projection_projected_opt = click.option(
'--projected',
'projection',
flag_value='projected',
help="Output in dataset's own, projected coordinates.")
projection_mercator_opt = click.option(
'--mercator',
'projection',
flag_value='mercator',
help="Output in Web Mercator coordinates.")
# Feature collection or feature sequence switch.
sequence_opt = click.option(
'--sequence/--no-sequence',
default=False,
help="Write a LF-delimited sequence of texts containing individual "
"objects or write a single JSON text containing a feature "
"collection object (the default).")
use_rs_opt = click.option(
'--rs/--no-rs',
'use_rs',
default=False,
help="Use RS (0x1E) as a prefix for individual texts in a sequence "
"as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
"(default is False).")
# GeoJSON output mode option.
def geojson_type_collection_opt(default=False):
return click.option(
'--collection',
'geojson_type',
flag_value='collection',
default=default,
help="Output as GeoJSON feature collection(s).")
def geojson_type_feature_opt(default=False):
return click.option(
'--feature',
'geojson_type',
flag_value='feature',
default=default,
help="Output as GeoJSON feature(s).")
def geojson_type_bbox_opt(default=False):
return click.option(
'--bbox',
'geojson_type',
flag_value='bbox',
default=default,
help="Output as GeoJSON bounding box array(s).")
|
Python
| 0
|
@@ -1039,16 +1039,26 @@
driver',
+ 'driver',
%0A def
|
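Without an explicit name, click infers the parameter name from the flag spellings, which for `-f/--format/--driver` produced `format` rather than `driver`; adding the bare 'driver' declaration pins the keyword the command function receives. A minimal runnable sketch (assumes click is installed):

import click

@click.command()
@click.option('-f', '--format', '--driver', 'driver',
              default='GTiff', help="Output format driver")
def convert(driver):
    # Received as `driver` thanks to the explicit name above, which also
    # avoids shadowing the `format` builtin.
    click.echo(driver)

if __name__ == '__main__':
    convert()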
05f45992e871dc0d98613fb31269c43e21869414
|
Add envy help command
|
cloudenvy/main.py
|
cloudenvy/main.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import argparse
import logging
from cloudenvy.config import EnvyConfig
from cloudenvy.commands.envy_up import EnvyUp
from cloudenvy.commands.envy_list import EnvyList
from cloudenvy.commands.envy_provision import EnvyProvision
from cloudenvy.commands.envy_snapshot import EnvySnapshot
from cloudenvy.commands.envy_ip import EnvyIP
from cloudenvy.commands.envy_scp import EnvySCP
from cloudenvy.commands.envy_dotfiles import EnvyDotfiles
from cloudenvy.commands.envy_ssh import EnvySSH
from cloudenvy.commands.envy_destroy import EnvyDestroy
from cloudenvy.commands.envy_run import EnvyRun
def _build_parser():
parser = argparse.ArgumentParser(
description='Launch a virtual machine in an openstack environment.')
parser.add_argument('-v', '--verbosity', action='count',
help='increase output verbosity')
parser.add_argument('-c', '--cloud', action='store',
help='specify which cloud to use')
subparsers = parser.add_subparsers(title='Available commands:')
# Load up all of the subparser classes
EnvyUp(subparsers)
EnvyList(subparsers)
EnvyProvision(subparsers)
EnvySnapshot(subparsers)
EnvyIP(subparsers)
EnvySCP(subparsers)
EnvyDotfiles(subparsers)
EnvySSH(subparsers)
EnvyDestroy(subparsers)
EnvyRun(subparsers)
return parser
def main():
parser = _build_parser()
args = parser.parse_args()
config = EnvyConfig(args).get_config()
if args.verbosity == 3:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.DEBUG)
elif args.verbosity == 2:
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger('novaclient').setLevel(logging.INFO)
elif args.verbosity == 1:
logging.getLogger().setLevel(logging.INFO)
args.func(config, args)
|
Python
| 0.999924
|
@@ -1374,16 +1374,433 @@
rsers)%0A%0A
+ def find_command_help(config, args):%0A if args.command:%0A subparsers.choices%5Bargs.command%5D.print_help()%0A else:%0A parser.print_help()%0A%0A help_subparser = subparsers.add_parser('help',%0A help='Display help information for a specfiic command')%0A help_subparser.add_argument('command', action='store', nargs='?')%0A help_subparser.set_defaults(func=find_command_help)%0A%0A
retu
|
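The added `envy help [command]` leans on the fact that add_subparsers() returns an action whose choices attribute maps each command name to its sub-parser, so per-command help is one lookup away. A condensed, self-contained sketch:

import argparse

parser = argparse.ArgumentParser(description='demo CLI')
subparsers = parser.add_subparsers(title='Available commands:')

up = subparsers.add_parser('up', help='Launch a machine')
up.set_defaults(func=lambda args: print('launching'))

def find_command_help(args):
    if args.command:
        # choices maps command names to their sub-parsers.
        subparsers.choices[args.command].print_help()
    else:
        parser.print_help()

help_sub = subparsers.add_parser(
    'help', help='Display help information for a specific command')
help_sub.add_argument('command', action='store', nargs='?')
help_sub.set_defaults(func=find_command_help)

args = parser.parse_args(['help', 'up'])
args.func(args)                 # prints the 'up' sub-parser's help text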
0cad35e80a75a31a81937fb9afe2d19261a2c077
|
Add method for returning live node metrics
|
cloudkick/base.py
|
cloudkick/base.py
|
# Licensed to Cloudkick, Inc ('Cloudkick') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# Cloudkick licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = ["Connection"]
import os
import urllib
from oauth import oauth
try:
import json
except:
import simplejson as json
class Connection(object):
"""
Cloudkick API Connection Object
Provides an interface to the Cloudkick API over an HTTPS connection,
using OAuth to authenticate requests.
"""
API_SERVER = "api.cloudkick.com"
API_VERSION = "1.0"
def __init__(self, config_path = None):
self.__oauth_key = None
self.__oauth_secret = None
if config_path is None:
config_path = [os.path.join(os.path.expanduser('~'), ".cloudkick.conf"), "/etc/cloudkick.conf"]
if not isinstance(config_path, list):
config_path = [config_path]
self.config_path = config_path
def _read_config(self):
errors = []
for path in self.config_path:
try:
fp = open(path, 'r')
return self._parse_config(fp)
except Exception, e:
errors.append(e)
continue
raise IOError("Unable to open configuration files: %s %s" %
(", ".join(self.config_path),
", ".join([str(e) for e in errors])))
def _parse_config(self, fp):
for line in fp.readlines():
if len(line) < 1:
continue
if line[0] == "#":
continue
parts = line.split()
if len(parts) != 2:
continue
key = parts[0].strip()
value = parts[1].strip()
if key == "oauth_key":
self.__oauth_key = value
if key == "oauth_secret":
self.__oauth_secret = value
@property
def oauth_key(self):
if not self.__oauth_key:
self._read_config()
return self.__oauth_key
@property
def oauth_secret(self):
if not self.__oauth_secret:
self._read_config()
return self.__oauth_secret
def _request(self, url, parameters, method='GET'):
signature_method = oauth.OAuthSignatureMethod_HMAC_SHA1()
consumer = oauth.OAuthConsumer(self.oauth_key, self.oauth_secret)
url = 'https://' + self.API_SERVER + '/' + self.API_VERSION + '/' + url
oauth_request = oauth.OAuthRequest.from_consumer_and_token(consumer,
http_url=url,
parameters=parameters)
oauth_request.sign_request(signature_method, consumer, None)
url = oauth_request.to_url()
f = urllib.urlopen(url)
s = f.read()
return s
def _request_json(self, *args):
r = self._request(*args)
try:
return json.loads(r)
except ValueError:
return r
def nodes(self, query = "*"):
nodes = self._request_json("query/nodes", {'query': query})
return nodes
def checks(self, node):
checks = self._request_json("query/check", {'node': node})
return checks
def data(self, check, name, start, end, interval = 20):
data = self._request_json("query/check/data", {'interval': interval,
'metric.0.id': check,
'metric.0.name': name,
'start': start.strftime('%s'),
'end': end.strftime('%s') }
)
return data
if __name__ == "__main__":
from pprint import pprint
from datetime import datetime, timedelta
c = Connection()
nodes = c.nodes()
nid = nodes[6]['id']
checks = c.checks(nid)
check = checks[0][nid][0]
now = datetime.now()
pprint(check)
|
Python
| 0.000001
|
@@ -3563,16 +3563,252 @@
checks%0A%0A
+ def live_data(self, node_id, check_name = 'mem'):%0A if not check_name in %5B'mem', 'disk', 'cpu'%5D:%0A return False%0A%0A live_data = self._request_json(%22query/node/%25s/check/%25s%22 %25 (node_id,%0A check_name), %7B%7D)%0A return live_data%0A%0A
def da
|
1130f5822576cf79c90c525d46723af9ff2c065b
|
Stop sending redundant DMs for unknown command
|
cogbot/cog_bot.py
|
cogbot/cog_bot.py
|
import logging
from discord.ext import commands
from discord.ext.commands import Context
from discord.ext.commands.errors import *
from cogbot.cog_bot_config import CogBotConfig
log = logging.getLogger(__name__)
class CogBot(commands.Bot):
def __init__(self, config: CogBotConfig, **options):
super().__init__(
command_prefix=config.command_prefix,
description=config.description,
help_attrs=config.help_attrs,
**options)
self.config = config
if self.config.extensions:
self.load_extensions(*self.config.extensions)
else:
log.info('no extensions to load')
log.info('initialization successful')
def load_extensions(self, *extensions):
log.info(f'loading {len(extensions)} extensions...')
for ext in extensions:
log.info(f'loading extension {ext}...')
try:
self.load_extension(ext)
except Exception as e:
log.warning(f'failed to load extension {ext} because: {e.__class__.__name__}: {e}')
log.info(f'finished loading extensions')
def unload_extensions(self, *extensions):
log.info(f'unloading {len(extensions)} extensions...')
for ext in extensions:
log.info(f'unloading extension {ext}...')
try:
self.unload_extension(ext)
except Exception as e:
log.warning(f'failed to unload extension {ext} because: {e.__class__.__name__}: {e}')
log.info(f'finished unloading extensions')
async def send_error(self, ctx: Context, destination, error: CommandError):
place = '' if ctx.message.server is None else f' on **{ctx.message.server}**'
reply = f'There was a problem with your command{place}: *{error.args[0]}*'
await self.send_message(destination, reply)
async def on_ready(self):
log.info(f'logged in as {self.user.name} (id {self.user.id})')
# call on_ready() for extensions
# TODO this is gross, clean it up with an ABC or something
for cog_name, cog in self.cogs.items():
on_ready_fn = getattr(cog, 'on_ready', None)
if on_ready_fn:
await on_ready_fn()
async def on_message(self, message):
if (message.author != self.user) and message.content.startswith(self.command_prefix):
log.info(f'[{message.server}/{message.author}] {message.content}')
await super().on_message(message)
async def on_command_error(self, e: CommandError, ctx: Context):
log.warning(f'[{ctx.message.server}/{ctx.message.author}] {e.__class__.__name__}: {e.args[0]}')
error = e.original if isinstance(e, CommandInvokeError) else e
if isinstance(error, CommandNotFound):
await self.send_error(ctx, ctx.message.author, error)
await self.react_question(ctx)
elif isinstance(error, CheckFailure):
await self.react_denied(ctx)
elif isinstance(error, CommandOnCooldown):
await self.react_cooldown(ctx)
# Keep this one last because some others subclass it.
elif isinstance(error, CommandError):
await self.react_failure(ctx)
else:
await self.react_poop(ctx)
async def react_success(self, ctx: Context):
await self.add_reaction(ctx.message, u'✔')
async def react_neutral(self, ctx: Context):
await self.add_reaction(ctx.message, u'➖')
async def react_question(self, ctx: Context):
await self.add_reaction(ctx.message, u'❓')
async def react_failure(self, ctx: Context):
await self.add_reaction(ctx.message, u'❗')
async def react_denied(self, ctx: Context):
await self.add_reaction(ctx.message, u'🚫')
async def react_cooldown(self, ctx: Context):
await self.add_reaction(ctx.message, u'⏳')
async def react_poop(self, ctx: Context):
await self.add_reaction(ctx.message, u'💩')
|
Python
| 0
|
@@ -2825,74 +2825,8 @@
d):%0A
- await self.send_error(ctx, ctx.message.author, error)%0A
|
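Decoded, this hunk only deletes the send_error call from the CommandNotFound branch of on_command_error; after the patch that branch keeps just the reaction (surrounding lines reconstructed from the file above):

        if isinstance(error, CommandNotFound):
            await self.react_question(ctx)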
fabcd790578e904b3bff34fdf6e91edb4906a4e2
|
Add missing comma in compat.gyp
|
compat/compat.gyp
|
compat/compat.gyp
|
# Copyright 2014 The Crashpad Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'includes': [
'../build/crashpad.gypi',
],
'targets': [
{
'target_name': 'crashpad_compat',
'type': 'static_library',
'sources': [
'mac/AvailabilityMacros.h',
'mac/kern/exc_resource.h'
'mac/mach/mach.h',
'mac/mach-o/getsect.cc',
'mac/mach-o/getsect.h',
'mac/mach-o/loader.h',
'mac/sys/resource.h',
'non_mac/mach/mach.h',
'non_win/dbghelp.h',
'non_win/minwinbase.h',
'non_win/timezoneapi.h',
'non_win/verrsrc.h',
'non_win/windows.h',
'non_win/winnt.h',
'win/getopt.h',
'win/strings.cc',
'win/strings.h',
'win/sys/types.h',
'win/time.cc',
'win/time.h',
'win/winnt.h',
],
'conditions': [
['OS=="mac"', {
'dependencies': [
'../third_party/apple_cctools/apple_cctools.gyp:apple_cctools',
],
'include_dirs': [
'mac',
],
'direct_dependent_settings': {
'include_dirs': [
'mac',
],
},
}],
['OS=="win"', {
'include_dirs': [
'win',
],
'direct_dependent_settings': {
'include_dirs': [
'win',
],
},
'dependencies': [
'../third_party/getopt/getopt.gyp:getopt',
],
}, {
'include_dirs': [
'non_win',
],
'direct_dependent_settings': {
'include_dirs': [
'non_win',
],
},
}],
],
},
],
}
|
Python
| 0.000001
|
@@ -834,16 +834,17 @@
ource.h'
+,
%0A
|
79cd3dc227ff1d13faa9581b6f22caa176db2360
|
Mark Document.to_archive as abstract
|
c2corg_api/models/document.py
|
c2corg_api/models/document.py
|
from sqlalchemy import (
Column,
Integer,
Boolean,
String,
ForeignKey,
Enum
)
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
from c2corg_api.models import Base, schema
from utils import copy_attributes
quality_types = [
'stub',
'medium',
'correct',
'good',
'excellent'
]
class Culture(Base):
"""The supported languages.
"""
__tablename__ = 'cultures'
culture = Column(String(2), primary_key=True)
class _DocumentMixin(object):
"""
Contains the attributes that are common for `Document` and
`ArchiveDocument`.
"""
# move to metadata?
protected = Column(Boolean)
redirects_to = Column(Integer)
quality = Column(
Enum(name='quality_type', inherit_schema=True, *quality_types))
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class Document(Base, _DocumentMixin):
"""
The base class from which all document types will inherit. For each child
class (e.g. waypoint, route, ...) a separate table will be created, which
is linked to the base table via "joined table inheritance".
This table contains the current version of a document.
"""
__tablename__ = 'documents'
document_id = Column(Integer, primary_key=True)
# TODO constraint that there is at least one locale
locales = relationship('DocumentLocale')
_ATTRIBUTES = ['document_id', 'protected', 'redirects_to', 'quality']
def to_archive(self, doc):
copy_attributes(self, doc, Document._ATTRIBUTES)
return doc
def get_archive_locales(self):
return [locale.to_archive() for locale in self.locales]
class ArchiveDocument(Base, _DocumentMixin):
"""
The base class for the archive documents.
"""
__tablename__ = 'documents_archives'
id = Column(Integer, primary_key=True)
document_id = Column(Integer, nullable=False) # TODO as fk
# Locales for documents
class _DocumentLocaleMixin(object):
id = Column(Integer, primary_key=True)
@declared_attr
def document_id(self):
return Column(
Integer, ForeignKey(schema + '.documents.document_id'),
nullable=False)
@declared_attr
def culture(self):
return Column(
String(2), ForeignKey(schema + '.cultures.culture'),
nullable=False)
title = Column(String(150), nullable=False)
description = Column(String)
type = Column(String(1))
__mapper_args__ = {
'polymorphic_identity': 'd',
'polymorphic_on': type
}
class DocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales'
_ATTRIBUTES = ['document_id', 'culture', 'title', 'description']
def to_archive(self, locale):
copy_attributes(self, locale, DocumentLocale._ATTRIBUTES)
return locale
class ArchiveDocumentLocale(Base, _DocumentLocaleMixin):
__tablename__ = 'documents_locales_archives'
|
Python
| 0.001214
|
@@ -191,16 +191,27 @@
tionship
+%0Aimport abc
%0A%0Afrom c
@@ -1572,34 +1572,353 @@
-def to_archive(self, doc):
+@abc.abstractmethod%0A def to_archive(self):%0A %22%22%22Create an %60Archive*%60 instance with the same attributes.%0A This method is supposed to be implemented by child classes.%0A %22%22%22%0A return%0A%0A def to_archive(self, doc):%0A %22%22%22Copy the attributes of this document into a passed in%0A %60Archive*%60 instance.%0A %22%22%22
%0A
|
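Decoded for readability, the two hunks add `import abc` after the sqlalchemy imports and replace the bare `def to_archive(self, doc):` with a documented pair (whitespace reconstructed, since the dump collapses it):

import abc

    @abc.abstractmethod
    def to_archive(self):
        """Create an `Archive*` instance with the same attributes.
        This method is supposed to be implemented by child classes.
        """
        return

    def to_archive(self, doc):
        """Copy the attributes of this document into a passed in
        `Archive*` instance.
        """
        copy_attributes(self, doc, Document._ATTRIBUTES)
        return doc

Note that the second def shadows the abstract one inside the same class body; that is what the patch introduces verbatim.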
41bb51f7f0aa1fda927af51498ec1acbf9eeddcc
|
fix history links
|
leaguebot/services/alerters/slack.py
|
leaguebot/services/alerters/slack.py
|
from leaguebot import app
import leaguebot.models.map as screepmap
import leaguebot.services.screeps as screeps
import leaguebot.services.slack as slack
import re
def sendBattleMessage(battleinfo):
message = getBattleMessageText(battleinfo)
sendToSlack(message)
def getBattleMessageText(battleinfo):
room_name = battleinfo['_id']
room_owner = screepmap.getRoomOwner(room_name)
pvp_time = str(battleinfo['lastPvpTime'])
history_link = '<https://screeps.com/a/#!/history/E53N64?t=' + pvp_time + '|' + pvp_time + '>'
message = history_link + ' - Battle: ' + '<https://screeps.com/a/#!/room/' + room_name + '|' + room_name + '>'
if not room_owner:
return message
room_level = screepmap.getRoomLevel(room_name)
if room_level and room_level > 0:
message += ' RCL ' + str(room_level)
message += ', defender ' + '<https://screeps.com/a/#!/profile/' + room_owner + '|' + room_owner + '>'
room_alliance = screepmap.getUserAlliance(room_owner)
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendNukeMessage(nukeinfo):
message = getNukeMessageText(nukeinfo)
sendToSlack(message)
def getNukeMessageText(nukeinfo):
tick = screeps.get_time()
eta = str(nukeinfo['landTime']-tick)
room_name = nukeinfo['room']
room_owner = screepmap.getRoomOwner(room_name)
message = str(tick) + ' - Nuke: ' + room_name + ' in ' + str(eta) + ' ticks'
if not room_owner:
message += ', abandoned'
else:
room_alliance = screepmap.getUserAlliance(room_owner)
message += ', defender ' + room_owner
if room_alliance:
message += ' (' + room_alliance + ')'
return message
def sendToSlack(message):
if 'SEND_TO_SLACK' not in app.config or not app.config['SEND_TO_SLACK']:
return False
try:
channel = app.config['SLACK_CHANNEL']
slack.send_slack_message(channel, message)
print (message)
return True
except:
return False
|
Python
| 0
|
@@ -494,14 +494,25 @@
ory/
-E53N64
+' + room_name + '
?t='
|
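Decoded, the one-line change replaces the hard-coded room E53N64 in getBattleMessageText so the history link points at the battle's own room:

    history_link = '<https://screeps.com/a/#!/history/' + room_name + '?t=' + pvp_time + '|' + pvp_time + '>'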
2c9a0f9783c72af122d7c728a7760c8a2027d45f
|
Fix: remove debug prints
|
tests/resolver_test.py
|
tests/resolver_test.py
|
from gnr.core.gnrbag import Bag, BagCbResolver
def hello(x=''):
return 'i say : %s ' % x
b = Bag()
b.setCallBackItem('say_hello', hello, x='hello')
b.setCallBackItem('say_muu', hello, x='muu')
b.setCallBackItem('say_buzbuz', hello, x='buzbuz')
resolver = BagCbResolver(hello, x='fatto da resolver e non da setCallBackItem')
b.setItem('say_resolver', resolver)
print b['say_hello']
print b['say_muu']
print b['say_buzbuz']
print b['say_resolver']
|
Python
| 0.000019
|
@@ -362,91 +362,4 @@
ver)
-%0A%0Aprint b%5B'say_hello'%5D%0Aprint b%5B'say_muu'%5D%0Aprint b%5B'say_buzbuz'%5D%0Aprint b%5B'say_resolver'%5D
|
539c11706d91db92e36f49694603f2ed668d8cbb
|
Add a __unicode__ method to the Book model. Will show book title in admin instead of "Book object".
|
test_environment/books/models.py
|
test_environment/books/models.py
|
from dockit.schema import Document, Schema, ModelReferenceField, \
TextField, DictField, SchemaField, FileField, IntegerField, \
ReferenceField, ListField, GenericSchemaField, CharField, DateField
from django.contrib.auth.models import User
class Author(Document):
user = ModelReferenceField(User)
internal_id = TextField()
class Meta:
collection = 'author'
class Address(Schema):
street_1 = TextField()
street_2 = TextField(blank=True)
city = TextField()
postal_code = TextField()
region = TextField()
country = TextField()
extra_data = DictField(blank=True)
class Publisher(Document):
name = TextField()
address = SchemaField(Address)
def __unicode__(self):
return self.name
class Meta:
collection = 'publisher'
class Book(Document):
title = TextField()
cover_image = FileField(upload_to='book-images')
year = IntegerField()
publisher = ReferenceField(Publisher)
authors = ListField(ReferenceField(Author), db_index=True)
tags = ListField(TextField(), db_index=True)
class Meta:
collection = 'book'
Book.objects.index('tags').commit()
class SubComplexTwo(Schema):
field2 = TextField()
class SubComplexOne(Schema):
field1 = TextField()
nested = SchemaField(SubComplexTwo)
class ComplexObject(Document):
field1 = TextField()
image = FileField(upload_to='complex-images', blank=True)
addresses = ListField(SchemaField(Address), blank=True)
main_address = SchemaField(Address, blank=True)
generic_objects = ListField(GenericSchemaField(), blank=True)
nested = SchemaField(SubComplexOne, blank=True)
def __unicode__(self):
return unicode(self.field1)
class Meta:
collection = 'complex_object'
class Publication(Document):
name = CharField()
date = DateField()
class Meta:
typed_field = '_type'
class Newspaper(Publication):
city = CharField()
class Meta:
typed_key = 'newspaper'
class Magazine(Publication):
issue_number = CharField()
class Meta:
typed_key = 'magazine'
class BaseProduct(Document):
name = CharField()
class Meta:
typed_field = '_type'
class Brand(Document):
name = CharField()
products = ListField(SchemaField(BaseProduct))
class Shoes(BaseProduct):
class Meta:
typed_key = 'shoes'
class Shirt(BaseProduct):
class Meta:
typed_key = 'shirt'
|
Python
| 0
|
@@ -1100,24 +1100,82 @@
=True)%0A %0A
+ def __unicode__(self):%0A return self.title%0A %0A
class Me
|
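Decoded, the hunk inserts the promised method into the Book model just before its Meta class:

    def __unicode__(self):
        return self.title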
d88013450d5e3ec62a3cb8b4fcfa2afbc173338b
|
remove ENABLE_WEAK_STEERING_WHILE_NOT_ENGAGED from the safety tests as well
|
tests/safety/common.py
|
tests/safety/common.py
|
from panda.tests.safety import libpandasafety_py
MAX_WRONG_COUNTERS = 5
class UNSAFE_MODE:
DEFAULT = 0
DISABLE_DISENGAGE_ON_GAS = 1
DISABLE_STOCK_AEB = 2
ENABLE_WEAK_STEERING_WHILE_NOT_ENGAGED = 4
RAISE_LONGITUDINAL_LIMITS_TO_ISO_MAX = 8
def make_msg(bus, addr, length=8):
to_send = libpandasafety_py.ffi.new('CAN_FIFOMailBox_TypeDef *')
if addr >= 0x800:
to_send[0].RIR = (addr << 3) | 5
else:
to_send[0].RIR = (addr << 21) | 1
to_send[0].RDTR = length
to_send[0].RDTR |= bus << 4
return to_send
class StdTest:
@staticmethod
def test_relay_malfunction(test, addr, bus=0):
# input is a test class and the address that, if seen on specified bus, triggers
# the relay_malfunction protection logic: both tx_hook and fwd_hook are
# expected to return failure
test.assertFalse(test.safety.get_relay_malfunction())
test.safety.safety_rx_hook(make_msg(bus, addr, 8))
test.assertTrue(test.safety.get_relay_malfunction())
for a in range(1, 0x800):
for b in range(0, 3):
test.assertFalse(test.safety.safety_tx_hook(make_msg(b, a, 8)))
test.assertEqual(-1, test.safety.safety_fwd_hook(b, make_msg(b, a, 8)))
@staticmethod
def test_manually_enable_controls_allowed(test):
test.safety.set_controls_allowed(1)
test.assertTrue(test.safety.get_controls_allowed())
test.safety.set_controls_allowed(0)
test.assertFalse(test.safety.get_controls_allowed())
@staticmethod
def test_spam_can_buses(test, TX_MSGS):
for addr in range(1, 0x800):
for bus in range(0, 4):
if all(addr != m[0] or bus != m[1] for m in TX_MSGS):
test.assertFalse(test.safety.safety_tx_hook(make_msg(bus, addr, 8)))
@staticmethod
def test_allow_brake_at_zero_speed(test):
# Brake was already pressed
test.safety.safety_rx_hook(test._speed_msg(0))
test.safety.safety_rx_hook(test._brake_msg(1))
test.safety.set_controls_allowed(1)
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertTrue(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._brake_msg(0))
test.assertTrue(test.safety.get_controls_allowed())
# rising edge of brake should disengage
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertFalse(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._brake_msg(0)) # reset no brakes
@staticmethod
def test_not_allow_brake_when_moving(test, standstill_threshold):
# Brake was already pressed
test.safety.safety_rx_hook(test._brake_msg(1))
test.safety.set_controls_allowed(1)
test.safety.safety_rx_hook(test._speed_msg(standstill_threshold))
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertTrue(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._speed_msg(standstill_threshold + 1))
test.safety.safety_rx_hook(test._brake_msg(1))
test.assertFalse(test.safety.get_controls_allowed())
test.safety.safety_rx_hook(test._speed_msg(0))
|
Python
| 0
|
@@ -159,53 +159,8 @@
= 2%0A
- ENABLE_WEAK_STEERING_WHILE_NOT_ENGAGED = 4%0A
RA
|
9f40c3942a543375012aae0a2f03ca223b9fd612
|
fix a bug: mnemonic passphrase now works
|
stellar_base/keypair.py
|
stellar_base/keypair.py
|
# coding:utf-8
import base64
import os
from .base58 import b58decode_check, b58encode_check
from .stellarxdr import Xdr
from .utils import XdrLengthError, decode_check, encode_check, StellarMnemonic
# noinspection PyBroadException
try:
# noinspection PyUnresolvedReferences
from pure25519 import ed25519_oop as ed25519
except:
import ed25519
import hashlib
class Keypair(object):
""" use for create stellar Keypair(StrKey) .
also support old style stellar keypair transforming
"""
def __init__(self, verifying_key, signing_key=None):
assert type(verifying_key) is ed25519.VerifyingKey
self.verifying_key = verifying_key
self.signing_key = signing_key
@classmethod
def deterministic(cls, mnemonic, passphrase='', lang='english'):
""" a deterministic keypair generator .
:param mnemonic: bytes-like object used to create the keypair, e.g. u'中文'.encode('utf-8')
"""
sm = StellarMnemonic(lang)
seed = sm.to_seed(mnemonic, passphrase='')
return cls.from_raw_seed(seed)
@classmethod
def random(cls):
seed = os.urandom(32)
return cls.from_raw_seed(seed)
@classmethod
def from_seed(cls, seed):
"""
create Keypair class from a strkey seed.
:type seed: StrKey base32
"""
raw_seed = decode_check("seed", seed)
return cls.from_raw_seed(raw_seed)
@classmethod
def from_raw_seed(cls, raw_seed):
signing_key = ed25519.SigningKey(raw_seed)
verifying_key = signing_key.get_verifying_key()
return cls(verifying_key, signing_key)
@classmethod
def from_base58_seed(cls, base58_seed):
raw_seed = b58decode_check(base58_seed)[1:]
return cls.from_raw_seed(raw_seed)
@classmethod
def from_address(cls, address):
public_key = decode_check("account", address)
if len(public_key) != 32:
raise XdrLengthError('Invalid Stellar address')
verifying_key = ed25519.VerifyingKey(public_key)
return cls(verifying_key)
def account_xdr_object(self):
return Xdr.types.PublicKey(Xdr.const.KEY_TYPE_ED25519,
self.verifying_key.to_bytes())
def xdr(self):
kp = Xdr.StellarXDRPacker()
kp.pack_PublicKey(self.account_xdr_object())
return base64.b64encode(kp.get_buffer())
def public_key(self):
return self.account_xdr_object()
def raw_public_key(self):
return self.verifying_key.to_bytes()
def raw_seed(self):
return self.signing_key.to_seed()
def address(self):
return encode_check('account', self.raw_public_key())
def seed(self):
return encode_check('seed', self.raw_seed())
# def raw_secret_key(self):
# return self.signing_key
# def can_sign(self):
# return self.signing_key
def sign(self, data):
try:
return self.signing_key.sign(data)
except:
raise Exception("cannot sign: no secret key available")
def verify(self, data, signature):
return self.verifying_key.verify(signature, data)
def sign_decorated(self, data):
signature = self.sign(data)
hint = self.signature_hint()
return Xdr.types.DecoratedSignature(hint, signature)
def signature_hint(self):
return bytes(self.public_key().ed25519[-4:])
def to_old_address(self):
rv = hashlib.new('sha256', self.raw_public_key()).digest()
rv = hashlib.new('ripemd160', rv).digest()
rv = chr(0).encode() + rv
# v += hashlib.new('sha256', hashlib.new('sha256', rv).digest()).digest()[0:4]
return b58encode_check(rv)
def to_old_seed(self):
seed = chr(33).encode() + self.raw_seed()
return b58encode_check(seed)
|
Python
| 0.000032
|
@@ -1027,18 +1027,26 @@
sphrase=
-''
+passphrase
)%0A
|
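Decoded, the fix forwards the caller's passphrase in Keypair.deterministic instead of the hard-coded empty string:

        seed = sm.to_seed(mnemonic, passphrase=passphrase)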
bfb048d9a1ac34cd07e0fc8d94c0e97d901ee096
|
fix simple_parser
|
tests/simple_parser.py
|
tests/simple_parser.py
|
from test_parser import *
import sys
os.chdir("..")
import parser
if __name__ == "__main__":
x = ""
if len(sys.argv) > 1:
x = sys.argv[1]
else:
x = "x"
p = parser.HTMLParser()
document = p.parse(StringIO.StringIO(x))
print convertTreeDump(document.printTree())
|
Python
| 0
|
@@ -1,8 +1,10 @@
+%0D%0A
from tes
@@ -32,16 +32,20 @@
port sys
+, os
%0D%0Aos.chd
@@ -51,12 +51,98 @@
dir(
-%22..%22
+os.path.split(os.path.abspath(__file__))%5B0%5D)%0Asys.path.insert(0, os.path.abspath(os.pardir)
)%0Aim
|
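Decoded, the three hunks prepend a CRLF, extend the import to `import sys, os`, and replace the bare `os.chdir("..")` so the script runs from any working directory (line breaks recovered from the %0D%0A and %0A escapes):

import sys, os
os.chdir(os.path.split(os.path.abspath(__file__))[0])
sys.path.insert(0, os.path.abspath(os.pardir))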
c682c5fa044cc4b4f0c0ede7d2e23e8de670969a
|
Use the _id function in full_format
|
bot/action/util/format.py
|
bot/action/util/format.py
|
import datetime
import time
from bot.action.standard.userinfo import UserStorageHandler
class DateFormatter:
@classmethod
def format(cls, timestamp):
return cls._format("%d %b %H:%M", timestamp)
@classmethod
def format_full(cls, timestamp):
return cls._format("%d %b %Y %H:%M:%S", timestamp)
@classmethod
def format_only_date(cls, timestamp):
return cls._format("%d %b %Y", timestamp)
@staticmethod
def _format(string_format, timestamp):
local_time_struct = time.localtime(int(timestamp))
return time.strftime(string_format, local_time_struct)
class UserFormatter:
def __init__(self, user):
self.user = user
@property
def default_format(self):
"""
Returns full name (first and last) if name is available.
If not, returns username if available.
If not available too, returns the user id as a string.
"""
user = self.user
if user.first_name is not None:
return self.full_name
elif user.username is not None:
return user.username
else:
return str(user.id)
@property
def full_name(self):
"""
Returns the first and last name of the user separated by a space.
"""
formatted_user = []
if self.user.first_name is not None:
formatted_user.append(self.user.first_name)
if self.user.last_name is not None:
formatted_user.append(self.user.last_name)
return " ".join(formatted_user)
@property
def username(self):
"""
Returns the username of the user without the '@' (thus, not mentioning them).
If the username is not available, returns an empty string.
"""
return self.user.username if self.user.username is not None else ""
@property
def full_format(self):
"""
Returns the full name (first and last parts), and the username between brackets if the user has it.
If there is no info about the user, returns the user id between < and >.
"""
formatted_user = self.full_name
if self.user.username is not None:
formatted_user += " [" + self.user.username + "]"
if not formatted_user:
formatted_user = "<" + str(self.user.id) + ">"
return formatted_user
@property
def full_data(self):
"""
Returns all the info available for the user in the following format:
name [username] <id> (locale) bot_or_user
If any data is not available, it is not added.
"""
data = [
self.full_name,
self._username(),
self._id(),
self._language_code(),
self._is_bot()
]
return " ".join(filter(None, data))
def _username(self):
if self.user.username:
return "[{username}]".format(username=self.user.username)
def _id(self):
return "<{id}>".format(id=self.user.id)
def _language_code(self):
if self.user.language_code:
return "({language_code})".format(language_code=self.user.language_code)
def _is_bot(self):
if self.user.is_bot is not None:
return "🤖" if self.user.is_bot else "👤"
@staticmethod
def retrieve(user_id, user_storage_handler: UserStorageHandler):
user = user_storage_handler.get(user_id)
return UserFormatter(user)
@classmethod
def retrieve_and_format(cls, user_id, user_storage_handler: UserStorageHandler):
return cls.retrieve(user_id, user_storage_handler).default_format
class ChatFormatter:
@staticmethod
def format_group_or_type(chat):
if GroupFormatter.is_group(chat):
return GroupFormatter.format(chat)
else:
return "<" + chat.type + ">"
@staticmethod
def format_group_or_user(chat):
if GroupFormatter.is_group(chat):
return GroupFormatter.format(chat)
else:
return UserFormatter(chat).full_format
class GroupFormatter:
@staticmethod
def format(group):
return group.title
@staticmethod
def is_group(chat):
return bool(chat.title)
class TimeFormatter:
@staticmethod
def format(seconds):
return str(datetime.timedelta(seconds=seconds))
class SizeFormatter:
MULTIPLIER_FACTOR = 1024
@classmethod
def format(cls, number, suffix='B'):
if abs(number) < cls.MULTIPLIER_FACTOR:
return "{} {}".format(number, suffix)
for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi', 'Yi']:
if abs(number) < cls.MULTIPLIER_FACTOR:
break
number /= cls.MULTIPLIER_FACTOR
return "{:.2f} {}{}".format(number, unit, suffix)
class TextSummarizer:
ELLIPSIS = "…"
@classmethod
def summarize(cls, text, max_number_of_characters=10):
if len(text) > max_number_of_characters:
text = text[:max_number_of_characters-len(cls.ELLIPSIS)] + cls.ELLIPSIS
return text.replace("\n", " ")
|
Python
| 0.001969
|
@@ -2317,37 +2317,18 @@
r =
-%22%3C%22 + str(self.user.id) + %22%3E%22
+self._id()
%0A
|
24b02f5added52ac572ba24a90ef0c74c3fb7cd7
|
use api.craft.ai for runtime url
|
local_demo.py
|
local_demo.py
|
#!/usr/bin/python
import os
import requests
import json
import subprocess
import time
def userInput(config):
def defaultValue(config, key):
return config[key] if key in config else ""
res = {}
invalid = {}
res['user_name'] = raw_input("your GitHub username (default = " + defaultValue(config, 'user_name') + "): ")
res['project_name'] = raw_input("name of your SAC project on GitHub (default = " + defaultValue(config, 'project_name') + "): ")
res['project_branch'] = raw_input("current working branch of your SAC project on GitHub (default = " + defaultValue(config, 'project_branch') + "): ")
res['sac_app_id'] = raw_input("generated SAC app ID (default = " + defaultValue(config, 'sac_app_id') + "): ")
res['sac_app_secret'] = raw_input("generated SAC app secret (default = " + defaultValue(config, 'sac_app_secret') + "): ")
for k, v in res.items():
if v == "": res[k] = defaultValue(config, k)
invalid = [k for k, v in res.items() if v == ""]
if len(invalid) > 0:
print "invalid configuration: properties", invalid, "must be set"
res = userInput(config)
return res
config_file = open('config.json', 'r')
config = json.load(config_file)
invalid_properties = [k for k, v in config.items() if v == ""]
print "current configuration:", json.dumps(config, indent=2)
if len(invalid_properties) > 0:
print "invalid configuration: properties", invalid_properties, "must be set"
config = userInput(config)
if 'user_name' and 'project_name' and 'project_branch' and 'sac_app_id' and 'sac_app_secret' in config:
reply = str(raw_input('config file complete. do you wish to reset it? (y/n): ')).lower().strip()
if reply[0] == 'y':
config = userInput(config)
else:
config = userInput(config)
with open('config.json', 'w') as config_file:
json.dump(config, config_file, indent=2)
p = subprocess.Popen(["ngrok", "http", "8080"])
time.sleep(1.5)
# retrieving public url for exposed localhost:8080
headers = {'Content-Type': 'application/json'}
r = requests.get('http://127.0.0.1:4040/api/tunnels', headers=headers)
public_url = json.loads(r.text)['tunnels'][0]['public_url']
# setting environment variables with user input
os.environ["CRAFT_DEMO_SAC_USER"] = config['user_name']
os.environ["CRAFT_DEMO_SAC_PROJECT"] = config['project_name']
os.environ["CRAFT_DEMO_SAC_VERSION"] = config['project_branch']
os.environ["CRAFT_DEMO_SAC_APP_ID"] = config['sac_app_id']
os.environ["CRAFT_DEMO_SAC_APP_SECRET"] = config['sac_app_secret']
os.environ["CRAFT_DEMO_SAC_PORT"] = '8080'
os.environ["CRAFT_DEMO_SAC_URL"] = 'http://localhost:8080'
os.environ["CRAFT_DEMO_SAC_WS_URL"] = 'ws://localhost:8080'
os.environ["CRAFT_RUNTIME_SERVER_URL"] = 'https://api.craft.ai'
os.environ["CRAFT_HUB_URL"] = 'https://hub.craft.ai'
os.environ["CRAFT_DEMO_SAC_ACTIONS_URL"] = public_url
subprocess.call(["python", "-u", "src/server/main.py"])
p.terminate()
|
Python
| 0
|
@@ -2717,11 +2717,11 @@
s://
-hub
+api
.cra
|
a80646e0891dc5345caffd0acd12a073a0b47187
|
add missing test
|
tests/testLimitator.py
|
tests/testLimitator.py
|
# -*- coding: utf-8 -*-
from tools.limitator import *
import unittest
import time
class TestLimitator(unittest.TestCase):
USER1 = {"id":1}
USER2 = {"id":2}
def test_1(self):
l = Limitator(5, 2)
for i in range(5):
l.next(self.USER1)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
time.sleep(3)
for i in range(2):
l.next(self.USER1)
time.sleep(1)
for i in range(2):
l.next(self.USER1)
time.sleep(3)
for i in range(5):
l.next(self.USER1)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
def test_2(self):
l = Limitator(5, 2, True)
for i in range(5):
l.next(self.USER1)
for i in range(5):
l.next(self.USER2)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
try:
l.next(self.USER2)
self.fail("must crash")
except LimitatorLimitted:
pass
def test_3(self):
l = Limitator(5, 2)
l.next(self.USER1, 5)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
def test_4(self):
l = Limitator(5, 2, True)
l.next(self.USER1, 5)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
def test_5(self):
l = Limitator(1, 61, True)
l.next(self.USER1)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
time.sleep(62)
l.next(self.USER1)
try:
l.next(self.USER1)
self.fail("must crash")
except LimitatorLimitted:
pass
|
Python
| 0.000288
|
@@ -2012,28 +2012,279 @@
rLimitted:%0A pass%0A
+%0A def test_6(self):%0A l = Limitator(5, 2, True)%0A l.next(self.USER1, 3)%0A try:%0A l.next(self.USER1, 3)%0A self.fail(%22must crash%22)%0A except LimitatorLimitted:%0A pass%0A l.next(self.USER1, 2)%0A
|
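Decoded for readability, the hunk appends one more regression test after test_5 (indentation reconstructed):

    def test_6(self):
        l = Limitator(5, 2, True)
        l.next(self.USER1, 3)
        try:
            l.next(self.USER1, 3)
            self.fail("must crash")
        except LimitatorLimitted:
            pass
        l.next(self.USER1, 2)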
88c9facf33771ecfccf9c3b8f6d3e37b3a2b648c
|
Revert the first 'speedup'
|
chainerrl/misc/collections.py
|
chainerrl/misc/collections.py
|
import itertools
import numpy as np
class RandomAccessQueue(object):
"""FIFO queue with fast indexing
Operations getitem, setitem, append, popleft, and len
are amortized O(1)-time, if this data structure is used ephemerally.
"""
def __init__(self, *args, **kwargs):
self.maxlen = kwargs.pop('maxlen', None)
assert self.maxlen is None or self.maxlen >= 0
self._queue_front = []
self._queue_back = list(*args, **kwargs)
self._apply_maxlen()
def _apply_maxlen(self):
if self.maxlen is not None:
while len(self) > self.maxlen:
self.popleft()
def __iter__(self):
return itertools.chain(reversed(self._queue_front),
iter(self._queue_back))
def __repr__(self):
return "RandomAccessQueue({})".format(str(list(iter(self))))
def __len__(self):
return len(self._queue_front) + len(self._queue_back)
def __getitem__(self, i):
if i >= 0:
nf = len(self._queue_front)
if i < nf:
return self._queue_front[~i]
else:
i -= nf
if i < len(self._queue_back):
return self._queue_back[i]
else:
raise IndexError("RandomAccessQueue index out of range")
else:
nb = len(self._queue_back)
if i >= -nb:
return self._queue_back[i]
else:
i += nb
if i >= -len(self._queue_front):
return self._queue_front[~i]
else:
raise IndexError("RandomAccessQueue index out of range")
def __setitem__(self, i, x):
if i >= 0:
nf = len(self._queue_front)
if i < nf:
self._queue_front[~i] = x
else:
i -= nf
if i < len(self._queue_back):
self._queue_back[i] = x
else:
raise IndexError("RandomAccessQueue index out of range")
else:
nb = len(self._queue_back)
if i >= -nb:
self._queue_back[i] = x
else:
i += nb
if i >= -len(self._queue_front):
self._queue_front[~i] = x
else:
raise IndexError("RandomAccessQueue index out of range")
def append(self, x):
self._queue_back.append(x)
if self.maxlen is not None and len(self) > self.maxlen:
self.popleft()
def extend(self, xs):
self._queue_back.extend(xs)
self._apply_maxlen()
def popleft(self):
if not self._queue_front:
if not self._queue_back:
raise IndexError("pop from empty RandomAccessQueue")
self._queue_front = self._queue_back
self._queue_back = []
self._queue_front.reverse()
return self._queue_front.pop()
def _sample(self, k):
n = len(self)
if k > n:
raise ValueError("Sample larger than population or is negative")
# The following code is equivalent to
# return [self[i] for i in np.random.choice(n, k)]
nf = len(self._queue_front)
return [self._queue_front[i] if i < nf else self._queue_back[i - nf]
for i in np.random.choice(n, k)]
|
Python
| 0.000002
|
@@ -21,19 +21,25 @@
ort
-numpy as np
+random%0Aimport six
%0A%0A%0Ac
@@ -3257,34 +3257,48 @@
or i in
-np.
random.
-choic
+sample(six.moves.rang
e(n
+)
, k)%5D%0A%0A
@@ -3438,26 +3438,40 @@
in
-np.
random.
-choic
+sample(six.moves.rang
e(n
+)
, k)
|
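Decoded, the revert swaps the numpy import for `import random` and `import six`, and both occurrences of np.random.choice(n, k) in _sample (the explanatory comment and the list comprehension) become random.sample over a range; the patched comprehension reads (indentation reconstructed):

        return [self._queue_front[i] if i < nf else self._queue_back[i - nf]
                for i in random.sample(six.moves.range(n), k)]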
4f34ea2d83e9e437f7934a6f8b88147db5ec4ee0
|
Add metric reporting for job step timeouts.
|
changes/jobs/sync_job_step.py
|
changes/jobs/sync_job_step.py
|
from __future__ import absolute_import, print_function
from datetime import datetime
from flask import current_app
from sqlalchemy import or_
from sqlalchemy.sql import func
from changes.constants import Status, Result
from changes.config import db
from changes.db.utils import try_create
from changes.models import (
ItemOption, JobPhase, JobStep, JobPlan, TestCase, ItemStat,
FileCoverage, FailureReason
)
from changes.queue.task import tracked_task
QUEUED_RETRY_DELAY = 30
def abort_step(task):
step = JobStep.query.get(task.kwargs['step_id'])
step.status = Status.finished
step.result = Result.aborted
db.session.add(step)
db.session.commit()
current_app.logger.exception('Unrecoverable exception syncing step %s', step.id)
def is_missing_tests(step, jobplan):
if 'snapshot' in jobplan.data:
options = jobplan.data['snapshot']['options']
else:
options = dict(db.session.query(
ItemOption.name, ItemOption.value,
).filter(
ItemOption.item_id == jobplan.plan.id,
ItemOption.name == 'build.expect-tests',
))
if options.get('build.expect-tests') != '1':
return False
# if the phase hasn't started (at least according to metadata)
# we can't accurately make comparisons
if not step.phase.date_started:
return False
# if this is not the final phase then ignore it
# TODO(dcramer): there is probably a better way we can be explicit about
# this?
jobphase_query = JobPhase.query.filter(
JobPhase.job_id == step.job_id,
JobPhase.id != step.phase_id,
or_(
JobPhase.date_started > step.phase.date_started,
JobPhase.date_started == None, # NOQA
)
)
if db.session.query(jobphase_query.exists()).scalar():
return False
has_tests = db.session.query(TestCase.query.filter(
TestCase.step_id == step.id,
).exists()).scalar()
return not has_tests
def has_test_failures(step):
return db.session.query(TestCase.query.filter(
TestCase.step_id == step.id,
TestCase.result == Result.failed,
).exists()).scalar()
def has_timed_out(step, jobplan):
if step.status != Status.in_progress:
return False
if not step.date_started:
return False
# TODO(dcramer): we make an assumption that there is a single step
options = jobplan.get_steps()[0].options
timeout = int(options.get('build.timeout') or 0)
if not timeout:
return False
# timeout is in minutes
timeout = timeout * 60
delta = datetime.utcnow() - step.date_started
if delta.total_seconds() > timeout:
return True
return False
def record_coverage_stats(step):
coverage_stats = db.session.query(
func.sum(FileCoverage.lines_covered).label('lines_covered'),
func.sum(FileCoverage.lines_uncovered).label('lines_uncovered'),
func.sum(FileCoverage.diff_lines_covered).label('diff_lines_covered'),
func.sum(FileCoverage.diff_lines_uncovered).label('diff_lines_uncovered'),
).filter(
FileCoverage.step_id == step.id,
).group_by(
FileCoverage.step_id,
).first()
stat_list = (
'lines_covered', 'lines_uncovered',
'diff_lines_covered', 'diff_lines_uncovered',
)
for stat_name in stat_list:
try_create(ItemStat, where={
'item_id': step.id,
'name': stat_name,
}, defaults={
'value': getattr(coverage_stats, stat_name, 0) or 0,
})
@tracked_task(on_abort=abort_step, max_retries=100)
def sync_job_step(step_id):
step = JobStep.query.get(step_id)
if not step:
return
jobplan, implementation = JobPlan.get_build_step_for_job(job_id=step.job_id)
# only synchronize if upstream hasn't suggested we're finished
if step.status != Status.finished:
implementation.update_step(step=step)
db.session.flush()
if step.status != Status.finished:
is_finished = False
else:
is_finished = sync_job_step.verify_all_children() == Status.finished
if not is_finished:
if has_timed_out(step, jobplan):
implementation.cancel_step(step=step)
step.result = Result.failed
db.session.add(step)
job = step.job
try_create(FailureReason, {
'step_id': step.id,
'job_id': job.id,
'build_id': job.build_id,
'project_id': job.project_id,
'reason': 'timeout'
})
db.session.flush()
if step.status != Status.in_progress:
retry_after = QUEUED_RETRY_DELAY
else:
retry_after = None
raise sync_job_step.NotFinished(retry_after=retry_after)
# ignore any 'failures' if it's aborted
if step.result == Result.aborted:
return
try:
record_coverage_stats(step)
except Exception:
current_app.logger.exception('Failed recording coverage stats for step %s', step.id)
missing_tests = is_missing_tests(step, jobplan)
try_create(ItemStat, where={
'item_id': step.id,
'name': 'tests_missing',
}, defaults={
'value': int(missing_tests)
})
if step.result == Result.passed and missing_tests:
step.result = Result.failed
db.session.add(step)
if missing_tests:
if step.result != Result.failed:
step.result = Result.failed
db.session.add(step)
try_create(FailureReason, {
'step_id': step.id,
'job_id': step.job_id,
'build_id': step.job.build_id,
'project_id': step.project_id,
'reason': 'missing_tests'
})
db.session.commit()
db.session.flush()
if has_test_failures(step):
if step.result != Result.failed:
step.result = Result.failed
db.session.add(step)
try_create(FailureReason, {
'step_id': step.id,
'job_id': step.job_id,
'build_id': step.job.build_id,
'project_id': step.project_id,
'reason': 'test_failures'
})
db.session.commit()
|
Python
| 0.000001
|
@@ -243,16 +243,31 @@
mport db
+, statsreporter
%0Afrom ch
@@ -4647,16 +4647,77 @@
flush()%0A
+ statsreporter.stats().incr('job_step_timed_out')%0A
|
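Decoded, the two hunks widen the config import to `from changes.config import db, statsreporter` and emit the metric named in the subject right after the flush in the timeout branch:

            statsreporter.stats().incr('job_step_timed_out')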
6c463135561796a3d26709d0498f300717ceba83
|
edit se.py to account for non-O3CPU workloads
|
configs/example/se.py
|
configs/example/se.py
|
# Copyright (c) 2006-2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Steve Reinhardt
# Simple test script
#
# "m5 test.py"
import m5
if m5.build_env['FULL_SYSTEM']:
m5.fatal("This script requires syscall emulation mode (*_SE).")
from m5.objects import *
import os, optparse, sys
from os.path import join as joinpath
m5.AddToPath('../common')
import Simulation
from Caches import *
from cpu2000 import *
# Get paths we might need. It's expected this file is in m5/configs/example.
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
m5_root = os.path.dirname(config_root)
parser = optparse.OptionParser()
# Benchmark options
parser.add_option("-c", "--cmd",
default=joinpath(m5_root, "tests/test-progs/hello/bin/alpha/linux/hello"),
help="The binary to run in syscall emulation mode.")
parser.add_option("-o", "--options", default="",
help='The options to pass to the binary, use " " around the entire string')
parser.add_option("-i", "--input", default="", help="Read stdin from a file.")
parser.add_option("--output", default="", help="Redirect stdout to a file.")
parser.add_option("--errout", default="", help="Redirect stderr to a file.")
execfile(os.path.join(config_root, "common", "Options.py"))
(options, args) = parser.parse_args()
if args:
print "Error: script doesn't take any positional arguments"
sys.exit(1)
if options.bench:
try:
if m5.build_env['TARGET_ISA'] != 'alpha':
print >>sys.stderr, "Simpoints code only works for Alpha ISA at this time"
sys.exit(1)
exec("workload = %s('alpha', 'tru64', 'ref')" % options.bench)
process = workload.makeLiveProcess()
except:
print >>sys.stderr, "Unable to find workload for %s" % options.bench
sys.exit(1)
else:
process = LiveProcess()
process.executable = options.cmd
process.cmd = [options.cmd] + options.options.split()
if options.input != "":
process.input = options.input
if options.output != "":
process.output = options.output
if options.errout != "":
process.errout = options.errout
if options.detailed:
#check for SMT workload
workloads = options.cmd.split(';')
if len(workloads) > 1:
process = []
smt_idx = 0
inputs = []
outputs = []
errouts = []
if options.input != "":
inputs = options.input.split(';')
if options.output != "":
outputs = options.output.split(';')
if options.errout != "":
errouts = options.errout.split(';')
for wrkld in workloads:
smt_process = LiveProcess()
smt_process.executable = wrkld
smt_process.cmd = wrkld + " " + options.options
if inputs and inputs[smt_idx]:
smt_process.input = inputs[smt_idx]
if outputs and outputs[smt_idx]:
smt_process.output = outputs[smt_idx]
if errouts and errouts[smt_idx]:
smt_process.errout = errouts[smt_idx]
process += [smt_process, ]
smt_idx += 1
(CPUClass, test_mem_mode, FutureClass) = Simulation.setCPUClass(options)
CPUClass.clock = '2GHz'
CPUClass.numThreads = len(workloads)
np = options.num_cpus
system = System(cpu = [CPUClass(cpu_id=i) for i in xrange(np)],
physmem = PhysicalMemory(range=AddrRange("512MB")),
membus = Bus(), mem_mode = test_mem_mode)
system.physmem.port = system.membus.port
if options.l2cache:
system.l2 = L2Cache(size='2MB')
system.tol2bus = Bus()
system.l2.cpu_side = system.tol2bus.port
system.l2.mem_side = system.membus.port
for i in xrange(np):
if options.caches:
system.cpu[i].addPrivateSplitL1Caches(L1Cache(size = '32kB'),
L1Cache(size = '64kB'))
if options.l2cache:
system.cpu[i].connectMemPorts(system.tol2bus)
else:
system.cpu[i].connectMemPorts(system.membus)
system.cpu[i].workload = process
if options.fastmem:
system.cpu[0].physmem_port = system.physmem.port
root = Root(system = system)
Simulation.run(options, root, system, FutureClass)
|
Python
| 0
|
@@ -3620,16 +3620,102 @@
errout%0A%0A
+%0A# By default, set workload to path of user-specified binary%0Aworkloads = options.cmd%0A%0A
if optio
|
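Decoded, the hunk defines `workloads` up front, before the `if options.detailed:` branch, so non-detailed (non-O3CPU) runs see it too:

# By default, set workload to path of user-specified binary
workloads = options.cmd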
f47085e1472a19a4647f671f1dc08db0c8e0835c
|
fix flaky test; one last attempt to stabilize it
|
tests/test_cmd_auto.py
|
tests/test_cmd_auto.py
|
import time
from multiprocessing import Process
import pytest
from doit.cmdparse import DefaultUpdate
from doit.exceptions import InvalidCommand
from doit.task import Task
from doit.cmd_base import TaskLoader
from doit import cmd_auto
class TestFindFileDeps(object):
def find_deps(self, sel_tasks):
tasks = {
't1': Task("t1", [""], file_dep=['f1']),
't2': Task("t2", [""], file_dep=['f2'], task_dep=['t1']),
't3': Task("t3", [""], file_dep=['f3'], setup=['t1']),
}
return cmd_auto.Auto._find_file_deps(tasks, sel_tasks)
def test_find_file_deps(self):
assert set(['f1']) == self.find_deps(['t1'])
assert set(['f1', 'f2']) == self.find_deps(['t2'])
assert set(['f1', 'f3']) == self.find_deps(['t3'])
class TestDepChanged(object):
def test_changed(self, dependency1):
started = time.time()
assert not cmd_auto.Auto._dep_changed([dependency1], started, [])
assert cmd_auto.Auto._dep_changed([dependency1], started-100, [])
assert not cmd_auto.Auto._dep_changed([dependency1], started-100,
[dependency1])
class FakeLoader(TaskLoader):
def __init__(self, task_list, dep_file):
self.task_list = task_list
self.dep_file = dep_file
def load_tasks(self, cmd, params, args):
return self.task_list, {'verbosity':2, 'dep_file':self.dep_file}
class TestAuto(object):
def test_invalid_args(self, dependency1, depfile_name):
t1 = Task("t1", [""], file_dep=[dependency1])
cmd = cmd_auto.Auto(task_loader=FakeLoader([t1], depfile_name))
pytest.raises(InvalidCommand, cmd.execute, None, 't2')
def test_run_wait(self, dependency1, target1, depfile_name):
def ok():
with open(target1, 'w') as fp:
fp.write('ok')
t1 = Task("t1", [ok], file_dep=[dependency1])
cmd = cmd_auto.Auto(task_loader=FakeLoader([t1], depfile_name))
run_wait_proc = Process(target=cmd.run_watch,
args=(DefaultUpdate(), []))
run_wait_proc.start()
# wait until task is executed
for x in range(5):
try:
got = open(target1, 'r').read()
print got
if got == 'ok':
break
except:
print 'busy'
time.sleep(0.1)
else: # pragma: no cover
raise Exception("target not created")
# write on file to terminate process
fd = open(dependency1, 'w')
fd.write("hi" + str(time.asctime()))
fd.close()
run_wait_proc.join(.5)
if run_wait_proc.is_alive(): # pragma: no cover
# this test is very flaky so we give it one more chance...
# write on file to terminate process
fd = open(dependency1, 'w')
fd.write("hi" + str(time.asctime()))
fd.close()
run_wait_proc.join(.5)
if run_wait_proc.is_alive(): # pragma: no cover
run_wait_proc.terminate()
raise Exception("process not terminated")
assert 0 == run_wait_proc.exitcode
def test_execute(self, monkeypatch):
# use dumb operation instead of executing RUN command and waiting event
def fake_run(self, params, args): # pragma: no cover
5 + 2
monkeypatch.setattr(cmd_auto.Auto, 'run_watch', fake_run)
# after join raise exception to stop AUTO command
original = cmd_auto.Process.join
def join_interrupt(self):
original(self)
raise KeyboardInterrupt()
monkeypatch.setattr(cmd_auto.Process, 'join', join_interrupt)
cmd = cmd_auto.Auto()
cmd.execute(None, None)
|
Python
| 0
|
@@ -2301,20 +2301,21 @@
print
-
+(
got
+)
%0A
@@ -2410,15 +2410,16 @@
rint
-
+(
'busy'
+)
%0A
@@ -3016,34 +3016,33 @@
_wait_proc.join(
-.5
+1
)%0A if
|
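Decoded, the fix parenthesizes the two debug prints (Python 3 syntax) and raises one of the join timeouts from .5 to 1 second; the patched lines, shown out of context, read:

                print(got)
                print('busy')
        run_wait_proc.join(1)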
ae8f9c39cd75d837a4cb5a4cea4d3d11fd1cabed
|
Add additional test case for comments
|
tests/test_comments.py
|
tests/test_comments.py
|
from hypothesis_auto import auto_pytest_magic
from isort import comments
auto_pytest_magic(comments.parse)
auto_pytest_magic(comments.add_to_line)
|
Python
| 0
|
@@ -142,8 +142,131 @@
o_line)%0A
+%0A%0Adef test_add_to_line():%0A assert comments.add_to_line(%5B%5D, %22import os # comment%22, removed=True).strip() == %22import os%22%0A
|
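Decoded, the added test reads:

def test_add_to_line():
    assert comments.add_to_line([], "import os  # comment", removed=True).strip() == "import os"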
aa1008691e3433f8350d3f3a5e5d03d9c629a45c
|
Test for getting parameters back from ideal observer
|
tests/test_idealobs.py
|
tests/test_idealobs.py
|
import pytest
import scipy.io as sio
from pambox import idealobs
import numpy as np
@pytest.fixture
def data():
return np.array([0.28032187, 1.07108181, 3.35513227, 8.66774961,
18.61914334, 33.63172026, 51.87228063, 69.72236134,
83.79127082, 92.72205919, 97.28779782, 99.16754416])
@pytest.fixture
def idealobs_parameters():
return (3.74647303e+00, 5.15928999e-02, -9.09197905e-07, 8000.)
@pytest.fixture
def snr():
return np.arange(-9, 3, 1)
@pytest.fixture
def snrenv(snr):
return 10. ** np.linspace(-2, 2, len(snr))
def test_fit_obs(data, snrenv, idealobs_parameters):
c = idealobs.IdealObs()
c.fit_obs(snrenv, data)
params = c.get_params()
res = [params['k'], params['q'], params['sigma_s']]
np.testing.assert_allclose(res, idealobs_parameters[0:3], atol=1e-5)
def test_psy_fn():
mat = sio.loadmat('./test_files/test_psychometric_function.mat')
x = mat['x'][0]
mu = 0.
sigma = 1.0
target = mat['p'][0]
y = idealobs.psy_fn(x, mu, sigma)
np.testing.assert_allclose(y, target)
def test_snr_env_to_pc(snrenv, idealobs_parameters, data):
c = idealobs.IdealObs(k=1., q=0.5, sigma_s=0.6, m=8000.)
pc = c.snrenv_to_pc(np.arange(0, 21))
target = np.array([0.0000, 0.0025, 0.0267, 0.1327, 0.4403, 1.1314, 2.4278,
4.5518, 7.6788, 11.8990, 17.1955, 23.4442, 30.4320,
37.8885, 45.5214, 53.0503, 60.2323, 66.8786, 72.8613,
78.1116, 82.6125])
np.testing.assert_allclose(pc, target, atol=1e-4)
|
Python
| 0
|
@@ -1592,8 +1592,142 @@
l=1e-4)%0A
+%0A%0Adef test_get_params():%0A p = %7B'k':1, 'q':2, 'sigma_s':0.5, 'm':800%7D%0A c = idealobs.IdealObs(**p)%0A assert p == c.get_params()%0A
|
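Decoded, the added round-trip test reads (indentation reconstructed):

def test_get_params():
    p = {'k':1, 'q':2, 'sigma_s':0.5, 'm':800}
    c = idealobs.IdealObs(**p)
    assert p == c.get_params()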
28e049dfc0a784c9c47ef671286e39e13825c6c5
|
Remove useless import
|
tests/test_multival.py
|
tests/test_multival.py
|
# test_multival.py
"""Test suite for MultiValue class"""
# Copyright (c) 2012 Darcy Mason
# This file is part of pydicom, relased under an MIT-style license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
import unittest
from datetime import date
from pydicom.multival import MultiValue
from pydicom.valuerep import DS, DSfloat, DSdecimal, IS, DA
from pydicom import config # don't import datetime_conversion directly
import sys
python_version = sys.version_info
class MultiValuetests(unittest.TestCase):
def testMultiDS(self):
"""MultiValue: Multi-valued data elements can be created........"""
multival = MultiValue(DS, ['11.1', '22.2', '33.3'])
for val in multival:
self.assertTrue(isinstance(val, (DSfloat, DSdecimal)),
"Multi-value DS item not converted to DS")
def testLimits(self):
"""MultiValue: Raise error if any item outside DICOM limits...."""
original_flag = config.enforce_valid_values
config.enforce_valid_values = True
self.assertRaises(OverflowError, MultiValue, IS, [1, -2 ** 31 - 1]) # Overflow error not raised for IS out of DICOM valid range
config.enforce_valid_values = original_flag
def testAppend(self):
"""MultiValue: Append of item converts it to required type..."""
multival = MultiValue(IS, [1, 5, 10])
multival.append('5')
self.assertTrue(isinstance(multival[-1], IS))
self.assertEqual(multival[-1], 5, "Item set by append is not correct value")
def testSetIndex(self):
"""MultiValue: Setting list item converts it to required type"""
multival = MultiValue(IS, [1, 5, 10])
multival[1] = '7'
self.assertTrue(isinstance(multival[1], IS))
self.assertEqual(multival[1], 7, "Item set by index is not correct value")
def testExtend(self):
"""MultiValue: Extending a list converts all to required type"""
multival = MultiValue(IS, [1, 5, 10])
multival.extend(['7', 42])
self.assertTrue(isinstance(multival[-2], IS))
self.assertTrue(isinstance(multival[-1], IS))
self.assertEqual(multival[-2], 7, "Item set by extend not correct value")
def testSlice(self):
"""MultiValue: Setting slice converts items to required type."""
multival = MultiValue(IS, range(7))
multival[2:7:2] = [4, 16, 36]
for val in multival:
self.assertTrue(isinstance(val, IS), "Slice IS value not correct type")
self.assertEqual(multival[4], 16, "Set by slice failed for item 4 of list")
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000004
|
@@ -417,12 +417,8 @@
, IS
-, DA
%0Afro
@@ -444,53 +444,8 @@
nfig
- # don't import datetime_conversion directly
%0A%0Aim
|
2fdb9d17b2c033370d663b4e72d71c1c7e105a84
|
fix test for python 3
|
tests/test_pipeline.py
|
tests/test_pipeline.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
from nose.tools import eq_
from redis_shard.shard import RedisShardAPI
from redis_shard._compat import b
from .config import settings
class TestShard(unittest.TestCase):
def setUp(self):
self.client = RedisShardAPI(**settings)
self.clear_db()
def tearDown(self):
pass
def clear_db(self):
self.client.delete('testset')
self.client.delete('testzset')
self.client.delete('testlist')
def test_pipeline(self):
self.client.set('test', '1')
pipe = self.client.pipeline()
pipe.set('test', '2')
pipe.zadd('testzset', 'first', 1)
pipe.zincrby('testzset', 'first')
pipe.zadd('testzset', 'second', 2)
pipe.execute()
pipe.reset()
eq_(self.client.get('test'), '2')
eq_(self.client.zscore('testzset', 'first'), 2.0)
eq_(self.client.zscore('testzset', 'second'), 2.0)
with self.client.pipeline() as pipe:
pipe.set('test', '3')
pipe.zadd('testzset', 'first', 4)
pipe.zincrby('testzset', 'first')
pipe.zadd('testzset', 'second', 5)
pipe.execute()
eq_(self.client.get('test'), '3')
eq_(self.client.zscore('testzset', 'first'), 5.0)
eq_(self.client.zscore('testzset', 'second'), 5.0)
def test_pipeline_script(self):
pipe = self.client.pipeline()
for i in range(100):
pipe.eval("""
redis.call('set', KEYS[1], ARGV[1])
""", 1, 'testx%d' % i, i)
pipe.execute()
for i in range(100):
eq_(self.client.get('testx%d' % i), b('%d' % i))
|
Python
| 0.000105
|
@@ -839,16 +839,17 @@
test'),
+b
'2')%0A
@@ -1245,16 +1245,17 @@
test'),
+b
'3')%0A
|
ab4c02c1f5f5cf3ba46b4924c48693d028dc23db
|
Split pipeline tests
|
tests/test_pipeline.py
|
tests/test_pipeline.py
|
from valohai_yaml.objs import Config, DeploymentNode
def test_pipeline(pipeline_config: Config):
lr = pipeline_config.lint()
assert lr.is_valid()
assert any(
(
edge.source_node == "batch1"
and edge.source_type == "parameter"
and edge.source_key == "aspect-ratio"
and edge.target_node == "batch2"
and edge.target_type == "parameter"
and edge.target_key == "aspect-ratio"
)
for edge in pipeline_config.pipelines["My little pipeline"].edges
)
assert any(
(
edge.source_node == "train"
and edge.source_type == "output"
and edge.source_key == "model"
and edge.target_node == "deploy-predictor"
and edge.target_type == "file"
and edge.target_key == "predict-digit.model"
)
for edge in pipeline_config.pipelines["My deployment pipeline"].edges
)
dp = pipeline_config.pipelines["My deployment pipeline"]
dn_predict = dp.get_node_by(name='deploy-predictor')
assert isinstance(dn_predict, DeploymentNode)
assert "predictor-staging" in dn_predict.aliases
assert "predict-digit" in dn_predict.endpoints
dn_no_preset = dp.get_node_by(name='deploy-no-presets')
assert isinstance(dn_no_preset, DeploymentNode)
assert dn_no_preset.aliases == []
assert dn_no_preset.endpoints == []
assert any(
(edge.source_type == "output" and edge.source_key == "model.pb")
for edge in pipeline_config.pipelines["My medium pipeline"].edges
)
|
Python
| 0.000001
|
@@ -65,16 +65,22 @@
pipeline
+_valid
(pipelin
@@ -106,12 +106,14 @@
-lr =
+assert
pip
@@ -135,33 +135,72 @@
nt()
-%0A assert lr.is_valid()
+.is_valid()%0A%0A%0Adef test_little_pipeline(pipeline_config: Config):
%0A
@@ -595,16 +595,133 @@
%0A )%0A%0A
+%0Adef test_deployment_pipeline(pipeline_config: Config):%0A dp = pipeline_config.pipelines%5B%22My deployment pipeline%22%5D%0A
asse
@@ -1055,132 +1055,22 @@
in
-pipeline_config.pipelines%5B%22My deployment pipeline%22%5D.edges%0A )%0A dp = pipeline_config.pipelines%5B%22My deployment pipeline%22%5D
+dp.edges%0A )
%0A%0A
@@ -1470,16 +1470,68 @@
== %5B%5D%0A%0A
+%0Adef test_medium_pipeline(pipeline_config: Config):%0A
asse
|
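Decoded, the refactor splits the single test into four, one per pipeline; a reconstructed outline follows (the elided bodies are the assertions already shown above, unchanged except that the deployment ones now iterate dp.edges):

def test_pipeline_valid(pipeline_config: Config):
    assert pipeline_config.lint().is_valid()

def test_little_pipeline(pipeline_config: Config):
    ...  # the "My little pipeline" edge assertion

def test_deployment_pipeline(pipeline_config: Config):
    dp = pipeline_config.pipelines["My deployment pipeline"]
    ...  # the edge and DeploymentNode assertions, against dp.edges

def test_medium_pipeline(pipeline_config: Config):
    ...  # the "My medium pipeline" edge assertion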
9e57e467ab508cd0e5fab2862a2c9b651eaa7838
|
rename tag basisofRecords to BASISOFRECORDS
|
bin/aggregate_metrics.py
|
bin/aggregate_metrics.py
|
import sys
import os
import json
SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + '/src'
sys.path.append(SRC_DIR)
from aggregator import ReportAggregator, CartoDBWriter
def check_arguments():
if len(sys.argv) != 3:
print 'usage: aggregate_metrics.py <data directory> <settings.json>\n'
print ' data directory: this should point to a directory'
print ' containing chunks of metric data.'
print ' metric data should be in json and'
print ' ordered by dataset key.\n'
print ' settings.json: contains the `api_key` that will'
print ' be used to contact the cartodb API.'
sys.exit(-1)
data_dir, settings_file = sys.argv[1:]
return [data_dir, settings_file]
def aggregate_metrics(data_dir):
agg = ReportAggregator()
data = agg.aggregate(data_dir)
return data
def write_data(data, settings_file):
settings = json.load(open(settings_file))
writer = CartoDBWriter()
basis_of_records_metrics = ['PRESERVED_SPECIMEN', 'FOSSIL_SPECIMEN', 'LIVING_SPECIMEN', 'MATERIAL_SAMPLE', 'OBSERVATION', 'HUMAN_OBSERVATION', 'MACHINE_OBSERVATION', 'LITERATURE', 'UNKNOWN']
for dataset in data:
row = [dataset]
basis_of_records = data[dataset]['basisofRecords']
for metric_name in basis_of_records_metrics:
if metric_name in basis_of_records:
row.append(basis_of_records[metric_name])
else:
row.append(0)
nr_of_records = data[dataset]['NUMBER_OF_RECORDS']
row.append(nr_of_records)
writer.write_basis_of_record(row, settings['api_key'])
def main():
data_dir, settings_file = check_arguments()
data = aggregate_metrics(data_dir)
write_data(data, settings_file)
main()
|
Python
| 0.000001
|
@@ -1352,22 +1352,22 @@
t%5D%5B'
-basisofRecords
+BASISOFRECORDS
'%5D%0A
|
77c0ad615c7f0270c0425866f06edde8856892b9
|
Add Augur unit tests for parseIntelXML()
|
build/tests/test_augur.py
|
build/tests/test_augur.py
|
#!/usr/bin/python3
"""
test_augur.py
APP: Inquisition
DESC: Unit test for Augur library
CREATION_DATE: 2017-11-25
"""
# MODULES
# | Native
import configparser
import unittest
# | Third-Party
from bs4 import BeautifulSoup as BSoup
# | Custom
from lib.destiny.Augur import Augur
# METADATA
__author__ = 'Joshua Carlson-Purcell'
__copyright__ = 'Copyright 2017, CarlsoNet'
__license__ = 'MIT'
__version__ = '1.0.0-alpha'
__maintainer__ = 'Joshua Carlson-Purcell'
__email__ = 'jcarlson@carlso.net'
__status__ = 'Development'
class AugurTestCase(unittest.TestCase):
def setUp(self):
# generate config
cfg = configparser.ConfigParser()
cfg.read('build/tests/unit_tests_GOOD.cfg')
self.augur = Augur(cfg=cfg)
def test_getXMLSrcData_validURL(self):
responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/')
self.assertIsInstance(responseData, BSoup)
def test_getXMLSrcData_invalidURL(self):
responseData = self.augur.getXMLSrcData(url='https://invalid.url/')
self.assertEqual(responseData, {})
def test_getXMLSrcData_blankURL(self):
try:
responseData = self.augur.getXMLSrcData(url='')
except ValueError:
self.assertTrue(True)
def test_mapIOCItemNameToFieldName(self):
fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='remoteIP')
self.assertEqual(fieldName, 'src_ip')
def test_mapIOCItemNameToFieldName_blankFieldName(self):
try:
fieldName = self.augur.mapIOCItemNameToFieldName(iocItemName='')
except ValueError:
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1666,16 +1666,252 @@
(True)%0A%0A
+ def test_parseIntelXML(self):%0A responseData = self.augur.getXMLSrcData(url='https://isc.sans.edu/api/openiocsources/')%0A parsedData = self.augur.parseIntelXML(responseData)%0A%0A self.assertNotEqual(parsedData, %7B%7D)%0A%0A
if __nam
|
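A side note on the blank-argument tests above: the try/except plus assertTrue(True) pattern still passes when no exception is raised at all. A minimal sketch of the stricter unittest idiom, assuming the same Augur fixture from setUp:

class AugurBlankInputTestCase(AugurTestCase):
    def test_getXMLSrcData_blankURL(self):
        # fails if getXMLSrcData() does NOT raise ValueError for a blank URL
        with self.assertRaises(ValueError):
            self.augur.getXMLSrcData(url='')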
5c2141610c40c1879400a88c82ac055081bb7451
|
Let the final table have any number of periods per day
|
main/views.py
|
main/views.py
|
import sys
from string import ascii_uppercase
import itertools
from typing import Dict, List
from openpyxl import Workbook
from terminaltables import AsciiTable
from main import models
from main import solver
Timetable = Dict[models.Subject, List[int]]
def timetable_to_workbook(timetable: Timetable, sheet_name: str = 'Timetable', periods_per_day: int = 4):
wb = Workbook()
ws = wb.active
ws.title = sheet_name
for subject in timetable:
for period in timetable[subject]:
# TODO: error when some period is out of bounds (e.g. period 200 when there are just 4 periods / day)
            # since everything is zero-indexed, floor division by the number of periods in a day gives us the day to
            # which the period belongs, i.e. for 4 periods in a day 0-3 -> day 0 (Monday), 4-7 -> day 1 (Tuesday)...
            # and the modulo by four gives the period in that day
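            # worked example with 4 periods/day: period 9 gives 9 // 4 = day 2
            # (Wednesday) and 9 % 4 = 1, i.e. the second period of that day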
day = period // 4
period_in_day = period % 4
cell = ws['{}{}'.format(ascii_uppercase[day], period_in_day + 1)]
cell.value = (cell.value or '') + subject.name + '\n'
return wb
def timetable_dict_to_ascii_table(timetable: Timetable) -> str:
flat_timetable = list(itertools.repeat('', 20))
for subject, periods in timetable.items():
for period in periods:
flat_timetable[period] += (subject.name + '\n')
square_timetable = list(zip(*[flat_timetable[i:i + 4] for i in range(0, len(flat_timetable), 4)]))
return AsciiTable(
[['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday']] + square_timetable
).table
def main():
ds = models.Datastore(sys.argv[1])
students = list(ds.get_students().values())
tt = solver.possible_timetables(students, 20)
timetable_to_workbook(next(tt)).save('out.xlsx')
if __name__ == '__main__':
main()
|
Python
| 0.997706
|
@@ -940,17 +940,31 @@
riod //
-4
+periods_per_day
%0A
@@ -993,17 +993,31 @@
eriod %25
-4
+periods_per_day
%0A%0A
|
b6572ec32295365862947845a8c916eae428700f
|
Clean up temporary files on 'nt'.
|
makemodule.py
|
makemodule.py
|
#!/bin/env python
"""
makemodule
Module generation tool
Copyright (c) 2015 Sam Saint-Pettersen.
Released under the MIT/X11 License.
"""
import sys
import os
import xml.dom.minidom as xml
class makemodule:
def __init__(self, args):
if len(args) == 1:
self.displayUsage()
else:
self.writeModuleXML()
def displayUsage(self):
print(__doc__)
print('Usage: makemodule [module..module]\n')
sys.exit(1)
def writeModuleXML(self):
names = []
enabled = []
redirect = ''
cleanup = False
if os.name == 'nt':
redirect = ' > a.tmp 2>&1'
cleanup = True
else:
redirect = ' >> /dev/null 2>&1'
for arg in sys.argv[1:]:
names.append(arg)
exitCode = int(os.system(arg + redirect))
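            # os.system() returns a wait status; 32512 == 127 << 8 is the
            # shell's "command not found" code, so the module is unavailable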
if exitCode == 32512:
enabled.append(False)
else:
enabled.append(True)
doc = xml.Document()
c = doc.createElement('configuration')
doc.appendChild(c)
i = 0
for name in names:
m = doc.createElement('module')
c.appendChild(m)
n = doc.createElement('name')
m.appendChild(n)
n_is = doc.createTextNode(name)
n.appendChild(n_is)
e = doc.createElement('enabled')
m.appendChild(e)
e_is = doc.createTextNode(str(enabled[i]))
e.appendChild(e_is)
i = i + 1
print('Writing modules.xml...')
f = open('modules.xml', 'w')
f.write(doc.toprettyxml())
f.close()
makemodule(sys.argv)
|
Python
| 0
|
@@ -1661,16 +1661,64 @@
lose()%0A%0A
+ if os.name == 'nt': os.remove('a.tmp')%0A%0A
makemodu
|
9aae92fb0e22c97f559b6e3ee895d9959e010e05
|
Add missing import
|
tests_tf/test_model.py
|
tests_tf/test_model.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from cleverhans.model import Model
class TestModelClass(unittest.TestCase):
def test_get_layer(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_layer` not implemented
with self.assertRaises(Exception) as context:
model.get_layer(x, layer='')
self.assertTrue(context.exception)
def test_get_logits(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_logits` not implemented
with self.assertRaises(Exception) as context:
model.get_logits(x)
self.assertTrue(context.exception)
def test_get_probs(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `get_probs` not implemented
with self.assertRaises(Exception) as context:
model.get_probs(x)
self.assertTrue(context.exception)
def test_get_layer_names(self):
# Define empty model
model = Model()
# Exception is thrown when `get_layer_names` not implemented
with self.assertRaises(Exception) as context:
model.get_layer_names()
self.assertTrue(context.exception)
def test_fprop(self):
# Define empty model
model = Model()
x = []
# Exception is thrown when `fprop` not implemented
with self.assertRaises(Exception) as context:
model.fprop(x)
self.assertTrue(context.exception)
class TestCallableModelWrapperInitArguments(unittest.TestCase):
def test_output_layer(self):
def model():
return True
# The following two calls should not raise Exceptions
wrap = CallableModelWrapper(model, 'probs')
wrap = CallableModelWrapper(model, 'logits')
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000466
|
@@ -194,16 +194,38 @@
rt Model
+, CallableModelWrapper
%0A%0A%0Aclass
|
cd4e412b4e92ba10eaadacd55fb39b9f2d5f1a70
|
Check the permissions of the clone dest directory
|
king_phisher/client/dialogs/clone_page.py
|
king_phisher/client/dialogs/clone_page.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/dialogs/page_clone.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from king_phisher.client import gui_utilities
from king_phisher.client import web_cloner
from gi.repository import Gtk
__all__ = ['ClonePageDialog']
class ClonePageDialog(gui_utilities.UtilityGladeGObject):
"""
Display a dialog for cloning a web page. The logic of the cloning operation
is provided by the :py:mod:`.web_cloner` module.
"""
top_gobject = 'dialog'
def __init__(self, *args, **kwargs):
super(ClonePageDialog, self).__init__(*args, **kwargs)
self.resources = Gtk.ListStore(str, str, str)
treeview = self.gtk_builder_get('treeview_resources')
treeview.set_model(self.resources)
treeview.get_selection().set_mode(Gtk.SelectionMode.NONE)
gui_utilities.gtk_treeview_set_column_names(treeview, ('Resource Path', 'MIME Type', 'Size'))
self.resources.set_sort_func(2, gui_utilities.gtk_treesortable_sort_func_numeric, 2)
self.button_cancel = self.gtk_builder_get('button_cancel')
self.entry_directory = self.gtk_builder_get('entry_directory')
self.entry_target = self.gtk_builder_get('entry_target')
self.label_status = self.gtk_builder_get('label_status')
self.spinner_status = self.gtk_builder_get('spinner_status')
def set_status(self, status_text, spinner_active=False):
self.label_status.set_text("Status: {0}".format(status_text))
self.spinner_status.set_property('visible', spinner_active)
self.spinner_status.set_property('active', spinner_active)
def interact(self):
self.dialog.show_all()
self.set_status('Waiting')
while self.dialog.run() == Gtk.ResponseType.APPLY:
target_url = self.entry_target.get_text()
if not target_url:
gui_utilities.show_dialog_error('Missing Information', self.dialog, 'Please set the target URL.')
self.set_status('Missing Information')
continue
dest_dir = self.entry_directory.get_text()
if not dest_dir:
gui_utilities.show_dialog_error('Missing Information', self.dialog, 'Please set the destination directory.')
self.set_status('Missing Information')
continue
self.set_status('Cloning', spinner_active=True)
cloner = web_cloner.WebPageCloner(target_url, dest_dir)
signal_id = self.button_cancel.connect('clicked', lambda _: cloner.stop_cloning())
original_label = self.button_cancel.get_label()
self.button_cancel.set_label('Cancel')
cloner.wait()
self.button_cancel.set_label(original_label)
self.button_cancel.disconnect(signal_id)
if cloner.load_failed:
self.set_status('Failed')
gui_utilities.show_dialog_error('Operation Failed', self.dialog, 'The web page clone operation failed.')
continue
for resource, resource_details in cloner.cloned_resources.items():
if gui_utilities.gtk_list_store_search(self.resources, resource, column=0):
continue
self.resources.append([resource, resource_details.mime_type or 'N/A', "{0:,}".format(resource_details.size)])
self.set_status('Done')
gui_utilities.gtk_sync()
if len(self.resources) and gui_utilities.show_dialog_yes_no('Transfer Cloned Pages', self.dialog, 'Would you like to start the SFTP client\nto upload the cloned pages?'):
self.parent.start_sftp_client()
self.dialog.destroy()
def signal_multi_set_directory(self, _):
dialog = gui_utilities.UtilityFileChooser('Destination Directory', self.dialog)
response = dialog.run_quick_select_directory()
dialog.destroy()
if response:
self.entry_directory.set_text(response['target_path'])
|
Python
| 0
|
@@ -1587,16 +1587,27 @@
AGE.%0A#%0A%0A
+import os%0A%0A
from kin
@@ -3591,16 +3591,225 @@
ontinue%0A
+%09%09%09if not os.access(dest_dir, os.W_OK):%0A%09%09%09%09gui_utilities.show_dialog_error('Invalid Directory', self.dialog, 'Can not write to the specified directory.')%0A%09%09%09%09self.set_status('Invalid Directory')%0A%09%09%09%09continue%0A
%09%09%09self.
|
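A note on the os.access(dest_dir, os.W_OK) check added in the diff above: os.access tests against the real (not effective) uid/gid, so under setuid it can disagree with what open() will actually allow. A minimal EAFP-style sketch of the same writability check, with all names illustrative:

import os
import tempfile

def dir_is_writable(path):
    # attempt a real write instead of consulting os.access()
    try:
        with tempfile.TemporaryFile(dir=path):
            return True
    except OSError:
        return False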
4646e7c682ba9a0291815a5d0de98674a9de3410
|
Fix RemoteCapture definition
|
src/pyshark/capture/remote_capture.py
|
src/pyshark/capture/remote_capture.py
|
from pyshark import LiveCapture
class RemoteCapture(LiveCapture):
"""
A capture which is performed on a remote machine which has an rpcapd service running.
"""
def __init__(self, remote_host, remote_interface, remote_port=2002, bpf_filter=None):
"""
Creates a new remote capture which will connect to a remote machine which is running rpcapd. Use the sniff() method
to get packets.
Note: The remote machine should have rpcapd running in null authentication mode (-n). Be warned that the traffic
is unencrypted!
:param remote_host: The remote host to capture on (IP or hostname). Should be running rpcapd.
:param remote_interface: The remote interface on the remote machine to capture on. Note that on windows it is
not the device display name but the true interface name (i.e. \Device\NPF_..).
:param remote_port: The remote port the rpcapd service is listening on
:param bpf_filter: A BPF (tcpdump) filter to apply on the cap before reading.
"""
interface = 'rpcap://%s:%d/%s' % (remote_host, remote_port, remote_interface)
super(RemoteCapture, self).__init__(interface, bpf_filter=bpf_filter)
|
Python
| 0.000001
|
@@ -858,16 +858,17 @@
.e.
+%5C
%5CDevice%5C
NPF_
@@ -863,16 +863,17 @@
%5CDevice%5C
+%5C
NPF_..).
|
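For context, a minimal usage sketch of the class above; the host, interface, and timeout are illustrative, and sniff() is the LiveCapture entry point the docstring refers to:

from pyshark.capture.remote_capture import RemoteCapture

# the remote machine must be running rpcapd in null-auth mode (-n)
cap = RemoteCapture('192.168.1.10', 'eth0')
cap.sniff(timeout=10)
for packet in cap:
    print(packet)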
1a9f0320b3a8aecc50cfee6335c3b6e8dc81c233
|
Make this tool less hacky.
|
tools/commit-impact.py
|
tools/commit-impact.py
|
#!/usr/bin/env python
#
# See the impact of a Futhark commit compared to the previous one we
# have benchmarking for.
import sys
import subprocess
from urllib.request import urlopen
from urllib.error import HTTPError
import json
def url_for(backend, system, commit):
return 'https://futhark-lang.org/benchmark-results/futhark-{}-{}-{}.json'.format(backend, system, commit)
def results_for_commit(backend, system, commit):
try:
url = url_for(backend, system, commit)
print('Fetching {}...'.format(url))
return json.loads(urlopen(url).read())
except HTTPError:
return None
def first_commit_with_results(backend, system, commits):
for commit in commits:
res = results_for_commit(backend, system, commit)
if res:
return commit, res
if __name__ == '__main__':
backend, system, commit = sys.argv[1:]
commits = subprocess.check_output(['git', 'rev-list', commit]).decode('utf-8').splitlines()
now = results_for_commit(backend, system, commit)
if not now:
print('No results found')
sys.exit(1)
then_commit, then = first_commit_with_results(backend, system, commits[1:])
print('Comparing {}'.format(commit))
print(' with {}'.format(then_commit))
# Hacky hacky...
m = __import__('cmp-bench-json')
m.compare(then, now)
|
Python
| 0
|
@@ -222,16 +222,42 @@
ort json
+%0Aimport tempfile%0Aimport os
%0A%0Adef ur
@@ -832,92 +832,43 @@
es%0A%0A
-if __name__ == '__main__':%0A backend, system, commit = sys.argv%5B1:%5D%0A%0A commits =
+def find_commits(start):%0A return
sub
@@ -908,21 +908,20 @@
-list',
-commi
+star
t%5D).deco
@@ -946,16 +946,88 @@
ines()%0A%0A
+if __name__ == '__main__':%0A backend, system, commit = sys.argv%5B1:4%5D%0A%0A
now
@@ -1144,16 +1144,141 @@
xit(1)%0A%0A
+ if len(sys.argv) == 5:%0A commits = find_commits(sys.argv%5B4%5D)%0A else:%0A commits = find_commits(commit)%5B1:%5D%0A%0A
then
@@ -1446,83 +1446,388 @@
-# Hacky hacky...%0A m = __import__('cmp-bench-json')%0A m.compare(then, now
+with tempfile.NamedTemporaryFile(prefix=commit, mode='w') as now_file:%0A with tempfile.NamedTemporaryFile(prefix=then_commit, mode='w') as then_file:%0A json.dump(now, now_file)%0A json.dump(then, then_file)%0A now_file.flush()%0A then_file.flush()%0A os.system('tools/cmp-bench-json.py %7B%7D %7B%7D'.format(then_file.name, now_file.name)
)%0A
|
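The rewritten tool above leans on one subtlety: json.dump() writes into Python's userspace buffer, so the file must be flushed before another process opens it by name. A minimal sketch of the pattern (POSIX only, since NamedTemporaryFile cannot be reopened while still open on Windows; the command is illustrative):

import json
import os
import tempfile

with tempfile.NamedTemporaryFile(mode='w', suffix='.json') as f:
    json.dump({'mean': 1.0}, f)
    f.flush()  # make the buffered bytes visible to the child process
    os.system('cat {}'.format(f.name))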
0fa1e147fc7d2522a4352c0bbc60e4da67380257
|
add a missing statement
|
landlab/utils/tests/test_stream_length.py
|
landlab/utils/tests/test_stream_length.py
|
from landlab import RasterModelGrid, FieldError
from landlab.components import FlowAccumulator, FastscapeEroder, FlowDirectorSteepest
import numpy as np
from landlab.utils.stream_length import calculate_stream_length
from nose.tools import assert_equal, assert_true, assert_false, assert_raises
def test_no_flow_receivers():
    """Test that correct error is raised when no flow receivers are on the grid."""
# instantiate a model grid, do not run flow accumulation on it
mg = RasterModelGrid(30, 70)
    # test that the stream length utility will fail because of a FieldError
assert_raises(FieldError, calculate_stream_length, mg)
def test_no_upstream_array():
"""Test that correct error is raised when no flow__upstream_node_order."""
# instantiate a model grid, do not run flow accumulation on it
mg = RasterModelGrid(30, 70)
z = mg.add_zeros('topographic__elevation', at='node')
fd = FlowDirectorSteepest(mg)
fd.run_one_step()
    # test that the stream length utility will fail because of a FieldError
assert_raises(FieldError, calculate_stream_length, mg)
|
Python
| 1
|
@@ -853,65 +853,8 @@
70)%0A
- z = mg.add_zeros('topographic__elevation', at='node')
%0A
|
fedc14b2cba840d9ce4937252277c5643fea55c4
|
Revert "Skip pro directories when doing license check."
|
tools/licensescheck.py
|
tools/licensescheck.py
|
import os, sys, re
prunelist = ('hsqldb19b3',
'hsqldb',
'proj_gen',
'jni_md.h',
'jni.h',
'org_voltdb_jni_ExecutionEngine.h',
'org_voltdb_utils_DBBPool.h',
'org_voltdb_utils_DBBPool_DBBContainer.h',
'org_voltdb_utils_ThreadUtils.h',
'xml2',
'simplejson',
'projectfile',
'xml')
# pro directories to skip
prolist = ('org/voltdb/management')
def verifyLicense(f, content, approvedLicensesJavaC, approvedLicensesPython):
if f.endswith('.py'):
if not content.startswith("#"):
if content.lstrip().startswith("#"):
print "ERROR: \"%s\" contains whitespace before initial comment." % f
return 1
else:
print "ERROR: \"%s\" does not begin with a comment." % f
return 1
# skip hashbang
if content.startswith("#!"):
(ignore, content) = content.split("\n", 1)
content = content.lstrip()
# skip python coding magic
if content.startswith("# -*-"):
(ignore, content) = content.split("\n", 1)
content = content.lstrip()
# verify license
for license in approvedLicensesPython:
if content.startswith(license):
return 0
print "ERROR: \"%s\" does not start with an approved license." % f
else:
if not content.startswith("/*"):
if content.lstrip().startswith("/*"):
print "ERROR: \"%s\" contains whitespace before initial comment." % f
else:
print "ERROR: \"%s\" does not begin with a comment." % f
return 1
for license in approvedLicensesJavaC:
if content.startswith(license):
return 0
print "ERROR: \"%s\" does not start with an approved license." % f
return 1
def verifyTrailingWhitespace(f, content):
if re.search(r'[\t\f\v ]\n', content):
print("ERROR: \"%s\" contains trailing whitespace." % (f))
return 1
return 0
def verifyTabs(f, content):
num = content.count('\t')
if num > 0:
print("ERROR: \"%s\" contains %d tabs." % (f, num))
return 1
return 0
def verifySprintf(f, content):
num = content.count('sprintf')
if num > 0:
print("ERROR: \"%s\" contains %d calls to sprintf(). Use snprintf()." % (f, num))
return 1
return 0
def readFile(filename):
"read a file into a string"
FH=open(filename, 'r')
fileString = FH.read()
FH.close()
return fileString
def processFile(f, approvedLicensesJavaC, approvedLicensesPython):
for suffix in ('.java', '.cpp', '.cc', '.h', '.hpp', '.py'):
if f.endswith(suffix):
break
else:
return 0
content = readFile(f)
retval = verifyLicense(f, content, approvedLicensesJavaC, approvedLicensesPython)
if retval != 0:
return retval
retval = verifyTabs(f, content)
if retval != 0:
return retval
retval = verifyTrailingWhitespace(f, content)
if (retval != 0):
return retval
retval = verifySprintf(f, content)
if (retval != 0):
return retval
return 0
def processAllFiles(d, approvedLicensesJavaC, approvedLicensesPython):
# Skip files in pro
for p in prolist:
if d.endswith(p):
return 0
files = os.listdir(d)
errcount = 0
for f in [f for f in files if not f.startswith('.') and f not in prunelist]:
fullpath = os.path.join(d,f)
if os.path.isdir(fullpath):
errcount += processAllFiles(fullpath, approvedLicensesJavaC, approvedLicensesPython)
else:
errcount += processFile(fullpath, approvedLicensesJavaC, approvedLicensesPython)
return errcount
testLicenses = ['approved_licenses/mit_x11_hstore_and_voltdb.txt',
'approved_licenses/mit_x11_evanjones_and_voltdb.txt',
'approved_licenses/mit_x11_michaelmccanna_and_voltdb.txt',
'approved_licenses/mit_x11_voltdb.txt']
srcLicenses = ['approved_licenses/gpl3_hstore_and_voltdb.txt',
'approved_licenses/gpl3_evanjones_and_voltdb.txt',
'approved_licenses/gpl3_nanohttpd_and_voltdb.txt',
'approved_licenses/gpl3_base64_and_voltdb.txt',
'approved_licenses/gpl3_voltdb.txt']
testLicensesPy = ['approved_licenses/mit_x11_voltdb_python.txt']
srcLicensesPy = ['approved_licenses/gpl3_voltdb_python.txt']
errcount = 0
errcount += processAllFiles("../src",
tuple([readFile(f) for f in srcLicenses]),
tuple([readFile(f) for f in srcLicensesPy]))
errcount += processAllFiles("../tests",
tuple([readFile(f) for f in testLicenses]),
tuple([readFile(f) for f in testLicensesPy]))
if errcount == 0:
print "SUCCESS. Found 0 license text errors, 0 files containing tabs or trailing whitespace."
else:
print "FAILURE. Found %d license text or whitespace errors." % errcount
# run through any other source the caller wants checked
# assumes a single valid license in $repo/tools/approved_licenses/license.txt
# "${voltpro}" is the build.xml property - can be seen as a literal if the
# property is not set.
for arg in sys.argv[1:]:
if arg != "${voltpro}":
print "Checking additional repository: " + arg;
proLicenses = ["../" + arg + '/tools/approved_licenses/license.txt']
proLicensesPy = ["../" + arg + '/tools/approved_licenses/license_python.txt']
errcount += processAllFiles("../" + arg + "/src/",
tuple([readFile(f) for f in proLicenses]),
tuple([readFile(f) for f in proLicensesPy]))
errcount += processAllFiles("../" + arg + "/tests/",
tuple([readFile(f) for f in proLicenses]),
tuple([readFile(f) for f in proLicensesPy]))
if errcount == 0:
print "SUCCESS. Found 0 license text errors, 0 files containing tabs or trailing whitespace."
else:
print "FAILURE (%s). Found %d license text or whitespace errors." % arg, errcount
sys.exit(errcount)
|
Python
| 0
|
@@ -431,71 +431,8 @@
')%0A%0A
-# pro directories to skip%0Aprolist = ('org/voltdb/management')%0A%0A
def
@@ -3300,102 +3300,8 @@
n):%0A
- # Skip files in pro%0A for p in prolist:%0A if d.endswith(p):%0A return 0%0A%0A
|
3c7b421bf2069236a11292e677aff74ee53641dd
|
Use new Twitch.tv utility function
|
command.py
|
command.py
|
#!/usr/bin/env python3
# vim:fileencoding=utf-8:ts=8:et:sw=4:sts=4:tw=79
"""
command.py
Handle commands received on IRC.
Copyright (c) 2015 Twisted Pear <pear at twistedpear dot at>
See the file LICENSE for copying permission.
"""
import aiohttp
import asyncio
import bs4
import functools
import logging
PATREON_URL = "http://www.patreon.com/loadingreadyrun"
BROADCAST_URL = ("https://api.twitch.tv/kraken/channels/"
"loadingreadyrun/videos?limit=1&broadcasts=true")
class CommandHandler(object):
"""
The command handler interacts with an IRC client and dispatches commands.
It registers itself as a handler for PRIVMSG events.
"""
logger = logging.getLogger("command")
class rate_limit(object):
"""A decorator that suppresses method calls within a certain delay."""
last = 0.0
def __init__(self, *, delay=30, loop=None):
"""Initialize rate limiter with a default delay of 30."""
self.delay = delay
self.loop = loop or asyncio.get_event_loop()
def __call__(self, func):
@functools.wraps(func)
@asyncio.coroutine
def wrapper(*args, **kwargs):
now = self.loop.time()
if (now - self.last) > self.delay:
self.last = now
yield from func(*args, **kwargs)
return wrapper
def __init__(self, client, feed, *, prefix="!"):
"""Initialize the command handler and register for PRIVMSG events."""
self.logger.info("Creating CommandHandler instance.")
self.prefix = prefix
self.client = client
self.feed = feed
self.client.event_handler("PRIVMSG")(self.handle_privmsg)
@asyncio.coroutine
def handle_privmsg(self, nick, target, message):
"""
Handle a PRIVMSG event and dispatch any command to the relevant method.
"""
# ignore everything that's not a command with our prefix
if not message.startswith(self.prefix):
return
# command is always separated by a space
parts = message.split(" ", 1)
cmd = parts[0]
args = parts[1].strip() if len(parts) == 2 else None
self.logger.info("Got command {0} from {1}.".format(cmd, nick))
# is this a query? if so, send messages to nick instead
if target == self.client.nickname:
target = nick
# check if we can handle that command
cmd_name = "handle_command_{0}".format(cmd[1:])
handle_command = getattr(self, cmd_name, None)
if handle_command and callable(handle_command):
yield from handle_command(target, nick, args)
@rate_limit()
@asyncio.coroutine
def handle_command_patreon(self, target, nick, args):
"""
Handle !patreon command.
Post the number of patrons and the total earnings per month.
"""
patreon_req = yield from aiohttp.request("get", PATREON_URL)
patreon_body = yield from patreon_req.text()
patreon_soup = bs4.BeautifulSoup(patreon_body)
tag_patrons = patreon_soup.find("div", id="totalPatrons")
nof_patrons = tag_patrons.string if tag_patrons else "N/A"
tag_earnings = patreon_soup.find("span", id="totalEarnings")
total_earnings = tag_earnings.string if tag_earnings else "N/A"
patreon_msg = "{0} patrons for a total of ${1} per month. {2}".format(
nof_patrons, total_earnings, PATREON_URL)
yield from self.client.privmsg(target, patreon_msg)
@rate_limit()
@asyncio.coroutine
def handle_command_latest(self, target, nick, args):
"""
Handle !latest [video|podcast|broadcast] command.
Post the most recent RSS feed item or Twitch.tv broadcast.
"""
feed = "video"
if args and args in ["video", "podcast", "broadcast"]:
feed = args
# broadcasts are updated here
if feed == "broadcast":
broadcast_req = yield from aiohttp.request(
"get", BROADCAST_URL,
headers={"Accept": "application/vnd.twitchtv.v3+json"})
broadcast = yield from broadcast_req.json()
video = broadcast["videos"][0]
broadcast_msg = "Latest Broadcast: {0} ({1}) [{2}]".format(
video["title"], video["url"], video["recorded_at"])
yield from self.client.privmsg(target, broadcast_msg)
else:
# start a manual update
yield from self.feed.update(feed)
# let the feed parser announce it
yield from self.feed.announce(feed, target=target)
|
Python
| 0
|
@@ -300,16 +300,30 @@
logging
+%0Aimport twitch
%0A%0APATREO
@@ -375,133 +375,8 @@
un%22%0A
-BROADCAST_URL = (%22https://api.twitch.tv/kraken/channels/%22%0A %22loadingreadyrun/videos?limit=1&broadcasts=true%22)%0A
%0A%0Acl
@@ -3917,20 +3917,16 @@
roadcast
-_req
= yield
@@ -3935,189 +3935,50 @@
rom
-aiohttp.request(%0A %22get%22, BROADCAST_URL,%0A headers=%7B%22Accept%22: %22application/vnd.twitchtv.v3+json%22%7D)%0A broadcast = yield from broadcast_req.json(
+twitch.get_broadcasts(%22loadingreadyrun%22, 1
)%0A
@@ -3995,16 +3995,21 @@
video =
+next(
broadcas
@@ -4013,21 +4013,15 @@
cast
-%5B%22videos%22%5D%5B0%5D
+, None)
%0A%0A
@@ -4093,75 +4093,14 @@
mat(
-%0A video%5B%22title%22%5D, video%5B%22url%22%5D, video%5B%22recorded_at%22%5D
+*video
)%0A%0A
|
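The rate_limit decorator above drops calls that arrive inside the delay window instead of queueing them. A minimal synchronous analogue of the same idea (names illustrative, not part of the project):

import time

def rate_limit(delay=30):
    def decorate(func):
        last = [0.0]  # closure cell holding the last accepted call time
        def wrapper(*args, **kwargs):
            now = time.monotonic()
            if now - last[0] > delay:
                last[0] = now
                return func(*args, **kwargs)
            # calls within the delay window are silently suppressed
        return wrapper
    return decorate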
85ee5f5e6d7a5937b67c9d11ae127709749f7490
|
Bump to version 0.4.1
|
cmsplugin_cascade/__init__.py
|
cmsplugin_cascade/__init__.py
|
__version__ = "0.4.0"
|
Python
| 0
|
@@ -12,11 +12,11 @@
= %220.4.
-0
+1
%22%0A
|
59b8ae5f17e556c09ef8592723f9c684843c7dcc
|
update function and comment
|
code/utils/outlierfunction.py
|
code/utils/outlierfunction.py
|
# find outliers based on DVARS and FD
import numpy as np

def outlier(data, bound):
'''
Input:
data: array of values
bound: threshold for outliers
Output:
indices of outliers
'''
outlier = []
# set outlier values to 0
for i in data:
if i <= bound:
outlier.append(0)
else:
outlier.append(i)
# find outlier indices
outlier_indices = np.nonzero(outlier)
return outlier_indices
|
Python
| 0
|
@@ -226,16 +226,19 @@
# set
+non
outlier
@@ -248,16 +248,37 @@
ues to 0
+, outliers to nonzero
%0A for
|
b0f7e70e29783de6980006be92bc105287b3b5c3
|
Remove dependency on not-yet-added [] library Change on 2014/01/24 by mgainer <mgainer@google.com> ------------- Created by MOE: http://code.google.com/p/moe-java MOE_MIGRATED_REVID=60226626
|
coursebuilder/main.py
|
coursebuilder/main.py
|
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Course Builder web application entry point."""
__author__ = 'Pavel Simakov (psimakov@google.com)'
import os
# The following import is needed in order to add third-party libraries.
import appengine_config # pylint: disable-msg=unused-import
from common import tags
from controllers import sites
from models import custom_modules
import modules.activity_tag.activity_tag
import modules.admin.admin
import modules.announcements.announcements
import modules.assessment_tags.questions
import modules.course_explorer.course_explorer
import modules.courses.courses
import modules.dashboard.dashboard
import modules.mapreduce.mapreduce_module
import modules.oauth2.oauth2
import modules.oeditor.oeditor
import modules.review.review
import modules.search.search
import modules.upload.upload
import webapp2
# use this flag to control debug only features
debug = not appengine_config.PRODUCTION_MODE
# init and enable most known modules
modules.activity_tag.activity_tag.register_module().enable()
modules.admin.admin.register_module().enable()
modules.announcements.announcements.register_module().enable()
modules.assessment_tags.questions.register_module().enable()
modules.course_explorer.course_explorer.register_module().enable()
modules.courses.courses.register_module().enable()
modules.dashboard.dashboard.register_module().enable()
modules.mapreduce.mapreduce_module.register_module().enable()
modules.oeditor.oeditor.register_module().enable()
modules.review.review.register_module().enable()
modules.search.search.register_module().enable()
modules.upload.upload.register_module().enable()
# register modules that are not enabled by default.
modules.oauth2.oauth2.register_module()
# compute all possible routes
global_routes, namespaced_routes = custom_modules.Registry.get_all_routes()
# routes available at '/%namespace%/' context paths
sites.ApplicationRequestHandler.bind(namespaced_routes)
app_routes = [(r'(.*)', sites.ApplicationRequestHandler)]
# enable Appstats handlers if requested
appstats_routes = []
if appengine_config.gcb_appstats_enabled():
# pylint: disable-msg=g-import-not-at-top
import google.appengine.ext.appstats.ui as appstats_ui
# pylint: enable-msg=g-import-not-at-top
# add all Appstats URL's to /admin/stats basepath
for path, handler in appstats_ui.URLMAP:
assert '.*' == path[:2]
appstats_routes.append(('/admin/stats/%s' % path[3:], handler))
# tag extension resource routes
extensions_routes = [(
'/extensions/tags/.*/resources/.*', tags.ResourcesHandler)]
# i18n configuration for jinja2
webapp2_i18n_config = {'translations_path': os.path.join(
appengine_config.BUNDLE_ROOT, 'modules/i18n/resources/locale')}
# init application
app = webapp2.WSGIApplication(
global_routes + extensions_routes + appstats_routes + app_routes,
config={'webapp2_extras.i18n': webapp2_i18n_config},
debug=debug)
|
Python
| 0
|
@@ -1196,50 +1196,8 @@
ard%0A
-import modules.mapreduce.mapreduce_module%0A
impo
@@ -1894,70 +1894,8 @@
e()%0A
-modules.mapreduce.mapreduce_module.register_module().enable()%0A
modu
|
81286b8912f2061a8a7e98d2ac87379e5237cb20
|
Fix chdir argument to be 'path'
|
lib/ansible/modules/extras/system/make.py
|
lib/ansible/modules/extras/system/make.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Linus Unnebäck <linus@folkdatorn.se>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: make
short_description: Run targets in a Makefile
requirements: [ make ]
version_added: "2.1"
author: Linus Unnebäck (@LinusU) <linus@folkdatorn.se>
description:
- Run targets in a Makefile.
options:
target:
description:
- The target to run
required: false
default: none
params:
description:
- Any extra parameters to pass to make
required: false
default: none
chdir:
description:
- cd into this directory before running make
required: true
'''
EXAMPLES = '''
# Build the default target
- make: chdir=/home/ubuntu/cool-project
# Run `install` target as root
- make: chdir=/home/ubuntu/cool-project target=install
become: yes
# Pass in extra arguments to build
- make:
chdir: /home/ubuntu/cool-project
target: all
params:
NUM_THREADS: 4
BACKEND: lapack
'''
# TODO: Disabled the RETURN as it was breaking docs building. Someone needs to
# fix this
RETURN = '''# '''
def run_command(command, module, check_rc=True):
"""
Run a command using the module, return
the result code and std{err,out} content.
:param command: list of command arguments
:param module: Ansible make module instance
:return: return code, stdout content, stderr content
"""
rc, out, err = module.run_command(command, check_rc=check_rc, cwd=module.params['chdir'])
return rc, sanitize_output(out), sanitize_output(err)
def sanitize_output(output):
"""
Sanitize the output string before we
pass it to module.fail_json. Defaults
the string to empty if it is None, else
strips trailing newlines.
:param output: output to sanitize
:return: sanitized output
"""
if output is None:
return b('')
else:
return output.rstrip(b("\r\n"))
def main():
module = AnsibleModule(
supports_check_mode=True,
argument_spec=dict(
target=dict(required=False, default=None, type='str'),
params=dict(required=False, default=None, type='dict'),
chdir=dict(required=True, default=None, type='str'),
),
)
# Build up the invocation of `make` we are going to use
make_path = module.get_bin_path('make', True)
make_target = module.params['target']
if module.params['params'] is not None:
make_parameters = [k + '=' + str(v) for k, v in module.params['params'].iteritems()]
else:
make_parameters = []
base_command = [make_path, make_target]
base_command.extend(make_parameters)
# Check if the target is already up to date
rc, out, err = run_command(base_command + ['--question'], module, check_rc=False)
if module.check_mode:
# If we've been asked to do a dry run, we only need
# to report whether or not the target is up to date
changed = (rc != 0)
else:
if rc == 0:
# The target is up to date, so we don't have to
# do anything
changed = False
else:
            # The target isn't up to date, so we need to run it
rc, out, err = run_command(base_command, module)
changed = True
# We don't report the return code, as if this module failed
# we would be calling fail_json from run_command, so even if
# we had a non-zero return code, we did not fail. However, if
# we report a non-zero return code here, we will be marked as
# failed regardless of what we signal using the failed= kwarg.
module.exit_json(
changed=changed,
failed=False,
stdout=out,
stderr=err,
target=module.params['target'],
params=module.params['params'],
chdir=module.params['chdir']
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
|
Python
| 0.013779
|
@@ -2856,35 +2856,36 @@
ult=None, type='
-str
+path
'),%0A ),%0A
|
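Context for the one-line diff above: in an Ansible argument_spec, type='path' makes the module framework expand '~' and environment variables in the supplied value, which plain type='str' does not, so chdir=~/cool-project resolves correctly. A sketch of the corrected spec entry:

argument_spec = dict(
    # 'path' expands '~' and '$VARS' before the value reaches the module
    chdir=dict(required=True, type='path'),
)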
270c8ca68357f92999474fbf110fed7b01cdfdf2
|
Use proper way to access package resources.
|
cqlengine/__init__.py
|
cqlengine/__init__.py
|
import os
from cqlengine.columns import *
from cqlengine.functions import *
from cqlengine.models import Model
from cqlengine.query import BatchQuery
__cqlengine_version_path__ = os.path.realpath(__file__ + '/../VERSION')
__version__ = open(__cqlengine_version_path__, 'r').readline().strip()
# compaction
SizeTieredCompactionStrategy = "SizeTieredCompactionStrategy"
LeveledCompactionStrategy = "LeveledCompactionStrategy"
ANY = "ANY"
ONE = "ONE"
TWO = "TWO"
THREE = "THREE"
QUORUM = "QUORUM"
LOCAL_QUORUM = "LOCAL_QUORUM"
EACH_QUORUM = "EACH_QUORUM"
ALL = "ALL"
|
Python
| 0
|
@@ -2,16 +2,37 @@
mport os
+%0Aimport pkg_resources
%0A%0Afrom c
@@ -166,16 +166,17 @@
hQuery%0A%0A
+%0A
__cqleng
@@ -200,41 +200,115 @@
_ =
-os.path.realpath(__file__ + '/../
+pkg_resources.resource_filename('cqlengine',%0A '
VERS
|
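Reassembled from the diff above, the replacement resolves the packaged VERSION file through pkg_resources, which also works when the package is installed as a zipped egg:

import pkg_resources

__cqlengine_version_path__ = pkg_resources.resource_filename('cqlengine',
                                                             'VERSION')
__version__ = open(__cqlengine_version_path__, 'r').readline().strip()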
5584ec8c6aa8e6567b3ddd286c1c7305fad070a3
|
fix init
|
cryptotik/__init__.py
|
cryptotik/__init__.py
|
from cryptotik.poloniex import Poloniex
from cryptotik.bittrex import Bittrex
from cryptotik.btce import Btce
from cryptotik.therock import TheRock
from cryptotik.livecoin import Livecoin
<<<<<<< HEAD
from cryptotik.okcoin import OKcoin
=======
from cryptotik.hitbtc import Hitbtc
>>>>>>> 7e948ea7ab42a9ad57d9ec1259539995ff34fb34
|
Python
| 0.024288
|
@@ -186,21 +186,8 @@
oin%0A
-%3C%3C%3C%3C%3C%3C%3C HEAD%0A
from
@@ -222,16 +222,8 @@
oin%0A
-=======%0A
from
@@ -258,53 +258,4 @@
btc%0A
-%3E%3E%3E%3E%3E%3E%3E 7e948ea7ab42a9ad57d9ec1259539995ff34fb34%0A
|
35cc2bce4e5fb62083ec1a44bda85c2da064d119
|
Remove debug print statements
|
cs251tk/specs/load.py
|
cs251tk/specs/load.py
|
from logging import warning
from glob import iglob
import json
import os
import shutil
import sys
from .cache import cache_specs
from .dirs import get_specs_dir
def load_all_specs(*, basedir=get_specs_dir()):
os.makedirs(basedir, exist_ok=True)
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
return dict([load_spec(filename, basedir) for filename in spec_files])
def load_some_specs(idents, *, basedir=get_specs_dir()):
# the repo has a /specs folder
basedir = os.path.join(basedir, 'specs')
cache_specs(basedir)
wanted_spec_files = [os.path.join(basedir, '_cache', '{}.json'.format(ident)) for ident in idents]
all_spec_files = iglob(os.path.join(basedir, '_cache', '*.json'))
loadable_spec_files = set(all_spec_files).intersection(wanted_spec_files)
print(loadable_spec_files)
# load_spec returns a (name, spec) tuple, so we just let the dict() constructor
# turn that into the {name: spec} pairs of a dictionary for us
    return dict([load_spec(filename, basedir) for filename in loadable_spec_files])
def load_spec(filename, basedir):
with open(filename, 'r', encoding='utf-8') as specfile:
loaded_spec = json.load(specfile)
name = os.path.splitext(os.path.basename(filename))[0]
assignment = loaded_spec['assignment']
if name != assignment:
warning('assignment "{}" does not match the filename {}'.format(assignment, filename))
# warning("Re-caching specs\n")
# print(file=sys.stderr)
recache = input("Re-cache specs? (Y/N)")
if recache == "Y" or recache == "y":
shutil.rmtree(os.path.join(basedir, '_cache'))
cache_specs(basedir)
return assignment, loaded_spec
|
Python
| 0.000003
|
@@ -83,19 +83,8 @@
util
-%0Aimport sys
%0A%0Afr
@@ -1552,16 +1552,77 @@
ment'%5D%0A%0A
+ # Ask if user wants to re-cache specs to fix discrepancy%0A
if n
@@ -1739,81 +1739,8 @@
e))%0A
- # warning(%22Re-caching specs%5Cn%22)%0A # print(file=sys.stderr)%0A
|
096d3c44a60c83820410a85cd6a56f20b13b9ccd
|
Update API Infor: rewrite the users_total_count API response using the new format
|
commonrepo/infor_api/views.py
|
commonrepo/infor_api/views.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions
from rest_framework import renderers
from rest_framework import status
from rest_framework import viewsets
from rest_framework.decorators import api_view, detail_route
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.parsers import JSONParser
from commonrepo.elos.models import ELO
from commonrepo.users.models import User as User
from .permissions import IsOwnerOrReadOnly
# ELOs
@api_view(['GET'])
def elos_total_count(request):
if request.method == 'GET':
return Response({"code": 202,
"status": "ok",
"result": {
"total_elos": ELO.objects.all().count()
}
},
status=status.HTTP_202_ACCEPTED)
else:
return Response({"code": 400,
"status": "error"
},
status=status.HTTP_400_BAD_REQUEST)
# Users
@api_view(['GET'])
def users_total_count(request):
if request.method == 'GET':
return Response({"total_users": User.objects.all().count() }, status=status.HTTP_202_ACCEPTED)
else:
return Response(status=status.HTTP_400_BAD_REQUEST)
|
Python
| 0
|
@@ -1218,20 +1218,16 @@
REQUEST)
-
%0A%0A# User
@@ -1341,119 +1341,429 @@
e(%7B%22
-total_users%22: User.objects.all().count() %7D, status=status.HTTP_202_ACCEPTED)%0A else:%0A return Response(
+code%22: 202, %0A %22status%22: %22ok%22,%0A %22result%22: %7B%0A %22total_users%22: User.objects.all().count()%0A %7D%0A %7D,%0A status=status.HTTP_202_ACCEPTED)%0A else:%0A return Response(%7B%22code%22: 400,%0A %22status%22: %22error%22%0A %7D,%0A
stat
@@ -1793,10 +1793,8 @@
REQUEST)
-
|
98e52a8c603346a03b2e5a6197ce2cb49af86f2c
|
Cut useless code
|
complexism/multimodel/mmrt.py
|
complexism/multimodel/mmrt.py
|
import networkx as nx
from complexism.misc.counter import count
from complexism.mcore import *
from .entries import RelationEntry
__author__ = 'TimeWz667'
__all__ = ['ObsMultiModel', 'MultiModel']
class ObsMultiModel(Observer):
def __init__(self):
Observer.__init__(self)
self.ObservingModels = list()
def add_observing_model(self, model):
if model not in self.ObservingModels:
self.ObservingModels.append(model)
def update_dynamic_observations(self, model, flow, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
flow.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Flow.items() if k != 'Time'})
def read_statics(self, model, tab, ti):
for m in self.ObservingModels:
mod = model.get_model(m)
if tab is self.Last:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Last.items() if k != 'Time'})
elif self.ExtMid:
tab.update({'{}@{}'.format(m, k): v for k, v in mod.Obs.Mid.items() if k != 'Time'})
class MultiModel(BranchModel):
def __init__(self, name, pc=None):
BranchModel.__init__(self, name, pc, ObsMultiModel())
self.Models = nx.MultiDiGraph()
def add_observing_model(self, m):
if m in self.Models:
self.Observer.add_observing_model(m)
def append(self, m):
if m.Name not in self.Models:
self.Models.add_node(m.Name, model=m)
def link(self, src, tar, message=None, **kwargs):
src = src if isinstance(src, RelationEntry) else RelationEntry(src)
tar = tar if isinstance(tar, RelationEntry) else RelationEntry(tar)
m_src = self.select_all(src.Selector)
m_tar = self.select_all(tar.Selector)
if src.is_single():
ms = m_src.first()
for kt, mt in m_tar.items():
if ms is not mt:
mt.listen(ms.Name, message, src.Parameter, tar.Parameter, **kwargs)
self.Models.add_edge(ms.Name, mt.Name, par_src=src.Parameter, par_tar=tar.Parameter)
def read_y0(self, y0, ti):
if not y0:
return
for k, m in self.Models.nodes().data('model'):
m.read_y0(y0=y0[k], ti=ti)
def reset_impulse(self, ti):
for s, nbd in self.Models.adjacency():
src = self.get_model(s)
for t in nbd.keys():
tar = self.get_model(t)
tar.impulse_foreign(src, 'update', ti)
@count()
def do_request(self, req):
src = self.get_model(req.Who)
for t, kb in self.Models[req.Who].items():
# for _, atr in kb.items():
tar = self.get_model(t)
tar.impulse_foreign(src, req.Message, req.When)
def find_next(self):
for k, model in self.all_models().items():
for req in model.Next:
self.Requests.append_request(req.up_scale(self.Name))
self.Requests.append_event(req.Event, k, self.Name)
# self.Requests.append_requests([req.up_scale(k) for req in model.Next])
def all_models(self):
return dict(self.Models.nodes().data('model'))
def get_model(self, k):
return self.Models.nodes[k]['model']
def clone(self, **kwargs):
pass
|
Python
| 0
|
@@ -2827,307 +2827,12 @@
-for k, model in self.all_models().items():%0A for req in model.Next:%0A self.Requests.append_request(req.up_scale(self.Name))%0A self.Requests.append_event(req.Event, k, self.Name)%0A# self.Requests.append_requests(%5Breq.up_scale(k) for req in model.Next%5D)
+pass
%0A%0A
|
6e35e4f5af341bbcda050434d86fd7e4712ebd0f
|
Update JGit to get PackInserter fix
|
lib/jgit/jgit.bzl
|
lib/jgit/jgit.bzl
|
load("//tools/bzl:maven_jar.bzl", "GERRIT", "MAVEN_LOCAL", "MAVEN_CENTRAL", "maven_jar")
_JGIT_VERS = "4.9.2.201712150930-r"
_DOC_VERS = _JGIT_VERS # Set to _JGIT_VERS unless using a snapshot
JGIT_DOC_URL = "http://download.eclipse.org/jgit/site/" + _DOC_VERS + "/apidocs"
_JGIT_REPO = MAVEN_CENTRAL # Leave here even if set to MAVEN_CENTRAL.
# set this to use a local version.
# "/home/<user>/projects/jgit"
LOCAL_JGIT_REPO = ""
def jgit_repos():
if LOCAL_JGIT_REPO:
native.local_repository(
name = "jgit",
path = LOCAL_JGIT_REPO,
)
else:
jgit_maven_repos()
def jgit_maven_repos():
maven_jar(
name = "jgit_lib",
artifact = "org.eclipse.jgit:org.eclipse.jgit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "a3a2d1df793245ebfc7322db3c2b9828ee184850",
src_sha1 = "afa9a25e5502aeeb3b93d773ee445866fb316069",
unsign = True,
)
maven_jar(
name = "jgit_servlet",
artifact = "org.eclipse.jgit:org.eclipse.jgit.http.server:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "87b4d287feff8b6b4c4f38a504460d2a3d4624f3",
unsign = True,
)
maven_jar(
name = "jgit_archive",
artifact = "org.eclipse.jgit:org.eclipse.jgit.archive:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "ce4133fb0735d454dc8f6695fe6c6d5eff18a452",
)
maven_jar(
name = "jgit_junit",
artifact = "org.eclipse.jgit:org.eclipse.jgit.junit:" + _JGIT_VERS,
repository = _JGIT_REPO,
sha1 = "127074493f6a6ee5e6232a707d9adb523479e3bb",
unsign = True,
)
def jgit_dep(name):
mapping = {
"@jgit_junit//jar": "@jgit//org.eclipse.jgit.junit:junit",
"@jgit_lib//jar:src": "@jgit//org.eclipse.jgit:libjgit-src.jar",
"@jgit_lib//jar": "@jgit//org.eclipse.jgit:jgit",
"@jgit_servlet//jar":"@jgit//org.eclipse.jgit.http.server:jgit-servlet",
"@jgit_archive//jar": "@jgit//org.eclipse.jgit.archive:jgit-archive",
}
if LOCAL_JGIT_REPO:
return mapping[name]
else:
return name
|
Python
| 0
|
@@ -117,16 +117,29 @@
150930-r
+.3-g43ef5dabf
%22%0A%0A_DOC_
@@ -145,26 +145,38 @@
_VERS =
-_JGIT_VERS
+%224.9.2.201712150930-r%22
# Set
@@ -309,29 +309,22 @@
_REPO =
-MAVEN_CENTRAL
+GERRIT
# Leav
@@ -800,111 +800,111 @@
= %22
-a3a2d1df793245ebfc7322db3c2b9828ee184850%22,%0A src_sha1 = %22afa9a25e5502aeeb3b93d773ee445866fb316069
+3f6a1002069be91d99e1b356193aac5bbe5b3da3%22,%0A src_sha1 = %224fbbcd1e2f474917dd0ddbfef2580f474daf4dbd
%22,%0A
@@ -1112,48 +1112,48 @@
= %22
-87b4d287feff8b6b4c4f38a504460d2a3d4624f3
+78425749a618dd82da8dcf19ef9fd14e4318315b
%22,%0A
@@ -1357,48 +1357,48 @@
= %22
-ce4133fb0735d454dc8f6695fe6c6d5eff18a452
+884933af30be5c64187838e43764e0e19309f850
%22,%0A
@@ -1575,48 +1575,48 @@
= %22
-127074493f6a6ee5e623
+d7c24fec0a23842a03a6eea59
2a
-7
07
-d9adb523479e3bb
+fbd1448e783
%22,%0A
|
8ec01cf71db0431c5da5283775e96b73614be8df
|
test ip
|
config/settings/production.py
|
config/settings/production.py
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
# SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
# ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['example.com'])
ALLOWED_HOSTS = ['128.199.158.90']
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# AWS_ACCESS_KEY_ID = env('DJANGO_AWS_ACCESS_KEY_ID')
# AWS_SECRET_ACCESS_KEY = env('DJANGO_AWS_SECRET_ACCESS_KEY')
# AWS_STORAGE_BUCKET_NAME = env('DJANGO_AWS_STORAGE_BUCKET_NAME')
# AWS_AUTO_CREATE_BUCKET = True
# AWS_QUERYSTRING_AUTH = False
# AWS_S3_CALLING_FORMAT = OrdinaryCallingFormat()
# AWS cache settings, don't change unless you know what you're doing:
# AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
# AWS_HEADERS = {
# 'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
# AWS_EXPIRY, AWS_EXPIRY))
# }
# URL that handles the media served from MEDIA_ROOT, used for managing
# stored files.
# MEDIA_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# Static Assets
# ------------------------
# STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# STATICFILES_STORAGE = DEFAULT FILE STORAGE
# STATIC_URL = MEDIA_URL
# EMAIL
# ------------------------------------------------------------------------------
# DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
# default='Django Bootstrap <noreply@example.com>')
# EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
# MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
# MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
# EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[Django Bootstrap] ')
# SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# TEMPLATE CONFIGURATION
# ------------------------------------------------------------------------------
# See:
# https://docs.djangoproject.com/en/dev/ref/templates/api/#django.template.loaders.cached.Loader
TEMPLATES[0]['OPTIONS']['loaders'] = [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', ]),
]
# DATABASE CONFIGURATION
# ------------------------------------------------------------------------------
# Raises ImproperlyConfigured exception if DATABASE_URL not in os.environ
DATABASES['default'] = env.db("DATABASE_URL")
# CACHING
# ------------------------------------------------------------------------------
# Heroku URL does not pass the DB number, so we parse it in
# CACHES = {
# "default": {
# "BACKEND": "django_redis.cache.RedisCache",
# "LOCATION": "{0}/{1}".format(env.cache_url('REDIS_URL', default="redis://127.0.0.1:6379"), 0),
# "OPTIONS": {
# "CLIENT_CLASS": "django_redis.client.DefaultClient",
# "IGNORE_EXCEPTIONS": True, # mimics memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
# }
# }
# }
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
'''
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
'''
# Custom Admin URL, use {% url 'admin:index' %}
ADMIN_URL = env('DJANGO_ADMIN_URL', default="r'^admin/'")
# Your production stuff: Below this line define 3rd party library settings
|
Python
| 0.000009
|
@@ -2022,14 +2022,15 @@
199.
-158.90
+253.141
'%5D%0A#
|
eaefc28fc4b3af73163fb5c93c8a83121758e36a
|
test disable whitenoise storage
|
config/settings/production.py
|
config/settings/production.py
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use mailgun to send emails
- Use Redis on Heroku
'''
from __future__ import absolute_import, unicode_literals
import os
import json
from urllib.parse import urlparse
from boto.s3.connection import OrdinaryCallingFormat
from django.utils import six
from .common import * # noqa
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ
SECRET_KEY = env("DJANGO_SECRET_KEY")
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# django-secure
# ------------------------------------------------------------------------------
INSTALLED_APPS += ("djangosecure", )
SECURITY_MIDDLEWARE = (
'djangosecure.middleware.SecurityMiddleware',
)
# Make sure djangosecure.middleware.SecurityMiddleware is listed first
MIDDLEWARE_CLASSES = SECURITY_MIDDLEWARE + MIDDLEWARE_CLASSES
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True)
SECURE_FRAME_DENY = env.bool("DJANGO_SECURE_FRAME_DENY", default=True)
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True)
SECURE_BROWSER_XSS_FILTER = True
SESSION_COOKIE_SECURE = False
SESSION_COOKIE_HTTPONLY = True
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# SITE CONFIGURATION
# ------------------------------------------------------------------------------
# Hosts/domain names that are valid for this site
ALLOWED_HOSTS = env.list('DJANGO_ALLOWED_HOSTS', default=['source.opennews.org'])
INSTALLED_APPS += ("gunicorn", )
BASE_SITE_URL = 'https://source.opennews.org'
# STORAGE CONFIGURATION
# ------------------------------------------------------------------------------
# Uploaded Media Files
# ------------------------
# NOTE: AWS keys set in common.py
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
# TODO See: https://github.com/jschneier/django-storages/issues/47
# Revert the following and use str after the above-mentioned bug is fixed in
# either django-storage-redux or boto
AWS_HEADERS = {
'Cache-Control': six.b('max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY))
}
# Static Assets
# ------------------------
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
# EMAIL
# ------------------------------------------------------------------------------
DEFAULT_FROM_EMAIL = env('DJANGO_DEFAULT_FROM_EMAIL',
default='Source <noreply@source.opennews.org>')
#EMAIL_BACKEND = 'django_mailgun.MailgunBackend'
#MAILGUN_ACCESS_KEY = env('DJANGO_MAILGUN_API_KEY')
#MAILGUN_SERVER_NAME = env('DJANGO_MAILGUN_SERVER_NAME')
EMAIL_SUBJECT_PREFIX = env("DJANGO_EMAIL_SUBJECT_PREFIX", default='[Source] ')
SERVER_EMAIL = env('DJANGO_SERVER_EMAIL', default=DEFAULT_FROM_EMAIL)
# SEARCH
# ------------------------------------------------------------------------------
ES_URL = urlparse(os.environ.get('BONSAI_URL') or 'http://127.0.0.1:9200/')
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.elasticsearch_backend.ElasticsearchSearchEngine',
'URL': ES_URL.scheme + '://' + ES_URL.hostname + ':443',
'INDEX_NAME': 'haystack',
},
}
if ES_URL.username:
HAYSTACK_CONNECTIONS['default']['KWARGS'] = {"http_auth": ES_URL.username + ':' + ES_URL.password}
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django_bmemcached.memcached.BMemcached',
'LOCATION': os.environ.get('MEMCACHEDCLOUD_SERVERS').split(','),
'OPTIONS': {
'username': os.environ.get('MEMCACHEDCLOUD_USERNAME'),
'password': os.environ.get('MEMCACHEDCLOUD_PASSWORD'),
}
}
}
BASE_URL = env('BASE_URL', default='')
# LOGGING CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s '
'%(process)d %(thread)d %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'mail_admins'],
'propagate': True
}
}
}
|
Python
| 0
|
@@ -2703,16 +2703,17 @@
-------%0A
+#
STATICFI
|
6a5729d566a6e75c97b67a544dd7aed9c857e6de
|
update attachment attributes
|
data_center/models.py
|
data_center/models.py
|
# -*- coding: utf-8 -*-
from datetime import datetime
from django.db import models
class Course(models.Model):
"""Course database schema"""
no = models.CharField(max_length=20, blank=True)
code = models.CharField(max_length=20, blank=True)
eng_title = models.CharField(max_length=200, blank=True)
chi_title = models.CharField(max_length=200, blank=True)
note = models.TextField(blank=True)
objective = models.CharField(max_length=80, blank=True)
time = models.CharField(max_length=20, blank=True)
time_token = models.CharField(max_length=20, blank=True)
teacher = models.CharField(max_length=40, blank=True) # Only save Chinese
room = models.CharField(max_length=20, blank=True)
credit = models.IntegerField(blank=True, null=True)
limit = models.IntegerField(blank=True, null=True)
prerequisite = models.BooleanField(default=False, blank=True)
ge = models.CharField(max_length=80, blank=True)
hit = models.IntegerField(default=0)
syllabus = models.TextField(blank=True) # pure text
def __str__(self):
return self.no
class Department(models.Model):
dept_name = models.CharField(max_length=20, blank=True)
required_course = models.ManyToManyField(Course, blank=True)
def __unicode__(self):
return self.dept_name
class Announcement(models.Model):
TAG_CHOICE = (
('Info', '公告'),
('Bug', '已知問題'),
('Fix', '問題修復'),
)
content = models.TextField(blank=True)
time = models.DateTimeField(default=datetime.now)
tag = models.CharField(max_length=10, choices=TAG_CHOICE, default='Info')
def __unicode__(self):
return '%s|%s' % (self.time, self.tag)
|
Python
| 0.000001
|
@@ -77,16 +77,162 @@
models%0A
+from django.utils.http import urlquote%0A%0Aattachment_url_format = 'https://www.ccxp.nthu.edu.tw/ccxp/INQUIRE/JH/output/6_6.1_6.1.12/%25s.pdf' # noqa%0A
%0A%0Aclass
@@ -1201,54 +1201,223 @@
ext%0A
-%0A def __str__(self):%0A return self.no
+ has_attachment = models.BooleanField(default=False) # has pdf%0A%0A def __str__(self):%0A return self.no%0A%0A @property%0A def attachment_url(self):%0A return attachment_url_format %25 urlquote(self.no)
%0A%0A%0Ac
|
00dec661c39437e2fd031328431ab59ca428aaf3
|
Fix deprecation warning regarding BaseException.message
|
linkedin/utils.py
|
linkedin/utils.py
|
# -*- coding: utf-8 -*-
import requests
from .exceptions import LinkedInError, get_exception_for_error_code
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
import simplejson as json
except ImportError:
try:
from django.utils import simplejson as json
except ImportError:
import json
def enum(enum_type='enum', base_classes=None, methods=None, **attrs):
"""
Generates an enumeration with the given attributes.
"""
# Enumerations cannot be initialized as a new instance
def __init__(instance, *args, **kwargs):
raise RuntimeError('%s types can not be initialized.' % enum_type)
if base_classes is None:
base_classes = ()
if methods is None:
methods = {}
base_classes = base_classes + (object,)
for k, v in methods.iteritems():
methods[k] = classmethod(v)
attrs['enums'] = attrs.copy()
methods.update(attrs)
methods['__init__'] = __init__
return type(enum_type, base_classes, methods)
def to_utf8(st):
if isinstance(st, unicode):
return st.encode('utf-8')
else:
return bytes(st)
def raise_for_error(response):
try:
response.raise_for_status()
except (requests.HTTPError, requests.ConnectionError) as error:
try:
if len(response.content) == 0:
# There is nothing we can do here since LinkedIn has neither sent
# us a 2xx response nor a response content.
return
response = response.json()
if ('error' in response) or ('errorCode' in response):
message = '%s: %s' % (response.get('error', error.message),
response.get('message', 'Unknown Error'))
error_code = response.get('status')
ex = get_exception_for_error_code(error_code)
raise ex(message)
else:
raise LinkedInError(error.message)
except (ValueError, TypeError):
raise LinkedInError(error.message)
HTTP_METHODS = enum('HTTPMethod', GET='GET', POST='POST',
PUT='PUT', DELETE='DELETE', PATCH='PATCH')
|
Python
| 0.000013
|
@@ -1698,29 +1698,26 @@
error',
+str(
error
-.message
+)
),%0A
|
7210d1d7840fb9190d616e1a59af6e9619f93835
|
Add VoiceCloseReasons
|
litecord/enums.py
|
litecord/enums.py
|
"""
enums.py - Various Enums used by litecord
"""
class OP:
"""Gateway OP codes."""
DISPATCH = 0
HEARTBEAT = 1
IDENTIFY = 2
STATUS_UPDATE = 3
VOICE_STATE_UPDATE = 4
VOICE_SERVER_PING = 5
RESUME = 6
RECONNECT = 7
REQUEST_GUILD_MEMBERS = 8
INVALID_SESSION = 9
HELLO = 10
HEARTBEAT_ACK = 11
GUILD_SYNC = 12
class CloseCodes:
"""Websocket close codes used by the gateway."""
UNKNOWN_ERROR = 4000
UNKNOWN_OP = 4001
DECODE_ERROR = 4002
NOT_AUTH = 4003
AUTH_FAILED = 4004
ALREADY_AUTH = 4005
INVALID_SEQ = 4007
RATE_LIMITED = 4008
SESSION_TIMEOUT = 4009
INVALID_SHARD = 4010
SHARDING_REQUIRED = 4011
CloseReasons = {
CloseCodes.UNKNOWN_OP: 'Unknown OP code',
CloseCodes.NOT_AUTH: 'Not authenticated',
CloseCodes.AUTH_FAILED: 'Failed to authenticate',
CloseCodes.ALREADY_AUTH: 'Already identified',
CloseCodes.INVALID_SEQ: 'Invalid sequence',
CloseCodes.RATE_LIMITED: 'Rate limited',
CloseCodes.SESSION_TIMEOUT: 'Session timed out',
CloseCodes.INVALID_SHARD: 'Invalid Shard',
CloseCodes.SHARDING_REQUIRED: 'Sharding required',
}
class VoiceOP:
"""Voice OP codes.
These OP codes are used in the Voice Websocket.
"""
IDENTIFY = 0
SELECT_PROTOCOL = 1
READY = 2
HEARTBEAT = 3
SESSION_DESCRIPTION = 4
SPEAKING = 5
HEARTBEAT_ACK = 6
RESUME = 7
HELLO = 8
RESUMED = 9
CLIENT_DISCONNECT = 13
class VoiceWSCloseCodes:
"""Close codes used by the Voice WebSocket."""
UNKNOWN_OP = 4001
NOT_AUTH = 4003
AUTH_FAILED = 4004
ALREADY_AUTH = 4005
INVALID_SESSION = 4006
SESSION_TIMEOUT = 4009
SERVER_NOT_FOUND = 4011
UNKNOWN_PROTOCOL = 4012
DISCONNECTED = 4014
SERVER_CRASH = 4015
UNKNOWN_ENC_MODE = 4016
class AppType:
"""Application Type."""
BOT = 0
class ChannelType:
"""Channel Type."""
GUILD_TEXT = 0
DM = 1
GUILD_VOICE = 2
GROUP_DM = 3
GUILD_CATEGORY = 4
class MessageType:
"""Message Type.
``DEFAULT`` is the one that users can usually send.
The rest are system messages.
"""
DEFAULT = 0
RECIPIENT_ADD = 1
RECIPIENT_REMOVE = 2
CALL = 3
CHANNEL_NAME_CHANGE = 4
CHANNEL_ICON_CHANGE = 5
CHANNEL_PINNED_MESSAGE = 6
GUILD_MEMBER_JOIN = 7
|
Python
| 0.000001
|
@@ -1835,16 +1835,710 @@
= 4016%0A%0A
+VoiceCloseReasons = %7B%0A VoiceWSCloseCodes.UNKNOWN_OP: 'Unknown OP code sent',%0A VoiceWSCloseCodes.NOT_AUTH: 'Not authenticated..',%0A VoiceWSCloseCodes.AUTH_FAILED: 'Authentication failed',%0A VoiceWSCloseCodes.ALREADY_AUTH: 'Already Authenticated',%0A VoiceWSCloseCodes.INVALID_SESSION: 'Invalid session provided',%0A VoiceWSCloseCodes.SESSION_TIMEOUT: 'Session Timeout',%0A VoiceWSCloseCodes.SERVER_NOT_FOUND: 'Server not found to connect',%0A VoiceWSCloseCodes.UNKNOWN_PROTOCOL: 'Unknown Protocol',%0A VoiceWSCloseCodes.DISCONNECTED: 'Disconnected from Voice',%0A VoiceWSCloseCodes.SERVER_CRASH: 'Server crashed',%0A VoiceWSCloseCodes.UNKNOWN_ENC_CODE: 'Unknown encryption',%0A%7D%0A%0A
class Ap
|
17028a6ae567e9d67dbaa99b86a956fefdd3e792
|
fix pynotify
|
livereload/app.py
|
livereload/app.py
|
import os
import logging
import tornado.web
import tornado.options
import tornado.ioloop
from tornado import escape
from tornado import websocket
from tornado.util import ObjectDict
from livereload.task import Task
ROOT = os.path.abspath(os.path.dirname(__file__))
STATIC_PATH = os.path.join(ROOT, 'static')
NOTIFIER = None
APPLICATION_ICON = None
def _get_growl():
import gntp.notifier
growl = gntp.notifier.GrowlNotifier(
applicationName='Python LiveReload',
notifications=['Message'],
defaultNotifications=['Message'],
applicationIcon=APPLICATION_ICON,
)
result = growl.register()
if result is not True:
return None
def notifier(message):
return growl.notify(
'Message',
'LiveReload',
message,
icon=APPLICATION_ICON,
)
return notifier
def _get_notifyOSD():
import pynotify
pynotify.init()
return lambda message: pynotify.Notification('LiveReload', message).show()
def send_notify(message):
global NOTIFIER
if NOTIFIER:
return NOTIFIER(message)
try:
NOTIFIER = _get_growl()
except ImportError:
NOTIFIER = _get_notifyOSD()
except:
NOTIFIER = logging.info
return NOTIFIER(message)
class LiveReloadHandler(websocket.WebSocketHandler):
waiters = set()
_watch_running = False
def allow_draft76(self):
return True
def on_close(self):
if self in LiveReloadHandler.waiters:
LiveReloadHandler.waiters.remove(self)
send_notify('There are %s waiters left' % len(self.waiters))
def send_message(self, message):
if isinstance(message, dict):
message = escape.json_encode(message)
try:
self.write_message(message)
except:
logging.error('Error sending message', exc_info=True)
def watch_tasks(self):
path = Task.watch()
if path:
send_notify('Reload %s waiters\nChanged %s' % \
(len(LiveReloadHandler.waiters), path))
msg = {
'command': 'reload',
'path': path,
'liveCSS': True
}
for waiter in LiveReloadHandler.waiters:
try:
waiter.write_message(msg)
except:
logging.error('Error sending message', exc_info=True)
LiveReloadHandler.waiters.remove(waiter)
def on_message(self, message):
message = ObjectDict(escape.json_decode(message))
if message.command == 'hello':
handshake = {}
handshake['command'] = 'hello'
protocols = message.protocols
protocols.append(
'http://livereload.com/protocols/2.x-remote-control'
)
handshake['protocols'] = protocols
handshake['serverName'] = 'livereload-tornado'
self.send_message(handshake)
if message.command == 'info' and 'url' in message:
send_notify('Browser Connected: %s' % message.url)
LiveReloadHandler.waiters.add(self)
if not LiveReloadHandler._watch_running:
try:
execfile('Guardfile')
except:
Task.add(os.getcwd())
LiveReloadHandler._watch_running = True
logging.info('Start watching changes')
tornado.ioloop.PeriodicCallback(self.watch_tasks, 500).start()
handlers = [
(r'/livereload', LiveReloadHandler),
(r'/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_PATH}),
]
def main():
tornado.options.parse_command_line()
app = tornado.web.Application(handlers=handlers)
app.listen(35729)
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main()
|
Python
| 0.000031
|
@@ -932,16 +932,35 @@
fy.init(
+'Python LiveReload'
)%0A re
|
004c9c11441f59590121a5428fce29ccde3f7694
|
Fix error with keyword argument
|
mesh_utils.py
|
mesh_utils.py
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# Booltron super add-on for super fast booleans.
# Copyright (C) 2014-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
import bpy
import bmesh
from mathutils import bvhtree
def delete_loose(bm):
for v in bm.verts:
if v.is_wire or not v.link_edges:
bm.verts.remove(v)
class MeshUtils:
def object_overlap(self, obs):
depsgraph = bpy.context.depsgraph
bm = bmesh.new()
for ob in obs:
me = ob.to_mesh(depsgraph, True)
me.transform(ob.matrix_world)
bm.from_mesh(me)
bpy.data.meshes.remove(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
tree = bvhtree.BVHTree.FromBMesh(bm, epsilon=0.00001)
overlap = tree.overlap(tree)
bm.free()
return bool(overlap)
def object_prepare(self):
ob1 = bpy.context.object
obs = bpy.context.selected_objects
if ob1.select_get():
obs.remove(ob1)
if self.keep_objects:
# TODO local view
# space_data = bpy.context.space_data
for ob in obs:
ob_copy = ob.copy()
ob_copy.data = ob.data.copy()
for coll in ob.users_collection:
coll.objects.link(ob_copy)
# TODO local view
# if self.local_view:
# base.layers_from_view(space_data)
ob_copy.select_set(True)
ob.select_set(False)
bpy.ops.object.make_single_user(object=True, obdata=True)
bpy.ops.object.convert(target="MESH")
if self.pos_correct:
self.object_pos_correct(obs)
def mesh_prepare(self, ob, select=False):
me = ob.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
delete_loose(bm)
bmesh.ops.holes_fill(bm, edges=bm.edges)
if self.triangulate:
bmesh.ops.triangulate(bm, faces=bm.faces, quad_method=3)
for f in bm.faces:
f.select = select
bm.to_mesh(me)
bm.free()
def mesh_cleanup(self, ob):
me = ob.data
bm = bmesh.new()
bm.from_mesh(me)
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.0001)
delete_loose(bm)
bm.to_mesh(me)
bm.free()
def mesh_check(self, ob):
bm = bmesh.new()
bm.from_mesh(ob.data)
for e in bm.edges:
if not e.is_manifold:
self.report({"ERROR"}, "Boolean operation result is non-manifold")
bm.free()
return True
bm.free()
return False
|
Python
| 0.000017
|
@@ -2738,17 +2738,28 @@
_method=
-3
+%22SHORT_EDGE%22
)%0A%0A
|
36e8335bc146e4eda6801b2c148410c3ea620ae5
|
Update scipy.py
|
wigs/scipy.py
|
wigs/scipy.py
|
class scipy(PythonWig):
tarball_uri = 'https://github.com/scipy/scipy/releases/download/v$RELEASE_VERSION$/scipy-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v0.18.1'
git_uri = 'https://github.com/scipy/scipy'
dependencies = ['numpy']
|
Python
| 0.000002
|
@@ -237,8 +237,580 @@
numpy'%5D%0A
+%0A%09optional_dependencies = %5B'openblas'%5D%0A%09supported_features = %5B'openblas'%5D%0A%09default_features = %5B'+openblas'%5D%0A%0A%09def setup(self):%0A%09%09self.site_cfg = %5B%5D%0A%0A%09def switch_openblas_on(self):%0A%09%09self.require('openblas')%0A%09%09include_dirs = map(os.path.abspath, P.prefix_include_dirs)%0A%09%09lib_dirs = map(os.path.abspath, P.prefix_lib_dirs)%0A%09%09self.site_cfg += %5B%0A%09%09%09'%5Bopenblas%5D',%0A%09%09%09'libraries = openblas',%0A%09%09%09'include_dirs = %25s' %25 os.path.pathsep.join(include_dirs),%0A%09%09%09'library_dirs = %25s' %25 os.path.pathsep.join(lib_dirs),%0A%09%09%09'runtime_library_dirs = %25s' %25 os.path.pathsep.join(lib_dirs)%0A%09%09%5D%0A
|
374c386a6b2dd1ad1ba75ba70009de6c7ee3c3fc
|
Add process_request method to Application
|
restalchemy/api/applications.py
|
restalchemy/api/applications.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2014 Eugene Frolov <eugene@frolov.net.ru>
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import dec
from restalchemy.api import resources
from restalchemy.api import routes
DEFAULT_CONTENT_TYPE = 'application/json'
class WSGIApp(object):
def __init__(self, route_class):
super(WSGIApp, self).__init__()
self._main_route = routes.route(route_class)
resources.ResourceMap.set_resource_map(
routes.Route.build_resource_map(route_class))
@dec.wsgify
def __call__(self, req):
return self._main_route(req).do()
Application = WSGIApp
|
Python
| 0.000002
|
@@ -1096,16 +1096,95 @@
lass))%0A%0A
+ def process_request(self, req):%0A return self._main_route(req).do()%0A%0A
@dec
@@ -1240,36 +1240,35 @@
rn self.
-_main_route(req).do(
+process_request(req
)%0A%0A%0AAppl
|
846f9bbb0214d2dadea5be233189b3384e8c2ee4
|
set app_label on models
|
restclients/models/wheniwork.py
|
restclients/models/wheniwork.py
|
from django.db import models
from datetime import time
class Account(models.Model):
id = models.PositiveIntegerField(primary_key=True)
master = models.ForeignKey('self')
company = models.CharField(max_length=500)
class Meta:
db_table = "restclients_wheniwork_account"
class User(models.Model):
id = models.PositiveIntegerField(primary_key=True)
first_name = models.CharField(max_length=100, null=True)
last_name = models.CharField(max_length=100, null=True)
email = models.CharField(max_length=100, null=True)
employee_code = models.CharField(max_length=100, null=True)
class Meta:
db_table = "restclients_wheniwork_user"
class Location(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=100, null=True)
address = models.CharField(max_length=100)
class Meta:
db_table = "restclients_wheniwork_location"
class Position(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=100, null=True)
class Meta:
db_table = "restclients_wheniwork_position"
class Site(models.Model):
id = models.PositiveIntegerField(primary_key=True)
name = models.CharField(max_length=100, null=True)
location = models.ForeignKey(Location)
address = models.CharField(max_length=100)
class Meta:
db_table = "restclients_wheniwork_site"
class Shift(models.Model):
id = models.PositiveIntegerField(primary_key=True)
account = models.ForeignKey(Account)
user = models.ForeignKey(User)
location = models.ForeignKey(Location)
position = models.ForeignKey(Position)
site = models.ForeignKey(Site)
start_time = models.DateTimeField()
end_time = models.DateTimeField()
notes = models.CharField(max_length=350)
class Meta:
db_table = "restclients_wheniwork_shifts"
class Request(models.Model):
STATUS_PENDING = 0
STATUS_CANCELED = 1
STATUS_ACCEPTED = 2
STATUS_EXPIRED = 3
TYPE_UNPAIDTIMEOFF = 0
TYPE_PAIDTIMEOFF = 1
TYPE_SICKLEAVE = 2
TYPE_HOLIDAY = 3
id = models.PositiveIntegerField(primary_key=True)
account = models.ForeignKey(Account)
user = models.ForeignKey(User)
creator = models.ForeignKey(User, related_name='+')
status = models.PositiveSmallIntegerField(choices=(
(STATUS_PENDING, 'Pending'),
(STATUS_CANCELED, 'Canceled'),
(STATUS_ACCEPTED, 'Accepted'),
(STATUS_EXPIRED, 'Expired')))
type = models.PositiveSmallIntegerField(choices=(
(TYPE_UNPAIDTIMEOFF, 'Unpaid Time Off'),
(TYPE_PAIDTIMEOFF, 'Paid Time Off'),
(TYPE_SICKLEAVE, 'Sick Leave'),
(TYPE_HOLIDAY, 'Holiday')))
start_time = models.DateTimeField()
end_time = models.DateTimeField()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
canceled_by = models.ForeignKey(User, related_name='+')
hours = models.DecimalField(max_digits=5, decimal_places=2)
def is_allday(self, tz):
if self.start_time.astimezone(tz).time() == time(0, 0, 0) \
and self.end_time.astimezone(tz).time() == time(23, 59, 59):
return True
return False
class Meta:
db_table = "restclients_wheniwork_request"
class Message(models.Model):
id = models.PositiveIntegerField(primary_key=True)
account = models.ForeignKey(Account)
user = models.ForeignKey(User)
request = models.ForeignKey(Request)
swap_id = models.PositiveIntegerField()
conversation_id = models.PositiveIntegerField()
title = models.CharField(max_length=100)
content = models.TextField()
created_at = models.DateTimeField()
updated_at = models.DateTimeField()
type = models.PositiveSmallIntegerField()
class Meta:
db_table = "restclients_wheniwork_message"
|
Python
| 0.000001
|
@@ -288,16 +288,50 @@
ccount%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -711,16 +711,50 @@
k_user%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -999,24 +999,58 @@
k_location%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass Posi
@@ -1248,16 +1248,50 @@
sition%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -1575,16 +1575,50 @@
k_site%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -2080,16 +2080,50 @@
shifts%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -3527,16 +3527,50 @@
equest%22%0A
+ app_label = 'restclients'%0A
%0A%0Aclass
@@ -4073,32 +4073,32 @@
class Meta:%0A
-
db_table
@@ -4132,8 +4132,42 @@
essage%22%0A
+ app_label = 'restclients'%0A
|
b7e8af6ef92c0244bd5121c528e3e85441b0d835
|
Disable test/mac/gyptest-objc-gc.py when using Xcode 5.1
|
test/mac/gyptest-objc-gc.py
|
test/mac/gyptest-objc-gc.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that GC objc settings are handled correctly.
"""
import TestGyp
import sys
if sys.platform == 'darwin':
# set |match| to ignore build stderr output.
test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'],
match = lambda a, b: True)
CHDIR = 'objc-gc'
test.run_gyp('test.gyp', chdir=CHDIR)
build_error_code = {
'xcode': [1, 65], # 1 for xcode 3, 65 for xcode 4 (see `man sysexits`)
'make': 2,
'ninja': 1,
}[test.format]
test.build('test.gyp', 'gc_exe_fails', chdir=CHDIR, status=build_error_code)
test.build(
'test.gyp', 'gc_off_exe_req_lib', chdir=CHDIR, status=build_error_code)
test.build('test.gyp', 'gc_req_exe', chdir=CHDIR)
test.run_built_executable('gc_req_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe_req_lib', chdir=CHDIR)
test.run_built_executable('gc_exe_req_lib', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_exe', chdir=CHDIR)
test.run_built_executable('gc_exe', chdir=CHDIR, stdout="gc on: 1\n")
test.build('test.gyp', 'gc_off_exe', chdir=CHDIR)
test.run_built_executable('gc_off_exe', chdir=CHDIR, stdout="gc on: 0\n")
test.pass_test()
|
Python
| 0.00004
|
@@ -250,16 +250,31 @@
TestGyp
+%0Aimport TestMac
%0A%0Aimport
@@ -469,16 +469,167 @@
True)%0A%0A
+ # Xcode 5.1 removed support for garbage-collection:%0A # error: garbage collection is no longer supported%0A if TestMac.Xcode.Version() %3C '0510':%0A%0A
CHDIR
@@ -642,16 +642,18 @@
c-gc'%0A
+
+
test.run
@@ -683,16 +683,18 @@
CHDIR)%0A%0A
+
build_
@@ -708,16 +708,18 @@
ode = %7B%0A
+
'xco
@@ -790,16 +790,18 @@
s%60)%0A
+
+
'make':
@@ -803,16 +803,18 @@
ke': 2,%0A
+
'nin
@@ -821,16 +821,18 @@
ja': 1,%0A
+
%7D%5Btest
@@ -839,24 +839,26 @@
.format%5D%0A%0A
+
+
test.build('
@@ -922,16 +922,18 @@
r_code)%0A
+
test.b
@@ -944,16 +944,18 @@
(%0A
+
'test.gy
@@ -1017,24 +1017,26 @@
or_code)%0A%0A
+
+
test.build('
@@ -1065,32 +1065,34 @@
', chdir=CHDIR)%0A
+
test.run_built
@@ -1144,32 +1144,34 @@
=%22gc on: 1%5Cn%22)%0A%0A
+
test.build('te
@@ -1204,32 +1204,34 @@
chdir=CHDIR)%0A
+
+
test.run_built_e
@@ -1236,24 +1236,33 @@
_executable(
+%0A
'gc_exe_req_
@@ -1294,32 +1294,34 @@
=%22gc on: 1%5Cn%22)%0A%0A
+
test.build('te
@@ -1344,32 +1344,34 @@
', chdir=CHDIR)%0A
+
test.run_built
@@ -1423,24 +1423,26 @@
on: 1%5Cn%22)%0A%0A
+
test.build
@@ -1481,16 +1481,18 @@
=CHDIR)%0A
+
test.r
|
197f8e77257dda2a21fd401ad1dc4a3073586434
|
Allow to use an environment variable if hue not found
|
installCGSapps.py
|
installCGSapps.py
|
#!/usr/bin/python
__author__ = 'CGS'
import os, shutil, sys, distutils.core, subprocess
# Some configuration needed for this file
apps_directory = ""
apps = {"calculator": "apps/calculator"}
# TODO: better management of errors
# Some basic checks
if not 'SUDO_UID' in os.environ.keys():
sys.exit("This program requires super user privileges.")
if len(sys.argv) <= 1:
sys.exit("Please, give the name of the app you want to install. Choose among the followings: " +
str(apps.keys()))
if sys.argv[0] != "installCGSapps.py" and "/" in sys.argv[0]:
# If the script was not launch in the current directory, we have to make some modifications
tmp = sys.argv[0].split("/")
script_name = tmp.pop()
app_directory_prefix = sys.argv[0].replace("/"+script_name,"/")
else:
app_directory_prefix = ""
# We take the folder where hue is installed
try:
hue_directory = subprocess.Popen("whereis hue", stdin=False, shell=True, stdout=subprocess.PIPE)
hue_directory = str(hue_directory.communicate()[0]).split(" ")[2].strip()
except:
hue_directory = "/usr/lib/hue"
if os.path.exists(hue_directory) and not os.path.exists(hue_directory+"/myapps"):
try:
os.makedirs(hue_directory+"/myapps")
except:
sys.exit("Impossible to create the folder 'myapps' in '"+hue_directory+"'.")
apps_directory = hue_directory + "/myapps"
# Some basic checks first
if not os.path.exists(hue_directory):
sys.exit("This installation file did not find the hue apps directory, please edit the variable 'hue_directory'"
" in this installCGSapps.py file.")
# We install each application
for i in xrange(1, len(sys.argv)):
app_name = sys.argv[i]
if not app_name in apps:
sys.exit("Invalid app name. Choose among the followings: "+str(apps.keys()))
if not os.path.exists(app_directory_prefix+apps[app_name]):
sys.exit("It seems the source of the app '"+app_name+"' is missing from the uncompressed zip.")
# We try to delete the eventual old folder
app_directory = apps_directory+"/"+app_name
if os.path.exists(app_directory):
reinstall = raw_input("It seems the '"+app_name+"' already exists. Do you want to reinstall it [Y/n]?")
if reinstall != "Y" and reinstall != "y":
print("Installation of '"+app_name+"' aborted.")
continue
else:
try:
shutil.rmtree(app_directory)
except Exception as e:
print(e.message)
sys.exit("Impossible to delete the folder "+app_directory+". Check the access rights.")
# We create the app
# TODO: we do not catch correctly the errors of 'subprocess'
try:
print("Creating the app '"+app_name+"'...")
app_install = subprocess.Popen("cd " + apps_directory + " && " + hue_directory +
"/build/env/bin/hue create_desktop_app " + app_name,
stdin=False, shell=True, stdout=subprocess.PIPE)
app_install.communicate()
app_install = subprocess.Popen("cd " + apps_directory + " && python " + hue_directory +
"/tools/app_reg/app_reg.py --install " + app_name +
" && service hue restart", stdin=False, shell=True, stdout=subprocess.PIPE)
app_install.communicate()
except Exception as e:
print(e.message)
sys.exit("Error while creating the app...")
# We copy the content of the application to the new directory
app_src = app_directory_prefix+apps[app_name]
try:
print("Copying source code to app folder...")
distutils.dir_util.copy_tree(app_src, app_directory)
except:
sys.exit("Impossible to copy data from '"+app_src+"' to '"+app_directory+"'.")
# The happy end
print("Installation successful.")
|
Python
| 0.000003
|
@@ -1092,16 +1092,137 @@
b/hue%22%0A%0A
+if not os.path.exists(hue_directory) and %22HUE_DIRECTORY%22 in os.environ:%0A hue_directory = os.environ%5B%22HUE_DIRECTORY%22%5D%0A%0A
if os.pa
@@ -1612,21 +1612,16 @@
the hue
-apps
director
@@ -1634,86 +1634,64 @@
ase
-edit the variable 'hue_directory'%22%0A %22 in this installCGSapps.py fi
+create a HUE_DIRECTORY environment%22%0A %22variab
le.%22
|
da557b0b26d144095988a8809a97b83791077f20
|
fix number
|
biblioteca/views.py
|
biblioteca/views.py
|
from django.shortcuts import render
from .models import Temas, Biblioteca
from django.shortcuts import get_object_or_404
from django.db.models import Q
# Create your views here.
def index(request,template='biblioteca/index.html',slug=None):
temas = Temas.objects.all()
ultimas_guias = Biblioteca.objects.filter(tipo_documento=1).order_by('-fecha')[:6]
return render(request, template, locals())
def buscar_guia(request, template='biblioteca/lista_guias.html'):
buscar_palabra = request.GET.get('q')
resultado = Biblioteca.objects.filter(tipo_documento=1).filter(Q(titulo__icontains=buscar_palabra) | Q(descripcion__icontains=buscar_palabra))
return render(request, template, locals())
def buscar_tema(request, template='biblioteca/lista_guias.html', id=None):
temas = Temas.objects.all()
buscar_palabra = get_object_or_404(Temas,id=id)
resultado = Biblioteca.objects.filter(tema=buscar_palabra)
return render(request, template, locals())
def detalle_guia(request,template='biblioteca/detalle.html',slug=None):
temas = Temas.objects.all()
la_guia = get_object_or_404(Biblioteca, slug=slug)
return render(request, template, locals())
|
Python
| 0.000047
|
@@ -349,9 +349,10 @@
')%5B:
-6
+12
%5D%0A%0A%09
|
1412c1a15f4b8b09beb4b7eb4b3245eaeb343a14
|
Bump sleep time for Github API reader
|
src/api_readers/github_daemon.py
|
src/api_readers/github_daemon.py
|
from api_reader_daemon import APIReaderDaemon
import datetime
import time
from models import GithubRepo
from models import GithubRepoEvent
from github import Github
class GithubReaderDaemon(APIReaderDaemon):
def __init__(self, **kwargs):
# neh. don't need it.
pass
def start(self):
while True:
a_minute_ago = datetime.datetime.now() - datetime.timedelta(seconds = 60)
repos_to_read = self.session.query(GithubRepo).all()
for repo in repos_to_read:
try:
gh = Github()
e_repo = gh.get_repo(repo.gh_username + '/' + repo.gh_repo)
events = e_repo.get_events()
if events[0].created_at > a_minute_ago and events[0].type == 'PushEvent':
author = events[0].actor
commit = events[0].payload['commits'][0]['message']
new_event = GithubRepoEvent(repo.id, author.name,
author.avatar_url, commit)
self.session.add(new_event)
except:
continue
self.session.commit()
time.sleep(60)
def stop(self):
# or whatever
pass
if __name__ == '__main__':
GithubReaderDaemon().start()
|
Python
| 0
|
@@ -1210,17 +1210,18 @@
e.sleep(
-6
+12
0)%0A%0A
|
d01430e40d923fdced0d753822a1f62fe69a916e
|
add analytics folder to path
|
bigbang/__init__.py
|
bigbang/__init__.py
|
Python
| 0.000001
|
@@ -0,0 +1,23 @@
+from . import analysis%0A
|
|
17147f02abdb50f6df6398c8c3c750d858c1c758
|
fix docs
|
doc/ext/nova_autodoc.py
|
doc/ext/nova_autodoc.py
|
import gettext
import os
gettext.install('nova')
from nova import utils
def setup(app):
rootdir = os.path.abspath(app.srcdir + '/..')
print "**Autodocumenting from %s" % rootdir
rv = utils.execute('cd %s && ./generate_autodoc_index.sh' % rootdir)
print rv[0]
|
Python
| 0.000008
|
@@ -182,16 +182,38 @@
rootdir%0A
+ os.chdir(rootdir)%0A
rv =
@@ -232,17 +232,8 @@
te('
-cd %25s &&
./ge
@@ -256,26 +256,16 @@
ndex.sh'
- %25 rootdir
)%0A pr
|
d72a1dde759e4993f7c75764fd36668192b387e5
|
Clean up middleware code
|
middleware.py
|
middleware.py
|
from mixcloud.speedbar.modules.base import RequestTrace
from django.utils.encoding import smart_unicode, smart_str
from django.utils.html import escapejs
from django.core.urlresolvers import reverse
from gargoyle import gargoyle
import re
HTML_TYPES = ('text/html', 'application/xhtml+xml')
METRIC_PLACEHOLDER_RE = re.compile('<span data-module="(?P<module>[^"]+)" data-metric="(?P<metric>[^"]+)"></span>')
class SpeedbarMiddleware(object):
def process_request(self, request):
RequestTrace.instance().stacktracer.root.label = '%s %s' % (request.method, request.path)
def process_response(self, request, response):
request_trace = RequestTrace.instance()
def sanitize(string):
return string.title().replace(' ','-')
metrics = dict((key, module.get_metrics()) for key, module in request_trace.modules.items())
for module, module_values in metrics.items():
for key, value in module_values.items():
response['X-Mixcloud-%s-%s' % (sanitize(module), sanitize(key))] = value
if hasattr(request, 'user') and request.user.is_staff:
if 'gzip' not in response.get('Content-Encoding', '') and response.get('Content-Type', '').split(';')[0] in HTML_TYPES:
# Force render of response (from lazy TemplateResponses) before speedbar is injected
if hasattr(response, 'render'):
response.render()
content = smart_unicode(response.content)
def replace_placeholder(match):
module = match.group('module')
metric = match.group('metric')
return unicode(metrics[module][metric])
content = METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)
if gargoyle.is_active('speedbar:panel', request):
panel_url = reverse('speedbar_panel', args=[request_trace.id])
content = content.replace(
u'<script data-speedbar-panel-url-placeholder></script>',
u'<script>var _speedbar_panel_url = "%s";</script>' % (escapejs(panel_url),))
if gargoyle.is_active('speedbar:trace', request):
response['X-TraceUrl'] = reverse('speedbar_trace', args=[request_trace.id])
response.content = smart_str(content)
if response.get('Content-Length', None):
response['Content-Length'] = len(response.content)
return response
|
Python
| 0.001611
|
@@ -682,91 +682,8 @@
e()%0A
-%0A def sanitize(string):%0A return string.title().replace(' ','-')%0A%0A
@@ -779,24 +779,25 @@
tems())%0A
+%0A
for modu
@@ -792,195 +792,52 @@
-for module, module_values in metrics.items():%0A for key, value in module_values.items():%0A response%5B'X-Mixcloud-%25s-%25s' %25 (sanitize(module), sanitize(key))%5D = value
+self.add_response_headers(response, metrics)
%0A%0A
@@ -1296,284 +1296,390 @@
-def replace_placeholder(match):%0A module = match.group('module')%0A metric = match.group('metric')%0A return unicode(metrics%5Bmodule%5D%5Bmetric%5D)%0A content = METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)%0A
+content = self.replace_templatetag_placeholders(content, metrics)%0A%0A # Note: The URLs returned here do not exist at this point. The relevant data is added to the cache by a signal handler%0A # once all page processing is finally done. This means it is possible summary values displayed and the detailed%0A # break down won't quite correspond.
%0A
@@ -2264,33 +2264,32 @@
rt_str(content)%0A
-%0A
@@ -2421,12 +2421,1033 @@
n response%0A%0A
+ def add_response_headers(self, response, metrics):%0A %22%22%22%0A Adds all summary metrics to the response headers, so they can be stored in nginx logs if desired.%0A %22%22%22%0A def sanitize(string):%0A return string.title().replace(' ','-')%0A%0A for module, module_values in metrics.items():%0A for key, value in module_values.items():%0A response%5B'X-Mixcloud-%25s-%25s' %25 (sanitize(module), sanitize(key))%5D = value%0A%0A def replace_templatetag_placeholders(self, content, metrics):%0A %22%22%22%0A The templatetags defined in this module add placeholder values which we replace with true values here. They%0A cannot just insert the values directly as not all processing may have happened by that point.%0A %22%22%22%0A def replace_placeholder(match):%0A module = match.group('module')%0A metric = match.group('metric')%0A return unicode(metrics%5Bmodule%5D%5Bmetric%5D)%0A return METRIC_PLACEHOLDER_RE.sub(replace_placeholder, content)%0A
|
231358916869a2986696a7bbcf08530404d655e7
|
Prepend account IDs to generate user configs
|
euca2ools/commands/iam/createaccesskey.py
|
euca2ools/commands/iam/createaccesskey.py
|
# Copyright 2009-2015 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from requestbuilder import Arg
import six
from euca2ools.commands.iam import IAMRequest, AS_ACCOUNT, arg_user
from euca2ools.commands.iam.getuser import GetUser
import euca2ools.exceptions
import euca2ools.util
class CreateAccessKey(IAMRequest):
DESCRIPTION = 'Create a new access key for a user'
ARGS = [arg_user(help='''user the new key will belong to
(default: current user)'''),
Arg('-w', '--write-config', action='store_true', route_to=None,
help='''output access keys and region information in the
form of a euca2ools.ini(5) configuration file instead of
by themselves'''),
Arg('-d', '--domain', route_to=None, help='''the DNS domain
to use for region information in configuration file
output (default: based on IAM URL)'''),
AS_ACCOUNT]
def postprocess(self, result):
if self.args.get('write_config'):
parsed = six.moves.urllib.parse.urlparse(self.service.endpoint)
if not self.args.get('domain'):
dnsname = parsed.netloc.split(':')[0]
if all(label.isdigit() for label in dnsname.split('.')):
msg = ('warning: IAM URL {0} refers to a specific IP; '
'for a complete configuration file supply '
'the region\'s DNS domain with -d/--domain'
.format(self.service.endpoint))
print >> sys.stderr, msg
else:
self.args['domain'] = parsed.netloc.split('.', 1)[1]
configfile = six.moves.configparser.SafeConfigParser()
if self.args.get('domain'):
if ':' not in self.args['domain'] and ':' in parsed.netloc:
# Add the port
self.args['domain'] += ':' + parsed.netloc.split(':')[1]
# This uses self.config.region instead of
# self.service.region_name because the latter is a global
# service in AWS and thus frequently deferred with "use"
# statements. That may eventually happen in eucalyptus
# cloud federations as well.
#
# At some point an option that lets one choose a region
# name at the command line may be useful, but until
# someone asks for it let's not clutter it up for now.
region_name = self.config.region or self.args['domain']
section = 'region {0}'.format(region_name.split(':')[0])
configfile.add_section(section)
for service in sorted(euca2ools.util.generate_service_names()):
url = '{scheme}://{service}.{domain}/'.format(
scheme=parsed.scheme, domain=self.args['domain'],
service=service)
configfile.set(section, '{0}-url'.format(service), url)
section = 'user {0}'.format(result['AccessKey'].get('UserName')
or 'root')
configfile.add_section(section)
configfile.set(section, 'key-id',
result['AccessKey']['AccessKeyId'])
configfile.set(section, 'secret-key',
result['AccessKey']['SecretAccessKey'])
account_id = self.get_user_account_id()
if account_id:
configfile.set(section, 'account-id', account_id)
result['configfile'] = configfile
def print_result(self, result):
if self.args.get('write_config'):
result['configfile'].write(sys.stdout)
else:
print result['AccessKey']['AccessKeyId']
print result['AccessKey']['SecretAccessKey']
def get_user_account_id(self):
req = GetUser.from_other(
self, UserName=self.params['UserName'],
DelegateAccount=self.params.get('DelegateAccount'))
try:
response = req.main()
except euca2ools.exceptions.AWSError as err:
if err.status_code == 403:
msg = ('warning: unable to retrieve account ID ({0})'
.format(err.message))
print >> sys.stderr, msg
return None
raise
arn = response['User']['Arn']
return arn.split(':')[4]
|
Python
| 0
|
@@ -3912,32 +3912,39 @@
+region_
section = 'regio
@@ -4015,32 +4015,39 @@
ile.add_section(
+region_
section)%0A
@@ -4328,32 +4328,39 @@
configfile.set(
+region_
section, '%7B0%7D-ur
@@ -4382,14 +4382,50 @@
ce),
+%0A
url)%0A
+%0A
@@ -4436,36 +4436,20 @@
+u
se
-ction = 'user %7B0%7D'.format(
+r_name =
resu
@@ -4479,16 +4479,26 @@
erName')
+ or 'root'
%0A
@@ -4506,45 +4506,227 @@
- or 'root'
+account_id = self.get_user_account_id()%0A if account_id:%0A user_section = 'user %7B0%7D:%7B1%7D'.format(account_id, user_name)%0A else:%0A user_section = 'user %7B0%7D'.format(user_name
)%0A
@@ -4758,16 +4758,21 @@
section(
+user_
section)
@@ -4791,32 +4791,37 @@
configfile.set(
+user_
section, 'key-id
@@ -4905,32 +4905,37 @@
configfile.set(
+user_
section, 'secret
@@ -5012,60 +5012,8 @@
'%5D)%0A
- account_id = self.get_user_account_id()%0A
@@ -5066,16 +5066,21 @@
ile.set(
+user_
section,
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.