| id | content |
|---|---|
1636185
|
# Python 2 script: prints N = 50, then a 50x50 matrix with 0 on the diagonal
# and distinct increasing integers everywhere else
print 50
cnt = 1
for i in xrange(50):
for j in xrange(50):
if i == j: print 0,
else:
print cnt,
cnt += 1
print
|
1636188
|
import unittest
from followthemoney.types import registry
numbers = registry.number
class NumberTest(unittest.TestCase):
def test_cast_num(self):
self.assertEqual(numbers.to_number("1,00,000"), 100000.0)
self.assertEqual(numbers.to_number(" -999.0"), -999.0)
self.assertEqual(numbers.to_number("- 1,00,000.234"), -100000.234)
self.assertEqual(numbers.to_number("99"), 99.0)
self.assertEqual(numbers.to_number("banana"), None)
|
1636289
|
import pytest
from pynamodb.pagination import RateLimiter
class MockTime():
def __init__(self):
self.current_time = 0.0
def sleep(self, amount):
self.current_time += amount
def time(self):
return self.current_time
def increment_time(self, amount):
self.current_time += amount
def test_rate_limiter_exceptions():
with pytest.raises(ValueError):
r = RateLimiter(0)
with pytest.raises(ValueError):
r = RateLimiter(-1)
with pytest.raises(ValueError):
r = RateLimiter(10)
r.rate_limit = 0
with pytest.raises(ValueError):
r = RateLimiter(10)
r.rate_limit = -1
def test_basic_rate_limiting():
mock_time = MockTime()
r = RateLimiter(0.1, mock_time)
# 100 operations
for i in range(0, 100):
r.acquire()
# Simulates an operation that takes 1 second
mock_time.increment_time(1)
r.consume(1)
    # Since the first acquire doesn't wait, we expect (100 - 1) * 10 = 990
    # seconds of delay, plus 1 for the last increment_time(1) operation
assert mock_time.time() == 991.0
def test_basic_rate_limiting_small_increment():
mock_time = MockTime()
r = RateLimiter(0.1, mock_time)
# 100 operations
for i in range(0, 100):
r.acquire()
        # Simulates an operation that takes 2 seconds
mock_time.increment_time(2)
r.consume(1)
    # Since the first acquire doesn't wait, we expect (100 - 1) * 10 = 990
    # seconds of delay, plus 2 for the last increment_time(2) operation
assert mock_time.time() == 992.0
def test_basic_rate_limiting_large_increment():
mock_time = MockTime()
r = RateLimiter(0.1, mock_time)
# 100 operations
for i in range(0, 100):
r.acquire()
        # Simulates an operation that takes 11 seconds
mock_time.increment_time(11)
r.consume(1)
# The operation takes longer than the minimum wait, so rate limiting should have no effect
assert mock_time.time() == 1100.0
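# A minimal usage sketch with the real clock instead of MockTime (the 1.0
# capacity unit per call is a hypothetical operation cost):
def example_rate_limited_loop():
    limiter = RateLimiter(rate_limit=25.0)
    for _ in range(3):
        limiter.acquire()     # sleeps as needed to honor the configured rate
        limiter.consume(1.0)  # record the capacity consumed by the operation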
|
1636302
|
import tensorflow as tf
def last_relevant_output(output, sequence_length):
"""
    Given the outputs of an LSTM, get the last relevant output that
is not padding. We assume that the last 2 dimensions of the input
represent (sequence_length, hidden_size).
Parameters
----------
output: Tensor
A tensor, generally the output of a tensorflow RNN.
        The output at index sequence_length - 1 is selected for each
        instance in the output.
sequence_length: Tensor
A tensor of dimension (batch_size, ) indicating the length
of the sequences before padding was applied.
Returns
-------
last_relevant_output: Tensor
The last relevant output (last element of the sequence), as retrieved
by the output Tensor and indicated by the sequence_length Tensor.
"""
with tf.name_scope("last_relevant_output"):
batch_size = tf.shape(output)[0]
max_length = tf.shape(output)[-2]
out_size = int(output.get_shape()[-1])
index = tf.range(0, batch_size) * max_length + (sequence_length - 1)
flat = tf.reshape(output, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
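# A minimal usage sketch (assumes TF 2.x eager execution; the shapes below are
# illustrative): a batch of 2 sequences, max length 3, hidden size 4.
if __name__ == "__main__":
    demo_output = tf.random.normal([2, 3, 4])
    demo_lengths = tf.constant([2, 3])
    print(last_relevant_output(demo_output, demo_lengths).shape)  # (2, 4)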
|
1636309
|
src = Split('''
system_stm32f4xx.c
STM32F4xx_StdPeriph_Driver/src/misc.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_adc.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_can.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_crc.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dac.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dbgmcu.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_dma.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_exti.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_flash.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_gpio.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rng.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_i2c.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_iwdg.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_pwr.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rcc.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_rtc.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_sdio.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_spi.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_syscfg.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_tim.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_usart.c
STM32F4xx_StdPeriph_Driver/src/stm32f4xx_wwdg.c
''')
component = aos_component('STM32F4xx_Peripheral_Libraries', src)
if aos_global_config.get('HOST_MCU_VARIANT') not in ['STM32F411', 'STM32F401']:
component.add_sources('STM32F4xx_StdPeriph_Driver/src/stm32f4xx_fsmc.c')
if aos_global_config.get('HOST_MCU_VARIANT') == 'STM32F412':
component.add_sources('STM32F4xx_StdPeriph_Driver/src/stm32f4xx_qspi.c')
component.add_global_includes('STM32F4xx_StdPeriph_Driver/inc', '../../../' + aos_global_config.arch + '/CMSIS')
|
1636330
|
from .Grammars import reference_patterns
from .dictionary import wordlist_english
import logging
import re
REFERENCE_PREFIX = "REF_"
strip_table = str.maketrans("", "", "(){}<>[].,!;?")
class separate_reference:
"""
Detects if a reference number has been mistakenly concatenated to words in
a document. This module will remove reference numbers, with the option
to include them as a token representing the reference number. If these
reference numbers wrap around a period at the end of the sentence, the
module will identify this and properly split the sentences.
Example:
input: 'How is the treatment going.4-5 Pretty well'
output: 'How is the treatment going . Pretty well'
"""
def __init__(self, reference_token=False, f_wordlist=None):
"""
Initialize the parser
Args:
            reference_token: boolean, flag to decide to tokenize removed
                reference content
            f_wordlist: optional path to a newline-delimited wordlist file;
                defaults to the bundled English wordlist
"""
self.logger = logging.getLogger(__name__)
if f_wordlist is None:
f_wordlist = wordlist_english
self.english_words = set()
with open(f_wordlist) as FIN:
for line in FIN:
self.english_words.add(line.strip())
self.reference_token = reference_token
self.reference_pattern = reference_patterns()
def __call__(self, text):
"""
call the parser
Args:
text: a document string
Returns:
return_doc: a document string
"""
new_doc = []
tokens = text.strip().split()
new_sentence = []
for token in tokens:
# Check if word is of the form word4.
new_tokens = self.single_number_pattern(token)
if new_tokens:
new_sentence.extend(new_tokens)
continue
# Check if the word is of the form word(4)
new_tokens = self.identify_reference_punctuation_pattern(
token, self.reference_pattern.single_number_parens, parens=True
)
if new_tokens:
new_sentence.extend(new_tokens)
continue
# Check if the word is of the form word,2,3,4
new_tokens = self.identify_reference_punctuation_pattern(
token, self.reference_pattern.punctuation_then_number, forward=3
)
if new_tokens:
new_sentence.extend(new_tokens)
continue
# Check if the word is of the form word2,3,4
new_tokens = self.identify_reference_punctuation_pattern(
token, self.reference_pattern.number_then_punctuation
)
if new_tokens:
new_sentence.extend(new_tokens)
continue
# if no reference detected, append word to the new sentence
new_sentence.append(token)
join_sentence = " ".join(new_sentence)
new_doc.append(join_sentence)
return_doc = " ".join(new_doc)
return return_doc
    # This is an ambiguous case, because there could be words that end in a
    # number that don't represent a footnote, as is the case for chemicals.
    # The code looks up the word in the dictionary, and if it is not found,
    # it is assumed to be a chemical name and the number is not pruned.
def single_number_pattern(self, token):
"""
Detect the most basic case where a single number is concatenated to the
word token
Args:
token: a string token
Returns:
output: a list of string tokens
"""
output = []
try:
parse_return = self.reference_pattern.single_number.parseString(
token
)
except BaseException:
return False
if parse_return[0] not in self.english_words:
output.append(token)
else:
word = parse_return[0]
reference = parse_return[1]
output.append(word)
if self.reference_token:
output.append(REFERENCE_PREFIX + reference)
if self.end_parens_match(parse_return):
output[-1] = output[-1] + parse_return[-1]
return output
def identify_reference_punctuation_pattern(
self, token, pattern, parens=False, forward=2
):
"""
Identify whether the pyparsing pattern passed to the function is found
in the token.
Args:
token: a string token
pattern: a pyparsing grammar pattern
parens: a boolean to flag whether the function should expect to
recognize a reference in parenthesis
forward: Number of characters to skip forward
Return:
Output: a list of string tokens
"""
output = []
parse_return = pattern.searchString(token)
if parse_return:
substring = "".join(parse_return[0][forward:])
index = token.find(substring)
word = token[:index]
if self.end_parens_match(parse_return[0], parens=True):
end_offset = len(parse_return[0][-1]) * -1
reference = token[len(word) : end_offset]
else:
reference = token[len(word) :]
output.append(word)
if self.reference_token:
ref_token = (REFERENCE_PREFIX + reference).translate(
strip_table
)
output.append(ref_token)
# Handle nested parens
if (
len(substring) > 2
and substring[-2] in "])}"
and substring[-3] in "])}"
):
output[-1] += substring[-2]
# Reference tokens have stripped too much
if self.reference_token:
if substring[-1] in ".,?!;:":
output[-1] += substring[-1]
# Replace any stripped punctuation
if substring[0] in ".,?!;:":
output[-1] += substring[0]
if self.end_parens_match(parse_return[0], parens=parens):
output[-1] = output[-1] + parse_return[0][-1]
else:
output = False
if output and reference[-1] in ".,?!;:" and not self.reference_token:
output[-1] += reference[-1]
return output
def end_parens_match(
self, strlist, search=re.compile(r"[^)}\]]").search, parens=False
):
"""
Check if the token ends in parenthesis, and thus needs to avoid
removing them as part of the reference.
Args:
strlist: a list of string subtokens, parsed from the given grammar
search: pattern to recognize. Defaults to ending parenthesis
parens: boolean to flag whether the passed grammar is for
references with parenthesis in them.
Return:
a boolean
"""
# special case when parsing with the parenthetical content grammar.
# We check to see if a token ends with a parenthesis to see if we need
# to append one to the cleaned token. However, we do not want to do
# this if the token ends with a parenthesis that holds a nested
# reference
if parens:
LP_Paran = sum(1 for a in strlist if a == "(")
RP_Paran = sum(1 for a in strlist if a == ")")
LP_Bracket = sum(1 for a in strlist if a == "[")
RP_Bracket = sum(1 for a in strlist if a == "]")
LP_Curl = sum(1 for a in strlist if a == "{")
RP_Curl = sum(1 for a in strlist if a == "}")
# If the count of the left paren doesn't match the right, then
# ignore all parenthesis
FLAG_valid = (
(LP_Paran == RP_Paran)
and (LP_Bracket == RP_Bracket)
and (LP_Curl == RP_Curl)
)
if FLAG_valid:
return False
return isinstance(strlist[-1], str) and not bool(search(strlist[-1]))
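# A minimal usage sketch (shown as comments since this module uses relative
# imports; reference_token=True keeps each removed reference as a REF_* token):
#
#     parser = separate_reference(reference_token=True)
#     cleaned = parser("How is the treatment going.4-5 Pretty well")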
|
1636358
|
import riemann
import unittest
from riemann import tx
from riemann import utils
from riemann.tests import helpers
class TestOutpoint(unittest.TestCase):
def setUp(self):
pass
def test_create_outpoint(self):
outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
outpoint = tx.Outpoint(outpoint_tx_id, outpoint_index)
self.assertEqual(outpoint.tx_id, outpoint_tx_id)
self.assertEqual(outpoint.index, outpoint_index)
self.assertEqual(outpoint, outpoint_tx_id + outpoint_index)
def test_create_outpoint_short_tx_id(self):
outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
outpoint_tx_id = bytearray(b'\xff')
with self.assertRaises(ValueError) as context:
tx.Outpoint(outpoint_tx_id, outpoint_index)
self.assertIn('Expected byte-like object with length 32. ',
str(context.exception))
def test_create_outpoint_str_tx_id(self):
outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
outpoint_tx_id = 'Hello world'
with self.assertRaises(ValueError) as context:
tx.Outpoint(outpoint_tx_id, outpoint_index)
self.assertIn('Expected byte-like object. ',
str(context.exception))
def test_create_outpoint_long_index(self):
outpoint_index = utils.i2le_padded(0, 5)
outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
with self.assertRaises(ValueError) as context:
tx.Outpoint(outpoint_tx_id, outpoint_index)
self.assertIn('Expected byte-like object with length 4. ',
str(context.exception))
def test_create_outpoint_no_index(self):
outpoint_index = None
outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
with self.assertRaises(ValueError) as context:
tx.Outpoint(outpoint_tx_id, outpoint_index)
self.assertIn('Expected byte-like object. ',
str(context.exception))
def test_copy(self):
outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
res = tx.Outpoint(outpoint_tx_id, outpoint_index)
copy = res.copy()
self.assertEqual(res, copy)
self.assertIsNot(res, copy)
def test_from_bytes(self):
outpoint = tx.Outpoint.from_bytes(
helpers.P2PKH1['ser']['ins'][0]['outpoint'])
self.assertEqual(outpoint, helpers.P2PKH1['ser']['ins'][0]['outpoint'])
self.assertEqual(
outpoint.tx_id,
helpers.P2PKH1['ser']['ins'][0]['hash'])
self.assertEqual(
outpoint.index,
helpers.P2PKH1['ser']['ins'][0]['index'])
class TestTxIn(unittest.TestCase):
def setUp(self):
outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
self.stack_script = helpers.P2PKH1['ser']['ins'][0]['stack_script']
self.redeem_script = helpers.P2PKH1['ser']['ins'][0]['redeem_script']
self.sequence = helpers.P2PKH1['ser']['ins'][0]['sequence']
self.outpoint = tx.Outpoint(outpoint_tx_id, outpoint_index)
def test_create_input(self):
tx_in = tx.TxIn(self.outpoint, self.stack_script,
self.redeem_script, self.sequence)
self.assertEqual(tx_in.outpoint, self.outpoint)
self.assertEqual(tx_in.stack_script, self.stack_script)
self.assertEqual(tx_in.redeem_script, self.redeem_script)
self.assertEqual(tx_in.sequence, self.sequence)
self.assertEqual(tx_in, helpers.P2PKH1['ser']['tx']['in'])
def test_copy(self):
tx_in = tx.TxIn(self.outpoint, self.stack_script,
self.redeem_script, self.sequence)
tx_in_copy = tx_in.copy()
self.assertEqual(tx_in, tx_in_copy) # They should be equal
self.assertIsNot(tx_in, tx_in_copy) # But not the same object
def test_long_script_sig(self):
with self.assertRaises(ValueError) as context:
tx.TxIn(self.outpoint, b'\x00' * 1000,
b'\x00' * 1000, self.sequence)
self.assertIn(
'Input script_sig is too long. Expected <= 1650 bytes. '
'Got 2000 bytes.',
str(context.exception))
def test_from_bytes_pkh(self):
tx_in = tx.TxIn.from_bytes(helpers.P2PKH1['ser']['tx']['in'])
self.assertEqual(tx_in, helpers.P2PKH1['ser']['tx']['in'])
self.assertEqual(
tx_in.outpoint,
helpers.P2PKH1['ser']['ins'][0]['outpoint'])
self.assertEqual(
tx_in.sequence,
helpers.P2PKH1['ser']['ins'][0]['sequence'])
self.assertEqual(
tx_in.stack_script,
helpers.P2PKH1['ser']['ins'][0]['stack_script'])
self.assertEqual(
tx_in.redeem_script,
helpers.P2PKH1['ser']['ins'][0]['redeem_script'])
def test_from_bytes_sh(self):
tx_in = tx.TxIn.from_bytes(helpers.P2SH['ser']['ins'][0]['input'])
self.assertEqual(tx_in, helpers.P2SH['ser']['ins'][0]['input'])
self.assertEqual(
tx_in.outpoint,
helpers.P2SH['ser']['ins'][0]['outpoint'])
self.assertEqual(
tx_in.sequence,
helpers.P2SH['ser']['ins'][0]['sequence'])
self.assertEqual(
tx_in.stack_script,
helpers.P2SH['ser']['ins'][0]['stack_script'])
self.assertEqual(
tx_in.redeem_script,
helpers.P2SH['ser']['ins'][0]['redeem_script'])
def test_from_bytes_wsh(self):
tx_in = tx.TxIn.from_bytes(helpers.P2WSH['ser']['ins'][0]['input'])
self.assertEqual(tx_in, helpers.P2WSH['ser']['ins'][0]['input'])
self.assertEqual(
tx_in.outpoint,
helpers.P2WSH['ser']['ins'][0]['outpoint'])
self.assertEqual(
tx_in.sequence,
utils.i2be(helpers.P2WSH['human']['ins'][0]['sequence']))
self.assertEqual(tx_in.stack_script, b'')
self.assertEqual(tx_in.redeem_script, b'')
def test_from_bytes_coinbase(self):
# test cases for patched error in txin deserialization
for tx_in_hex in helpers.COINBASE_REGRESSION:
print(tx_in_hex)
tx_in = tx.TxIn.from_hex(tx_in_hex)
self.assertEqual(tx_in.hex(), tx_in_hex)
class TestTxOut(unittest.TestCase):
def setUp(self):
self.value = helpers.P2PKH1['ser']['outs'][0]['value']
self.output_script = helpers.P2PKH1['ser']['outs'][0]['pk_script']
def test_create_output(self):
tx_out = tx.TxOut(self.value, self.output_script)
self.assertEqual(tx_out, helpers.P2PKH1['ser']['outs'][0]['out'])
def test_copy(self):
tx_out = tx.TxOut(self.value, self.output_script)
tx_out_copy = tx_out.copy()
self.assertEqual(tx_out, tx_out_copy) # They should be equal
self.assertIsNot(tx_out, tx_out_copy) # But not the same object
def test_from_bytes(self):
output = helpers.P2PKH1['ser']['outs'][0]['value'] + \
b'\x19' + helpers.P2PKH1['ser']['outs'][0]['pk_script']
tx_out = tx.TxOut.from_bytes(output)
self.assertEqual(
tx_out.value,
helpers.P2PKH1['ser']['outs'][0]['value'])
self.assertEqual(
tx_out.output_script,
helpers.P2PKH1['ser']['outs'][0]['pk_script'])
def test_from_bytes_long(self):
with self.assertRaises(NotImplementedError) as context:
tx.TxOut.from_bytes(b'\xff' * 10)
self.assertIn(
'No support for abnormally long pk_scripts.',
str(context.exception))
class TestWitnessStackItem(unittest.TestCase):
def setUp(self):
self.stack_item_bytes = \
helpers.P2WSH['ser']['witnesses'][0]['wit_stack_items'][1]
def test_create_stack_item(self):
w = tx.WitnessStackItem(self.stack_item_bytes)
self.assertEqual(w.item, self.stack_item_bytes)
self.assertEqual(
w,
bytes([len(self.stack_item_bytes)]) + self.stack_item_bytes)
def test_from_bytes(self):
w = tx.WitnessStackItem.from_bytes(
bytes([len(self.stack_item_bytes)]) + self.stack_item_bytes)
self.assertEqual(w.item, self.stack_item_bytes)
self.assertEqual(
w,
bytes([len(self.stack_item_bytes)]) + self.stack_item_bytes)
# def test_item_too_long(self):
# with self.assertRaises(ValueError) as context:
# tx.WitnessStackItem(b'\xff' * 521)
# self.assertIn(
# 'Item is too large. Expected <=520 bytes. ',
# str(context.exception))
def test_null_item_from_bytes(self):
w = tx.WitnessStackItem.from_bytes(b'\x00')
self.assertEqual(w, b'\x00')
class TestInputWitness(unittest.TestCase):
def setUp(self):
self.stack = [tx.WitnessStackItem(b)
for b in
helpers.P2WSH['ser']['witnesses'][0]['wit_stack_items']]
def test_create_witness(self):
iw = tx.InputWitness(self.stack)
self.assertEqual(len(iw.stack), len(self.stack))
for item, expected in zip(iw.stack, self.stack):
self.assertEqual(item, expected)
bad_stack = [None, 1]
with self.assertRaises(ValueError) as context:
tx.InputWitness(bad_stack)
self.assertIn('Invalid witness stack item. '
'Expected WitnessStackItem. Got None',
str(context.exception))
def test_from_bytes(self):
iw = tx.InputWitness.from_bytes(helpers.P2WSH['ser']['tx']['witness'])
self.assertEqual(len(iw.stack), len(self.stack))
for item, expected in zip([s.item for s in iw.stack],
[s.item for s in self.stack]):
self.assertEqual(item, expected)
class TestTx(unittest.TestCase):
def setUp(self):
self.outpoint_index = helpers.P2PKH1['ser']['ins'][0]['index']
self.outpoint_tx_id = helpers.P2PKH1['ser']['ins'][0]['hash']
self.stack_script = helpers.P2PKH1['ser']['ins'][0]['stack_script']
self.redeem_script = helpers.P2PKH1['ser']['ins'][0]['redeem_script']
self.sequence = helpers.P2PKH1['ser']['ins'][0]['sequence']
self.outpoint = tx.Outpoint(self.outpoint_tx_id, self.outpoint_index)
self.tx_in = tx.TxIn(self.outpoint, self.stack_script,
self.redeem_script, self.sequence)
self.value_0 = helpers.P2PKH1['ser']['outs'][0]['value']
self.output_script_0 = \
helpers.P2PKH1['ser']['outs'][0]['pk_script']
self.value_1 = helpers.P2PKH1['ser']['outs'][1]['value']
self.output_script_1 = \
helpers.P2PKH1['ser']['outs'][1]['pk_script']
self.tx_out_0 = tx.TxOut(self.value_0, self.output_script_0)
self.tx_out_1 = tx.TxOut(self.value_1, self.output_script_1)
self.version = helpers.P2PKH1['ser']['version']
self.none_flag = None
self.tx_ins = [self.tx_in]
self.tx_outs = [self.tx_out_0, self.tx_out_1]
self.none_witnesses = None
self.lock_time = helpers.P2PKH1['ser']['locktime']
self.segwit_flag = b'\x00\x01'
self.stack = [tx.WitnessStackItem(b)
for b in
helpers.P2WSH['ser']['witnesses'][0]['wit_stack_items']]
self.tx_witnesses = [tx.InputWitness(self.stack)]
def tearDown(self):
riemann.select_network('bitcoin_main')
# Convenience monotest
# Sorta broken.
def test_everything_witness(self):
version = bytearray([0] * 4)
flag = b'\x00\x01'
outpoint_index = utils.i2le_padded(0, 4)
outpoint_tx_id = bytearray(bytearray.fromhex(
'10399b3f20cbdd4b5ac3f823afdba28b'
'9f70e21437a59b312a1b62c42c5cd101'))[::-1]
outpoint = tx.Outpoint(outpoint_tx_id, outpoint_index)
sequence = utils.i2le_padded(0, 4)
script = bytearray(bytearray.fromhex('473044022000e02ea97289a35181a9bfabd324f12439410db11c4e94978cdade6a665bf1840220458b87c34d8bb5e4d70d01041c7c2d714ea8bfaca2c2d2b1f9e5749c3ee17e3d012102ed0851f0b4c4458f80e0310e57d20e12a84642b8e097fe82be229edbd7dbd53920f6665740b1f950eb58d646b1fae9be28cef842da5e51dc78459ad2b092e7fd6e514c5163a914bb408296de2420403aa79eb61426bb588a08691f8876a91431b31321831520e346b069feebe6e9cf3dd7239c670400925e5ab17576a9140d22433293fe9652ea00d21c5061697aef5ddb296888ac')) # noqa: E501
tx_in = tx.TxIn(outpoint, script, bytearray(), sequence)
tx_ins = [tx_in]
tx_outs = [
tx.TxOut(
value=bytearray(utils.i2le_padded(2000, 8)),
output_script=bytearray(bytearray.fromhex('76a914f2539f42058da784a9d54615ad074436cf3eb85188ac'))) # noqa: E501
]
none_witnesses = [
tx.InputWitness(
[
tx.WitnessStackItem(bytearray([0x88] * 18)),
tx.WitnessStackItem(bytearray([0x99] * 18))
]
)
]
lock_time = bytearray([0xff] * 4)
tx.Tx(version, flag, tx_ins, tx_outs, none_witnesses, lock_time)
# TODO: needs assertions
# Convenience monotest
def test_everything(self):
version = utils.i2le_padded(1, 4)
outpoint_index = utils.i2le_padded(0, 4)
outpoint_tx_id = bytearray(bytearray.fromhex(
'10399b3f20cbdd4b5ac3f823afdba28b'
'9f70e21437a59b312a1b62c42c5cd101'))[::-1]
outpoint = tx.Outpoint(outpoint_tx_id, outpoint_index)
sequence = utils.i2le_padded(0, 4)
script = bytearray(bytearray.fromhex('473044022000e02ea97289a35181a9bfabd324f12439410db11c4e94978cdade6a665bf1840220458b87c34d8bb5e4d70d01041c7c2d714ea8bfaca2c2d2b1f9e5749c3ee17e3d012102ed0851f0b4c4458f80e0310e57d20e12a84642b8e097fe82be229edbd7dbd53920f6665740b1f950eb58d646b1fae9be28cef842da5e51dc78459ad2b092e7fd6e514c5163a914bb408296de2420403aa79eb61426bb588a08691f8876a91431b31321831520e346b069feebe6e9cf3dd7239c670400925e5ab17576a9140d22433293fe9652ea00d21c5061697aef5ddb296888ac')) # noqa: E501
tx_in = tx.TxIn(outpoint, script, bytearray(), sequence)
tx_ins = [tx_in]
tx_outs = [
tx.TxOut(
value=bytearray(utils.i2le_padded(2000, 8)),
output_script=bytearray(bytearray.fromhex('76a914f2539f42058da784a9d54615ad074436cf3eb85188ac'))) # noqa: E501
]
lock_time = utils.i2le_padded(0, 4)
res = tx.Tx(version, None, tx_ins, tx_outs, None, lock_time)
self.assertEqual(res, helpers.RAW_P2SH_TO_P2PKH)
# TODO: Break up this monstrosity (further)
def test_tx_witness(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertEqual(t, helpers.P2PKH1['ser']['tx']['signed'])
with self.assertRaises(ValueError) as context:
tx.Tx(self.version, b'\x00\x00', self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertIn(
'Invald segwit flag. Expected None or ',
str(context.exception))
with self.assertRaises(ValueError) as context:
tx.Tx(self.version, None, self.tx_ins, self.tx_outs,
self.tx_witnesses, self.lock_time)
self.assertIn(
'Got witnesses but no segwit flag.',
str(context.exception))
with self.assertRaises(ValueError) as context:
stack = self.stack + [self.stack[0]]
witness = tx.InputWitness(stack)
tx.Tx(self.version, self.segwit_flag, self.tx_ins, self.tx_outs,
witness, self.lock_time)
self.assertIn(
'Witness and TxIn lists must be same length. ',
str(context.exception))
with self.assertRaises(ValueError) as context:
tx.Tx(self.version, self.segwit_flag, self.tx_ins, self.tx_outs,
[1 for _ in self.tx_witnesses], self.lock_time)
self.assertIn(
'Invalid InputWitness. Expected instance of InputWitness.',
str(context.exception))
def test_tx_inandout(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertEqual(t, helpers.P2PKH1['ser']['tx']['signed'])
with self.assertRaises(ValueError) as context:
tx_ins = []
tx.Tx(self.version, self.none_flag, tx_ins, self.tx_outs,
None, self.lock_time)
self.assertIn(
'Too few inputs or outputs. Stop that.',
str(context.exception))
with self.assertRaises(ValueError) as context:
tx_ins = [1]
tx.Tx(self.version, self.none_flag, tx_ins, self.tx_outs,
None, self.lock_time)
self.assertIn(
'Invalid TxIn. Expected instance of TxIn. Got int',
str(context.exception))
with self.assertRaises(ValueError) as context:
tx_outs = [1]
tx.Tx(self.version, self.none_flag, self.tx_ins, tx_outs,
None, self.lock_time)
self.assertIn(
'Invalid TxOut. Expected instance of TxOut. Got int',
str(context.exception))
def test_tx_inout_mutation(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
with self.assertRaises(TypeError, msg='That\'s immutable, honey'):
t.tx_ins = t.tx_ins + (1,)
with self.assertRaises(TypeError, msg='That\'s immutable, honey'):
t.tx_outs = t.tx_outs + (1,)
def test_tx_id(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertEqual(t.tx_id, helpers.P2PKH1['ser']['tx']['hash'])
self.assertEqual(t.tx_id_le, helpers.P2PKH1['ser']['tx']['hash_le'])
def test_from_hex_pkh(self):
t = tx.Tx.from_hex(helpers.P2PKH1['human']['tx']['signed'])
self.assertEqual(t.version, helpers.P2PKH1['ser']['version'])
self.assertEqual(t.tx_ins[0], helpers.P2PKH1['ser']['tx']['in'])
self.assertEqual(t.tx_outs[0], helpers.P2PKH1['ser']['outs'][0]['out'])
self.assertEqual(t.tx_outs[1], helpers.P2PKH1['ser']['outs'][1]['out'])
self.assertEqual(t.lock_time, helpers.P2PKH1['ser']['locktime'])
self.assertEqual(t, helpers.P2PKH1['ser']['tx']['signed'])
def test_from_hex_sh(self):
t = tx.Tx.from_hex(helpers.P2SH['human']['tx']['signed'])
self.assertEqual(t.version, helpers.P2SH['ser']['version'])
self.assertEqual(
t.tx_ins[0],
helpers.P2SH['ser']['ins'][0]['input'])
self.assertEqual(
t.tx_outs[0],
helpers.P2SH['ser']['outs'][0]['output'])
self.assertEqual(
t.tx_outs[1],
helpers.P2SH['ser']['outs'][1]['output'])
self.assertEqual(t.lock_time, helpers.P2SH['ser']['locktime'])
self.assertEqual(t, helpers.P2SH['ser']['tx']['signed'])
def test_from_hex_wsh(self):
t = tx.Tx.from_hex(helpers.P2WSH['human']['tx']['signed'])
self.assertEqual(t.version, helpers.P2WSH['ser']['version'])
self.assertEqual(t.tx_ins[0], helpers.P2WSH['ser']['ins'][0]['input'])
self.assertEqual(
t.tx_outs[0],
helpers.P2WSH['ser']['outs'][0]['output'])
self.assertEqual(
t.tx_outs[1],
helpers.P2WSH['ser']['outs'][1]['output'])
self.assertEqual(
t.tx_outs[2],
helpers.P2WSH['ser']['outs'][2]['output'])
self.assertEqual(
t.tx_outs[3],
helpers.P2WSH['ser']['outs'][3]['output'])
self.assertEqual(
t.tx_witnesses[0],
helpers.P2WSH['ser']['tx']['witness'])
self.assertEqual(t.lock_time, helpers.P2WSH['ser']['locktime'])
self.assertEqual(t, helpers.P2WSH['ser']['tx']['signed'])
def test_from_bytes_pkh(self):
t = tx.Tx.from_bytes(helpers.P2PKH1['ser']['tx']['signed'])
self.assertEqual(t.version, helpers.P2PKH1['ser']['version'])
self.assertEqual(t.tx_ins[0], helpers.P2PKH1['ser']['tx']['in'])
self.assertEqual(t.tx_outs[0], helpers.P2PKH1['ser']['outs'][0]['out'])
self.assertEqual(t.tx_outs[1], helpers.P2PKH1['ser']['outs'][1]['out'])
self.assertEqual(t.lock_time, helpers.P2PKH1['ser']['locktime'])
self.assertEqual(t, helpers.P2PKH1['ser']['tx']['signed'])
def test_from_bytes_sh(self):
t = tx.Tx.from_bytes(helpers.P2SH['ser']['tx']['signed'])
self.assertEqual(t.version, helpers.P2SH['ser']['version'])
self.assertEqual(
t.tx_ins[0],
helpers.P2SH['ser']['ins'][0]['input'])
self.assertEqual(
t.tx_outs[0],
helpers.P2SH['ser']['outs'][0]['output'])
self.assertEqual(
t.tx_outs[1],
helpers.P2SH['ser']['outs'][1]['output'])
self.assertEqual(t.lock_time, helpers.P2SH['ser']['locktime'])
self.assertEqual(t, helpers.P2SH['ser']['tx']['signed'])
def test_from_bytes_wsh(self):
t = tx.Tx.from_bytes(helpers.P2WSH['ser']['tx']['signed'])
self.assertEqual(t.version, helpers.P2WSH['ser']['version'])
self.assertEqual(t.tx_ins[0], helpers.P2WSH['ser']['ins'][0]['input'])
self.assertEqual(
t.tx_outs[0],
helpers.P2WSH['ser']['outs'][0]['output'])
self.assertEqual(
t.tx_outs[1],
helpers.P2WSH['ser']['outs'][1]['output'])
self.assertEqual(
t.tx_outs[2],
helpers.P2WSH['ser']['outs'][2]['output'])
self.assertEqual(
t.tx_outs[3],
helpers.P2WSH['ser']['outs'][3]['output'])
self.assertEqual(
t.tx_witnesses[0],
helpers.P2WSH['ser']['tx']['witness'])
self.assertEqual(t.lock_time, helpers.P2WSH['ser']['locktime'])
self.assertEqual(t, helpers.P2WSH['ser']['tx']['signed'])
def test_calculate_fee(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertEqual(t.calculate_fee([10 ** 8]), 57534406)
def test_sighash_none(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
with self.assertRaises(NotImplementedError) as context:
t.sighash_none()
self.assertIn('SIGHASH_NONE is a bad idea.', str(context.exception))
def test_copy(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
t_copy = t.copy()
self.assertEqual(t, t_copy)
self.assertIsNot(t, t_copy)
def test_is_witness(self):
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertFalse(t.is_witness())
t = tx.Tx.from_bytes(helpers.P2WSH['ser']['tx']['signed'])
self.assertTrue(t.is_witness())
def test_segwit_sighash_all(self):
t = tx.Tx.from_bytes(helpers.P2WPKH['ser']['tx']['signed'])
self.assertEqual(
t.sighash_all(
0,
helpers.P2WPKH['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2WPKH['ser']['ins'][0]['value']
),
helpers.P2WPKH['ser']['segwit_sighash']['all'])
def test_segwit_sighash_all_anyonecanpay(self):
t = tx.Tx.from_bytes(helpers.P2WPKH['ser']['tx']['signed'])
self.assertEqual(
t.sighash_all(
0,
helpers.P2WPKH['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2WPKH['ser']['ins'][0]['value'],
anyone_can_pay=True),
helpers.P2WPKH['ser']['segwit_sighash']['all_anyonecanpay'])
def test_segwit_sighash_single(self):
t = tx.Tx.from_bytes(helpers.P2WPKH['ser']['tx']['signed'])
self.assertEqual(
t.sighash_single(
0,
helpers.P2WPKH['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2WPKH['ser']['ins'][0]['value']),
helpers.P2WPKH['ser']['segwit_sighash']['single'])
def test_segwit_sighash_single_anyonecanpay(self):
t = tx.Tx.from_bytes(helpers.P2WPKH['ser']['tx']['signed'])
self.assertEqual(
t.sighash_single(
0,
helpers.P2WPKH['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2WPKH['ser']['ins'][0]['value'],
anyone_can_pay=True),
helpers.P2WPKH['ser']['segwit_sighash']['single_anyonecanpay'])
def test_presegwit_sighashes(self):
''' all, all anyonecanpay, single, single_anyonecanpay.
Marks transaction as pre- or non-segwit in a segwit network.
'''
t = tx.Tx(self.version, None, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
self.assertEqual(
t.sighash_all(
0,
helpers.P2PKH1['ser']['ins'][0]['pk_script'],
),
helpers.P2PKH1['ser']['sighash']['all'])
self.assertEqual(
t.sighash_all(
0,
helpers.P2PKH1['ser']['ins'][0]['pk_script'],
anyone_can_pay=True),
helpers.P2PKH1['ser']['sighash']['all_anyonecanpay'])
self.assertEqual(
t.sighash_single(
0,
helpers.P2PKH1['ser']['ins'][0]['pk_script']),
helpers.P2PKH1['ser']['sighash']['single'])
self.assertEqual(
t.sighash_single(
0,
helpers.P2PKH1['ser']['ins'][0]['pk_script'],
anyone_can_pay=True),
helpers.P2PKH1['ser']['sighash']['single_anyonecanpay'])
def test_sighash_single_bug(self):
with self.assertRaises(NotImplementedError) as context:
t = tx.Tx(self.version, self.none_flag, self.tx_ins * 3,
self.tx_outs, self.none_witnesses, self.lock_time)
t.sighash_single(2, helpers.P2PKH1['ser']['ins'][0]['pk_script'])
self.assertIn(
'I refuse to implement the SIGHASH_SINGLE bug.',
str(context.exception))
def test_sighash_forkid_single(self):
riemann.select_network('bitcoin_cash_main')
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
sighash = t.sighash_single(
index=0,
script=helpers.P2PKH1['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2PKH1['ser']['ins'][0]['value'])
self.assertEqual(
sighash,
helpers.SIGHASH_FORKID['single'])
def test_sighash_forkid_single_anyone_can_pay(self):
riemann.select_network('bitcoin_cash_main')
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
sighash = t.sighash_single(
index=0,
script=helpers.P2PKH1['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2PKH1['ser']['ins'][0]['value'],
anyone_can_pay=True)
self.assertEqual(
sighash,
helpers.SIGHASH_FORKID['single_anyone_can_pay'])
def test_sighash_forkid_all(self):
riemann.select_network('bitcoin_cash_main')
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
sighash = t.sighash_all(
index=0,
script=helpers.P2PKH1['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2PKH1['ser']['ins'][0]['value'])
self.assertEqual(
sighash,
helpers.SIGHASH_FORKID['all'])
def test_sighash_forkid_all_anyone_can_pay(self):
riemann.select_network('bitcoin_cash_main')
t = tx.Tx(self.version, self.none_flag, self.tx_ins, self.tx_outs,
self.none_witnesses, self.lock_time)
sighash = t.sighash_all(
index=0,
script=helpers.P2PKH1['ser']['ins'][0]['pk_script'],
prevout_value=helpers.P2PKH1['ser']['ins'][0]['value'],
anyone_can_pay=True)
self.assertEqual(
sighash,
helpers.SIGHASH_FORKID['all_anyone_can_pay'])
|
1636374
|
import argparse
import json
import os
from collections import defaultdict
def get_datasets(args):
datasets = []
for _, _, dataset in args.input:
if dataset not in datasets:
datasets.append(dataset)
return datasets
def get_metrics(args):
metrics = []
for _, metric, _ in args.input:
if metric not in metrics:
metrics.append(metric)
return metrics
def main(args):
datasets = get_datasets(args)
metrics = get_metrics(args)
values = defaultdict(dict)
for file_path, metric, dataset in args.input:
        with open(file_path, 'r') as f:
            correlations = json.load(f)
values[metric][dataset] = correlations
dirname = os.path.dirname(args.output_file)
if dirname:
os.makedirs(dirname, exist_ok=True)
with open(args.output_file, 'w') as out:
out.write('<table>\n')
# Print the header with the dataset names
out.write('<tr>\n')
out.write('<th></th>\n')
for dataset in datasets:
out.write(f'<th colspan="3">{dataset}</th>\n')
out.write('</tr>\n')
# Print the header with the correlation coefficient names
out.write('<tr>\n')
out.write('<th></th>\n')
for dataset in datasets:
for coef in ['r', 'p', 'k']:
out.write(f'<th>{coef}</th>\n')
out.write('</tr>\n')
# Print each value in the table
for metric in metrics:
out.write('<tr>\n')
out.write(f'<td>{metric}</td>\n')
for dataset in datasets:
out.write(f'<td>{values[metric][dataset][args.correlation_level]["pearson"]["r"]:.2f}</td>\n')
out.write(f'<td>{values[metric][dataset][args.correlation_level]["spearman"]["rho"]:.2f}</td>\n')
out.write(f'<td>{values[metric][dataset][args.correlation_level]["kendall"]["tau"]:.2f}</td>\n')
out.write('</tr>\n')
out.write('</table>\n')
if __name__ == '__main__':
argp = argparse.ArgumentParser()
argp.add_argument('output_file')
argp.add_argument('correlation_level', choices=['summary_level', 'system_level', 'global'])
argp.add_argument('--input', action='append', nargs=3, metavar=('file_path', 'metric', 'dataset'))
args = argp.parse_args()
main(args)
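# Example invocation (the script name and file paths are hypothetical):
#
#     python render_table.py tables/summary.html summary_level \
#         --input scores/rouge_tac2008.json ROUGE-1 TAC2008 \
#         --input scores/bertscore_tac2008.json BERTScore TAC2008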
|
1636414
|
from river import stream
from . import base
class SMTP(base.RemoteDataset):
"""SMTP dataset from the KDD 1999 cup.
    The goal is to predict whether an SMTP connection is anomalous. The dataset
    only contains 2,211 (0.4%) positive labels.
References
----------
[^1]: [SMTP (KDDCUP99) dataset](http://odds.cs.stonybrook.edu/smtp-kddcup99-dataset/)
"""
def __init__(self):
super().__init__(
n_samples=95_156,
n_features=3,
task=base.BINARY_CLF,
url="https://maxhalford.github.io/files/datasets/smtp.zip",
size=5_484_982,
filename="smtp.csv",
)
def _iter(self):
return stream.iter_csv(
self.path,
target="service",
converters={
"duration": float,
"src_bytes": float,
"dst_bytes": float,
"service": int,
},
)
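# A minimal usage sketch (shown as comments since this module uses relative
# imports; instantiating SMTP downloads the ~5 MB archive on first use):
#
#     dataset = SMTP()
#     x, y = next(iter(dataset))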
|
1636438
|
import yaml
import six
script_out = """all:
children:
ungrouped:
hosts:
foobar:
should_be_artemis_here: !vault |
$ANSIBLE_VAULT;1.2;AES256;alan
30386264646430643536336230313232653130643332356531633437363837323430663031356364
3836313935643038306263613631396136663634613066650a303838613532313236663966343433
37636234366130393131616631663831383237653761373533363666303361333662373664336261
6136313463383061330a633835643434616562633238383530356632336664316366376139306135
3534"""
# --- the YAML docs ---
# class Monster(yaml.YAMLObject):
# yaml_tag = u'!vault'
# def __init__(self, node):
# print ' args kwargs ' + str(node)# + str(kwargs)
# self.node = node
#
# def __repr__(self):
# return str(self.node)
# second example
class Dice(tuple):
def __new__(cls, a, b):
return tuple.__new__(cls, [a, b])
def __repr__(self):
return "Dice(%s,%s)" % self
def dice_representer(dumper, data):
return dumper.represent_scalar(u'!dice', u'%sd%s' % data)
def dice_constructor(loader, node):
value = loader.construct_scalar(node)
a, b = map(int, value.split('d'))
return Dice(a, b)
yaml.add_representer(Dice, dice_representer)
yaml.add_constructor(u'!dice', dice_constructor)
print yaml.dump({'gold': Dice(10,6)})
print yaml.load("""initial hit points: !dice 8d4""")
class NaiveVault:
def __init__(self, data):
self.data = data
def __repr__(self):
return six.text_type(self.data)
print NaiveVault('hello world')
def vault_representer(dumper, data):
return dumper.represent_scalar(u'!vault', six.text_type(data))
def vault_constructor(loader, node):
value = loader.construct_scalar(node)
return NaiveVault(value)
yaml.add_representer(NaiveVault, vault_representer)
yaml.add_constructor(u'!vault', vault_constructor)
# --- the Ansible method ---
# from yaml.constructor import SafeConstructor
#
# class AnsibleConstructor(SafeConstructor):
#
# def construct_vault_encrypted_unicode(self, node):
# value = self.construct_scalar(node)
# return str(value)
#
# yaml.add_constructor(
# u'!vault',
# AnsibleConstructor.construct_vault_encrypted_unicode)
python_out = yaml.load(script_out)
print ' python output '
print python_out
print ' dumped output '
print yaml.dump(python_out, default_flow_style=False)
print ' original script out '
print script_out
print ' again, using safe_load '
yaml.SafeLoader.add_constructor(u'!vault', vault_constructor)
python_out = yaml.safe_load(script_out)
|
1636439
|
import platform
import numpy as np
import pytest
import qtpy
from napari.layers import Labels, Points
from qtpy.QtCore import QCoreApplication
from PartSeg._roi_analysis.image_view import ResultImageView
from PartSeg.common_backend.base_settings import BaseSettings
from PartSeg.common_gui.channel_control import ChannelProperty
from PartSeg.common_gui.napari_viewer_wrap import Viewer
from PartSegCore.project_info import AdditionalLayerDescription
from PartSegCore.roi_info import ROIInfo
from .utils import CI_BUILD
pyside_skip = pytest.mark.skipif(qtpy.API_NAME == "PySide2" and platform.system() == "Linux", reason="PySide2 problem")
class TestResultImageView:
@pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
@pyside_skip
def test_simple(self, qtbot, part_settings, image):
prop = ChannelProperty(part_settings, "test")
viewer = ResultImageView(part_settings, prop, "test")
viewer.show()
qtbot.add_widget(prop)
qtbot.add_widget(viewer)
viewer.add_image(image)
assert not viewer.roi_alternative_select.isVisible()
assert not viewer.label1.isVisible()
assert not viewer.label2.isVisible()
assert not viewer.opacity.isVisible()
assert not viewer.only_border.isVisible()
assert not viewer.roi_alternative_select.isVisible()
assert not viewer.any_roi()
assert not viewer.available_alternatives()
viewer.hide()
# prop.close()
# viewer.close()
@pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
@pyside_skip
def test_set_roi(self, qtbot, part_settings, image):
prop = ChannelProperty(part_settings, "test")
viewer = ResultImageView(part_settings, prop, "test")
qtbot.add_widget(prop)
qtbot.add_widget(viewer)
viewer.show()
part_settings.image = image
roi = ROIInfo((image.get_channel(0) > 0).astype(np.uint8))
roi = roi.fit_to_image(image)
viewer.set_roi(roi, image)
QCoreApplication.processEvents()
assert not viewer.roi_alternative_select.isVisible()
assert viewer.label1.isVisible()
assert viewer.label2.isVisible()
assert viewer.opacity.isVisible()
assert viewer.only_border.isVisible()
assert not viewer.roi_alternative_select.isVisible()
assert viewer.any_roi()
assert not viewer.available_alternatives()
viewer.hide()
@pyside_skip
@pytest.mark.skipif((platform.system() == "Windows") and CI_BUILD, reason="glBindFramebuffer with no OpenGL")
class TestNapariViewer:
def test_base(self, image, analysis_segmentation2, tmp_path):
settings = BaseSettings(tmp_path)
settings.image = image
viewer = Viewer(settings, "")
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 2
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 2
settings.image = analysis_segmentation2.image
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 1
settings.roi = analysis_segmentation2.roi_info.roi
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 2
settings.mask = analysis_segmentation2.mask
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 3
viewer.close()
def test_points(self, image, tmp_path, qtbot):
settings = BaseSettings(tmp_path)
settings.image = image
viewer = Viewer(settings, "")
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 2
points = np.array([[0, 1, 1, 1], [0, 7, 10, 10]])
settings.points = points
viewer.create_initial_layers(True, True, True, True)
assert len(viewer.layers) == 3
assert isinstance(viewer.layers[-1], Points)
viewer._sync_widget.sync_points_chk.setChecked(True)
with qtbot.wait_signal(settings.points_changed):
settings.points = None
assert len(viewer.layers) == 2
with qtbot.wait_signal(settings.points_changed):
settings.points = points
assert len(viewer.layers) == 3
assert isinstance(viewer.layers[-1], Points)
viewer.close()
def test_image(self, image, image2, tmp_path, qtbot):
settings = BaseSettings(tmp_path)
settings.image = image
viewer = Viewer(settings, "test")
with qtbot.waitSignal(viewer._sync_widget.sync_image_chk.stateChanged):
viewer._sync_widget.sync_image_chk.setChecked(True)
assert len(viewer.layers) == 2
with qtbot.waitSignal(settings.image_changed):
settings.image = image2
assert len(viewer.layers) == 3
viewer.close()
def test_roi(self, image, tmp_path, qtbot):
settings = BaseSettings(tmp_path)
settings.image = image
viewer = Viewer(settings, "test")
viewer._sync_widget.sync_image()
assert len(viewer.layers) == 2
viewer._sync_widget.sync_ROI_chk.setChecked(True)
roi_info = ROIInfo(image.get_channel(0), {}, {"sample": image.get_channel(1)})
with qtbot.waitSignal(settings.roi_changed):
settings.roi = roi_info
assert len(viewer.layers) == 4
viewer.close()
def test_additional(self, image, tmp_path, qtbot):
settings = BaseSettings(tmp_path)
settings.image = image
viewer = Viewer(settings, "test")
viewer._sync_widget.sync_image()
assert len(viewer.layers) == 2
settings._additional_layers = {
"first": AdditionalLayerDescription(image.get_channel(0), "image", "first"),
"second": AdditionalLayerDescription(image.get_channel(0), "labels", "second"),
}
viewer._sync_widget.sync_additional()
assert len(viewer.layers) == 4
assert isinstance(viewer.layers[-1], Labels)
viewer.close()
|
1636476
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class AGNewsmodelWrapper(nn.Module):
def __init__(self, model):
super(AGNewsmodelWrapper, self).__init__()
self.model = model
def compute_bert_outputs( # pylint: disable=no-self-use
self, model_bert, embedding_input, attention_mask=None, head_mask=None
):
"""Computes Bert Outputs.
Args:
model_bert : the bert model
embedding_input : input for bert embeddings.
attention_mask : attention mask
head_mask : head mask
Returns:
output : the bert output
"""
if attention_mask is None:
attention_mask = torch.ones( # pylint: disable=no-member
embedding_input.shape[0], embedding_input.shape[1]
).to(embedding_input)
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(model_bert.parameters()).dtype
) # fp16 compatibility
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if head_mask is not None:
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(model_bert.config.num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = (
head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
) # We can specify head_mask for each layer
head_mask = head_mask.to(
dtype=next(model_bert.parameters()).dtype
            ) # switch to float if needed + fp16 compatibility
else:
head_mask = [None] * model_bert.config.num_hidden_layers
encoder_outputs = model_bert.encoder(
embedding_input, extended_attention_mask, head_mask=head_mask
)
sequence_output = encoder_outputs[0]
pooled_output = model_bert.pooler(sequence_output)
outputs = (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return outputs
def forward(self, embeddings, attention_mask=None):
"""Forward function.
Args:
embeddings : bert embeddings.
            attention_mask: Attention mask value
        Returns:
            output: logits from the final classification layer
        """
outputs = self.compute_bert_outputs(self.model.bert_model, embeddings, attention_mask)
pooled_output = outputs[1]
output = F.relu(self.model.fc1(pooled_output))
output = self.model.drop(output)
output = self.model.out(output)
return output
|
1636491
|
from typing import List, Iterable, Mapping
from .common import write_varint, sha256
NIL = bytes([0] * 32)
def floor_lg(n: int) -> int:
"""Return floor(log_2(n)) for a positive integer `n`"""
assert n > 0
r = 0
t = 1
while 2 * t <= n:
t = 2 * t
r = r + 1
return r
def ceil_lg(n: int) -> int:
"""Return ceiling(log_2(n)) for a positive integer `n`."""
assert n > 0
r = 0
t = 1
while t < n:
t = 2 * t
r = r + 1
return r
def is_power_of_2(n: int) -> bool:
"""For a positive integer `n`, returns `True` is `n` is a perfect power of 2, `False` otherwise."""
assert n >= 1
return n & (n - 1) == 0
def largest_power_of_2_less_than(n: int) -> int:
"""For an integer `n` which is at least 2, returns the largest exact power of 2 that is strictly less than `n`."""
assert n > 1
if is_power_of_2(n):
return n // 2
else:
return 1 << floor_lg(n)
def element_hash(element_preimage: bytes) -> bytes:
"""Computes the hash of an element to be stored in the Merkle tree."""
return sha256(b'\x00' + element_preimage)
def combine_hashes(left: bytes, right: bytes) -> bytes:
if len(left) != 32 or len(right) != 32:
raise ValueError("The elements must be 32-bytes sha256 outputs.")
return sha256(b'\x01' + left + right)
# root is the only node with parent == None
# leaves have left == right == None
class Node:
def __init__(self, left, right, parent, value: bytes):
self.left = left
self.right = right
self.parent = parent
self.value = value
def recompute_value(self):
assert self.left is not None
assert self.right is not None
self.value = combine_hashes(self.left.value, self.right.value)
def sibling(self):
if self.parent is None:
raise IndexError("The root does not have a sibling.")
if self.parent.left == self:
return self.parent.right
elif self.parent.right == self:
return self.parent.left
else:
raise IndexError("Invalid state: not a child of his parent.")
def make_tree(leaves: List[Node], begin: int, size: int) -> Node:
"""Given a list of nodes, builds the left-complete Merkle tree on top of it.
The nodes in `leaves` are modified by setting their `parent` field appropriately.
It returns the root of the newly built tree.
"""
    if size == 0:
        return None
if size == 1:
return leaves[begin]
lchild_size = largest_power_of_2_less_than(size)
lchild = make_tree(leaves, begin, lchild_size)
rchild = make_tree(leaves, begin + lchild_size, size - lchild_size)
root = Node(lchild, rchild, None, None)
root.recompute_value()
lchild.parent = rchild.parent = root
return root
class MerkleTree:
"""
Maintains a dynamic vector of values and the Merkle tree built on top of it. The elements of the vector are stored
as the leaves of a binary tree. It is possible to add a new element to the vector, or change an existing element;
the hashes in the Merkle tree will be recomputed after each operation in O(log n) time, for a vector with n
elements.
The value of each internal node is the hash of the concatenation of:
- a single byte 0x01;
    - the value of the left child;
- the value of the right child.
The binary tree has the following properties (assuming the vector contains n leaves):
- There are always n - 1 internal nodes; all the internal nodes have exactly two children.
- If a subtree has n > 1 leaves, then the left subchild is a complete subtree with p leaves, where p is the largest
power of 2 smaller than n.
"""
def __init__(self, elements: Iterable[bytes] = []):
self.leaves = [Node(None, None, None, el) for el in elements]
n_elements = len(self.leaves)
if n_elements > 0:
self.root_node = make_tree(self.leaves, 0, n_elements)
self.depth = ceil_lg(n_elements)
else:
self.root_node = None
self.depth = None
def __len__(self) -> int:
"""Return the total number of leaves in the tree."""
return len(self.leaves)
@property
def root(self) -> bytes:
"""Return the Merkle root, or None if the tree is empty."""
return NIL if self.root_node is None else self.root_node.value
def copy(self):
"""Return an identical copy of this Merkle tree."""
return MerkleTree([leaf.value for leaf in self.leaves])
def add(self, x: bytes) -> None:
"""Add an element as new leaf, and recompute the tree accordingly. Cost O(log n)."""
if len(x) != 32:
raise ValueError("Inserted elements must be exactly 32 bytes long")
new_leaf = Node(None, None, None, x)
self.leaves.append(new_leaf)
if len(self.leaves) == 1:
self.root_node = new_leaf
self.depth = 0
return
# add a new leaf
if self.depth == 0:
ltree_size = 0
else:
# number of leaves of the left subtree of cur_root
ltree_size = 1 << (self.depth - 1)
cur_root = self.root_node
cur_root_size = len(self.leaves) - 1
while not is_power_of_2(cur_root_size):
cur_root = cur_root.right
cur_root_size -= ltree_size
            ltree_size //= 2
# node value will be computed later
new_node = Node(cur_root, new_leaf, cur_root.parent, None)
if cur_root.parent is None:
# replacing the root
self.depth += 1
self.root_node = new_node
else:
assert cur_root.parent.right == cur_root
cur_root.parent.right = new_node
cur_root.parent = new_node
new_leaf.parent = new_node
self.fix_up(new_node)
def set(self, index: int, x: bytes) -> None:
"""
Set the value of the leaf at position `index` to `x`, recomputing the tree accordingly.
If `index` equals the current number of leaves, then it is equivalent to `add(x)`.
Cost: Worst case O(log n).
"""
        if not (0 <= index <= len(self.leaves)):
            raise ValueError(
                "The index must be at least 0, and at most the current number of leaves.")
if len(x) != 32:
raise ValueError("Inserted elements must be exactly 32 bytes long.")
if index == len(self.leaves):
self.add(x)
else:
self.leaves[index].value = x
self.fix_up(self.leaves[index].parent)
def fix_up(self, node: Node):
while node is not None:
node.recompute_value()
node = node.parent
def get(self, i: int) -> bytes:
"""Return the value of the leaf with index `i`, where 0 <= i < len(self)."""
return self.leaves[i].value
def leaf_index(self, x: bytes) -> int:
"""Return the index of the leaf with hash `x`. Raises `ValueError` if not found."""
idx = 0
while idx < len(self):
if self.leaves[idx].value == x:
return idx
idx += 1
raise ValueError("Leaf not found")
def prove_leaf(self, index: int) -> List[bytes]:
"""Produce the Merkle proof of membership for the leaf with the given index where 0 <= index < len(self)."""
node = self.leaves[index]
proof = []
while node.parent is not None:
sibling = node.sibling()
assert sibling is not None
proof.append(sibling.value)
node = node.parent
return proof
def get_merkleized_map_commitment(mapping: Mapping[bytes, bytes]) -> bytes:
"""Returns a serialized Merkleized map commitment, encoded as the concatenation of:
- the number of key/value pairs, as a Bitcoin-style varint;
    - the root of the Merkle tree of the keys;
- the root of the Merkle tree of the values.
"""
items_sorted = list(sorted(mapping.items()))
keys_hashes = [element_hash(i[0]) for i in items_sorted]
values_hashes = [element_hash(i[1]) for i in items_sorted]
return write_varint(len(mapping)) + MerkleTree(keys_hashes).root + MerkleTree(values_hashes).root
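# A minimal usage sketch (shown as comments since this module uses relative
# imports; the leaf preimages are arbitrary examples):
#
#     leaves = [element_hash(bytes([i])) for i in range(5)]
#     tree = MerkleTree(leaves)
#     root = tree.root                 # 32-byte Merkle root
#     proof = tree.prove_leaf(2)       # sibling hashes from leaf 2 up to the root
#     tree.add(element_hash(b'\x05'))  # O(log n) incremental update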
|
1636496
|
import sys, os
sys.path.insert(0, os.path.abspath('.') + '/_extensions')
project = 'Project Bureau'
copyright = '2019-2020, whitequark'
master_doc = 'index'
rst_epilog = """
.. |o| raw:: html
<i class="fa fa-times" style="display:block;text-align:center;color:darkred;"></i>
.. |x| raw:: html
<i class="fa fa-check" style="display:block;text-align:center;color:green;"></i>
.. |-| raw:: html
<i class="fa fa-minus" style="display:block;text-align:center;"></i>
"""
extensions = [
'sphinx.ext.todo',
'sphinx_rtd_theme',
'sphinxarg.ext',
'sphinx_prjbureau',
]
todo_include_todos = True
templates_path = ['_templates']
html_theme = 'sphinx_rtd_theme'
html_static_path = ["_static"]
html_css_files = ["custom.css"]
|
1636537
|
def arrange_number(a, b):
    """Return the greatest common divisor of two positive integers."""
    if a > b:
        s = b
    else:
        s = a
    for i in range(1, s + 1):
        if a % i == 0 and b % i == 0:
            result = i
    return result
m=int(input())
n=int(input())
print(arrange_number(m,n))
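# Equivalent standard-library check (the loop above is O(min(a, b)) trial
# division; math.gcd uses the faster Euclidean algorithm):
#
#     import math
#     assert arrange_number(12, 18) == math.gcd(12, 18) == 6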
|
1636576
|
class AudioNetwork:
    """Create a generic audio network"""
    def set_volume(self, volume):
        raise NotImplementedError("Not implemented")
    def volume(self):
        raise NotImplementedError("Not implemented")
    def speakers(self):
        """Return a list of available devices"""
        raise NotImplementedError("Not implemented")
    def stop(self):
        raise NotImplementedError("Not implemented")
    def play(self):
        raise NotImplementedError("Not implemented")
class Speaker:
STATUS_PLAYING = 'PLAYING'
STATUS_STOPPED = 'STOPPED'
def __init__(self, name, volume, status):
self.name = name
self.volume = volume
self.status = status
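# A minimal sketch of a concrete implementation (the single hard-coded Speaker
# is a placeholder; a real network would discover devices):
class DummyAudioNetwork(AudioNetwork):
    def __init__(self):
        self._volume = 50
        self._speaker = Speaker("living-room", self._volume,
                                Speaker.STATUS_STOPPED)
    def set_volume(self, volume):
        self._volume = volume
        self._speaker.volume = volume
    def volume(self):
        return self._volume
    def speakers(self):
        return [self._speaker]
    def stop(self):
        self._speaker.status = Speaker.STATUS_STOPPED
    def play(self):
        self._speaker.status = Speaker.STATUS_PLAYING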
|
1636586
|
import discord
import collections
import operator
import random
import asyncio
import datetime
import uuid
from queue import Queue
from redbot.core import Config
from redbot.core import commands
from redbot.core import checks
from redbot.core.utils.predicates import ReactionPredicate
from redbot.core.utils.menus import start_adding_reactions, menu, DEFAULT_CONTROLS
team_size = 3
minimum_game_time = 600 #Seconds (10 Minutes)
verify_timeout = 15
start_game_verify_timeout = 60
k_factor = 50
default_elo = 1500
defaults = {"CategoryChannel": None, "TextChannel": None, "HelperRole": None, "Games": {}, "GamesPlayed": 0, "Teams": {}, "Scores": []}
class Ladder(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.config = Config.get_conf(self, identifier=1234567880, force_registration=True)
self.config.register_guild(**defaults)
self.games = []
self.teams = []
@commands.guild_only()
@commands.command(aliases=["rlt"])
async def registerLadderTeam(self, ctx, team_name, captain: discord.Member, *players: discord.Member):
"""Creates a team with a given name and players. The team will need to be approved first before it can actually begin participating.
The first player listed when using the command will be made captain of the team. **Team names will need to be in quotes.**"""
await self.load_teams(ctx)
if any(team.name.lower() == team_name.lower() for team in self.teams):
await ctx.send(":x: {} is already the name of a team".format(team_name))
return
player_list = list(players)
if captain not in player_list:
player_list.append(captain)
if len(player_list) != team_size:
await ctx.send(":x: Teams must be {} players exactly".format(team_size))
return
if ctx.author not in player_list:
await ctx.send(":x: You can only register a team that you're a player on")
return
team = Team(team_name, captain, player_list, 0, 0, default_elo, False)
self.teams.append(team)
await self._save_teams(ctx, self.teams)
await ctx.send("Done\nYour team will need to be approved first before you can participate in the event. You'll get a dm when that has occurred.")
@commands.guild_only()
@commands.command(aliases=["glt"])
async def getLadderTeams(self, ctx):
"""Gets info for all the ladder teams that have been approved already."""
await self.load_teams(ctx)
approvedTeams = [team for team in self.teams if team.approved]
if not approvedTeams:
ctx.send("There are no approved teams at this time")
return
embeds = []
for team in approvedTeams:
embed = self.embed_team_info(team)
embeds.append(embed)
await ctx.send("There are currently {} ladder teams:".format(len(approvedTeams)))
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.guild_only()
@commands.command(aliases=["gult"])
async def getUnapprovedLadderTeams(self, ctx):
"""Gets a list of the registered teams that haven't been approved to participate yet. This should help when determining whether to approve a team or not."""
if not await self.has_perms(ctx):
return
await self.load_teams(ctx)
unapprovedTeams = [team for team in self.teams if not team.approved]
if not unapprovedTeams:
ctx.send("There are no unapproved teams at this time")
return
embeds = []
for team in unapprovedTeams:
embed = self.embed_team_player_info(team)
embeds.append(embed)
await ctx.send("There are currently {} unapproved teams:".format(len(unapprovedTeams)))
await menu(ctx, embeds, DEFAULT_CONTROLS)
@commands.guild_only()
@commands.command(aliases=["alt"])
async def approveLadderTeam(self, ctx, team_name, elo_rating: int = default_elo):
"""Approves a team to participate in the event. Make sure that they fit under whatever guidelines we set before approving them.
The players will get a dm saying their team has been approved."""
if not await self.has_perms(ctx):
return
        await self.load_teams(ctx)
        try:
team = next(team for team in self.teams if team.name.lower() == team_name.lower())
except:
await ctx.send(":x: No team found with name {}".format(team_name))
return
if team.approved:
await ctx.send(":x: {} has already been approved".format(team_name))
return
team.elo_rating = elo_rating
team.approved = True
await self._save_teams(ctx, self.teams)
for player in team.players:
try:
await player.send(":white_check_mark: Your ladder team, {0}, has been approved to participate in the ladder event within {1}".format(team.name, ctx.guild.name))
except:
pass
await ctx.send("Done\nDM was sent to the players of that team.")
@commands.guild_only()
@commands.command(aliases=["rjlt"])
async def rejectLadderTeam(self, ctx, team_name, *reason):
"""Rejects a team and prevents them from participating in the event. This cannot be undone, but the team can try and register again.
There's an optional parameter for including a reason why they were rejected. The players will get a dm saying their team was rejected along with the reason if given."""
if not await self.has_perms(ctx):
return
        await self.load_teams(ctx)
        try:
team = next(team for team in self.teams if team.name.lower() == team_name.lower())
except:
await ctx.send(":x: No team found with name {}".format(team_name))
return
if team.approved:
await ctx.send(":x: {} has already been approved".format(team_name))
return
self.teams.remove(team)
await self._save_teams(ctx, self.teams)
for player in team.players:
try:
                if len(reason) > 0:
                    await player.send(":x: Your ladder team, {0}, has been rejected and will not be allowed to participate in the ladder event within {1}. "
                        "Reason for rejection: `{2}`. To register a new team you can use the `{3}rlt` command again".format(team.name, ctx.guild.name, " ".join(reason), ctx.prefix))
else:
await player.send(":x: Your ladder team, {0}, has been rejected and will not be allowed to participate in the ladder event within {1}. "
"To register a new team you can use the `{2}rlt` command again".format(team.name, ctx.guild.name, ctx.prefix))
except:
pass
await ctx.send("Done\nDM was sent to the players of that team.")
@commands.guild_only()
@commands.command(aliases=["slg"])
async def startLadderGame(self, ctx, team_1_name, team_2_name):
"""Attempts to start a ladder game between the two teams. You'll need to be a player on one of the teams to use this
command and the other team's captain will need to agree to start the game as well."""
await self.load_teams(ctx)
try:
team_1 = next(team for team in self.teams if team.name.lower() == team_1_name.lower())
team_2 = next(team for team in self.teams if team.name.lower() == team_2_name.lower())
except:
await ctx.send(":x: One of the two team names didn't match a team")
return
if team_1 == team_2:
await ctx.send(":x: The two teams used in the command are the same")
return
if not team_1.approved or not team_2.approved:
await ctx.send(":x: One of the two teams has not been approved to participate yet")
return
if ctx.author not in team_1.players and ctx.author not in team_2.players:
await ctx.send(":x: You can't start a game between two teams that you're not a player on")
return
if await self.verify_start_game(ctx, team_1, team_2, self.get_opposing_captain_by_teams(ctx, team_1, team_2)):
game = await self.create_game(ctx, team_1, team_2)
await self.send_game_info(ctx, game)
self.games.append(game)
await self._save_games(ctx, self.games)
await ctx.send("Done")
@commands.guild_only()
@commands.command(aliases=["fslg"])
@checks.admin_or_permissions(manage_guild=True)
async def forceStartLadderGame(self, ctx, team_1_name, team_2_name):
"""Primarily for testing. Forces a new game between two teams."""
await self.load_teams(ctx)
try:
team_1 = next(team for team in self.teams if team.name.lower() == team_1_name.lower())
team_2 = next(team for team in self.teams if team.name.lower() == team_2_name.lower())
except:
await ctx.send(":x: One of the two team names didn't match a team")
return
if team_1 == team_2:
await ctx.send(":x: The two teams used in the command are the same")
return
if not team_1.approved or not team_2.approved:
await ctx.send(":x: One of the two teams has not been approved to participate yet")
return
if ctx.author not in team_1.players and ctx.author not in team_2.players:
await ctx.send(":x: You can't start a game between two teams that you're not a player on")
return
game = await self.create_game(ctx, team_1, team_2)
await self.send_game_info(ctx, game)
self.games.append(game)
await self._save_games(ctx, self.games)
await ctx.send("Done")
@commands.guild_only()
@commands.command(aliases=["clg"])
async def cancelLadderGame(self, ctx):
"""Cancel the current ladder game. Can only be used in a ladder game channel.
The game will end with no wins given to either team. The teams will then be allowed to start a new game."""
await self.load_teams(ctx)
await self.load_games(ctx)
try:
game = next(game for game in self.games if game.textChannel == ctx.channel)
except:
await ctx.send(":x: This command can only be used in a ladder game channel.")
return
opposing_captain = self.get_opposing_captain(ctx, game)
if opposing_captain is None:
await ctx.send(":x: Only players on one of the two teams can cancel the game.")
return
msg = await ctx.send("{0} Please verify that both teams want to cancel the game. You have {1} seconds to verify".format(opposing_captain.mention, verify_timeout))
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, opposing_captain)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=verify_timeout)
if pred.result is True:
await ctx.send("Done. Feel free to start a new game.\n**This channel will be deleted in 30 seconds**")
await self.remove_game(ctx, game)
else:
await ctx.send(":x: Cancel not verified. To cancel the game you will need to use the `{0}clg` command again.".format(ctx.prefix))
except asyncio.TimeoutError:
await ctx.send(":x: Cancel not verified in time. To cancel the game you will need to use the `{0}clg` command again."
"\n**If one of the captains is afk, have someone from that team use the command.**".format(ctx.prefix))
@commands.guild_only()
@commands.command(aliases=["fclg"])
async def forceCancelLadderGame(self, ctx):
"""Cancel the current ladder game. Can only be used in a ladder game channel.
The game will end with no wins given to either team. The teams will then be allowed to start a new game."""
if not await self.has_perms(ctx):
return
await self.load_teams(ctx)
await self.load_games(ctx)
try:
game = next(game for game in self.games if game.textChannel == ctx.channel)
except:
await ctx.send(":x: This command can only be used in a ladder game channel.")
return
msg = await ctx.send("{0} Please verify that you want to cancel this game.".format(ctx.author.mention))
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
game.scoreReported = True
pred = ReactionPredicate.yes_or_no(msg, ctx.author)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=verify_timeout)
if pred.result is True:
await ctx.send("Done. Feel free to start a new game.\n**This channel will be deleted in 30 seconds**")
await self.remove_game(ctx, game)
else:
await ctx.send(":x: Cancel not verified. To cancel the game you will need to use the `{0}clg` command again.".format(ctx.prefix))
except asyncio.TimeoutError:
await ctx.send(":x: Cancel not verified in time. To cancel the game you will need to use the `{0}clg` command again.".format(ctx.prefix))
@commands.guild_only()
@commands.command(aliases=["lr"])
async def ladderResult(self, ctx, blue_team_wins: int, orange_team_wins: int):
"""Submits the result of the ladder game. Should be used in the text channel corresponding to the game.
Both teams need to agree on the result before it is finalized."""
await self.load_teams(ctx)
await self.load_games(ctx)
try:
game = next(game for game in self.games if game.textChannel == ctx.channel)
except:
await ctx.send(":x: This command can only be used in a ladder game channel.")
return
        if game.scoreReported:
await ctx.send(":x: Someone has already reported the results or is waiting for verification")
            return
        if blue_team_wins + orange_team_wins < 1:
            await ctx.send(":x: At least one game win must be reported between the two teams")
            return
game_time = ctx.message.created_at - ctx.channel.created_at
if game_time.seconds < minimum_game_time:
await ctx.send(":x: You can't report a game outcome until at least **10 minutes** have passed since the game was created."
"\nCurrent time that's passed = **{0} minute(s)**".format(game_time.seconds // 60))
return
opposing_captain = self.get_opposing_captain(ctx, game)
if opposing_captain is None:
await ctx.send(":x: Only players on one of the two teams can report the result.")
return
if await self.verify_game_results(ctx, game, blue_team_wins, orange_team_wins, opposing_captain):
await self.finish_game(ctx, game, blue_team_wins, orange_team_wins)
await ctx.send("Done. Thanks for playing!\n**This channel and the team voice channels will be deleted in 30 seconds**")
await self.remove_game(ctx, game)
@commands.guild_only()
@commands.command(aliases=["flr"])
async def forceLadderResult(self, ctx, blue_team_wins: int, orange_team_wins: int):
"""Overrides the verification process for submitting the result of a game in the case that the two teams can't submit it themselves."""
if not await self.has_perms(ctx):
return
await self.load_teams(ctx)
await self.load_games(ctx)
try:
game = next(game for game in self.games if game.textChannel == ctx.channel)
except:
await ctx.send(":x: This command can only be used in a ladder game channel.")
return
if await self.verify_game_results(ctx, game, blue_team_wins, orange_team_wins, ctx.author):
await self.finish_game(ctx, game, blue_team_wins, orange_team_wins)
await ctx.send("Done. Thanks for playing!\n**This channel and the team voice channels will be deleted in 30 seconds**")
await self.remove_game(ctx, game)
@commands.guild_only()
@commands.command(aliases=["llb"])
async def ladderLeaderboard(self, ctx):
"""Shows the top ten teams in terms of current Elo rating"""
await self.load_teams(ctx)
approvedTeams = [team for team in self.teams if team.approved]
if not approvedTeams:
ctx.send("There are no approved teams at this time")
return
approvedTeams.sort(key=lambda team: team.elo_rating, reverse=True)
await ctx.send(embed=self.embed_leaderboard(ctx, approvedTeams, await self._games_played(ctx)))
@commands.guild_only()
@commands.command(aliases=["glti"])
async def getLadderTeamInfo(self, ctx, team_name):
""""Gets all the info corresponding to a ladder team. **Team names will need to be in quotes.**
Shows the captain, players, team record, and Elo rating."""
await self.load_teams(ctx)
try:
team = next(team for team in self.teams if team.name.lower() == team_name.lower())
await ctx.send(embed=self.embed_team_info(team))
except:
await ctx.send(":x: There's no team with the name: {}".format(team_name))
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def setLadderTextChannel(self, ctx, text_channel: discord.TextChannel):
"""Sets the ladder text channel where general ladder info will be sent"""
await self._save_text_channel(ctx, text_channel.id)
await ctx.send("Done")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def getLadderTextChannel(self, ctx):
"""Gets the ladder text channel"""
try:
await ctx.send("Ladder text channel set to: {0}".format((await self._text_channel(ctx)).mention))
except:
await ctx.send(":x: Ladder text channel not set")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def unsetLadderTextChannel(self, ctx):
"""Unsets the ladder text channel"""
await self._save_text_channel(ctx, None)
await ctx.send("Done")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def setLadderCategory(self, ctx, category_channel: discord.CategoryChannel):
"""Sets the ladder category channel where all ladder channels will be created under"""
await self._save_category(ctx, category_channel.id)
await ctx.send("Done")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def getLadderCategory(self, ctx):
"""Gets the channel currently assigned as the ladder category channel"""
try:
await ctx.send("Ladder category channel set to: {0}".format((await self._category(ctx)).mention))
except:
await ctx.send(":x: Ladder category channel not set")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def unsetLadderCategory(self, ctx):
"""Unsets the ladder category channel. Ladder channels will not be created if this is not set"""
await self._save_category(ctx, None)
await ctx.send("Done")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def setLadderHelperRole(self, ctx, helper_role: discord.Role):
"""Sets the ladder helper role. Anyone with this role will be able to see all the ladder game channels that are created"""
await self._save_helper_role(ctx, helper_role.id)
await ctx.send("Done")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def getLadderHelperRole(self, ctx):
"""Gets the ladder helper role"""
try:
await ctx.send("Ladder helper role set to: {0}".format((await self._helper_role(ctx)).name))
except:
await ctx.send(":x: ladder helper role not set")
@commands.guild_only()
@commands.command()
@checks.admin_or_permissions(manage_guild=True)
async def unsetLadderHelperRole(self, ctx):
"""Unsets the ladder helper role"""
await self._save_helper_role(ctx, None)
await ctx.send("Done")
    async def has_perms(self, ctx):
        helper_role = await self._helper_role(ctx)
        if ctx.author.guild_permissions.administrator:
            return True
        return helper_role is not None and helper_role in ctx.author.roles
async def create_game(self, ctx, team_1, team_2):
text_channel, voice_channels = await self.create_game_channels(ctx, team_1, team_2)
players = list(team_1.players) + list(team_2.players)
for player in players:
await text_channel.set_permissions(player, read_messages=True)
return Game(team_1, team_2, text_channel, voice_channels)
async def create_game_channels(self, ctx, team_1, team_2):
guild = ctx.message.guild
helper_role = await self._helper_role(ctx)
if helper_role:
text_overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False),
helper_role: discord.PermissionOverwrite(read_messages=True, manage_channels=True)
}
voice_overwrites = {
guild.default_role: discord.PermissionOverwrite(connect=False),
helper_role: discord.PermissionOverwrite(connect=True, manage_channels=True)
}
else:
text_overwrites = {
guild.default_role: discord.PermissionOverwrite(read_messages=False)
}
voice_overwrites = {
guild.default_role: discord.PermissionOverwrite(connect=False)
}
text_channel = await guild.create_text_channel("{0} vs {1} Ladder Game".format(team_1.name, team_2.name), overwrites= text_overwrites,
category= await self._category(ctx))
voice_channels = [
await guild.create_voice_channel("{}".format(team_1.name), overwrites= voice_overwrites, category= await self._category(ctx)),
await guild.create_voice_channel("{}".format(team_2.name), overwrites= voice_overwrites, category= await self._category(ctx))
]
return text_channel, voice_channels
async def verify_start_game(self, ctx, team_1, team_2, opposing_captain: discord.Member):
msg = await ctx.send("{0} Please verify that you want to start the game between these two teams:".format(opposing_captain.mention), embed=self.embed_team_comparison(team_1, team_2))
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
pred = ReactionPredicate.yes_or_no(msg, opposing_captain)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=start_game_verify_timeout)
if pred.result is True:
return True
else:
await ctx.send(":x: Ladder game between **{0}** and **{1}** not started.\nTo try and start a new game again use the `{2}slg` command.".format(team_1.name, team_2.name, ctx.prefix))
return False
except asyncio.TimeoutError:
await ctx.send(":x: Ladder game between **{0}** and **{1}** not verified in time.\nTo try and start a new game again use the `{2}slg` command.".format(team_1.name, team_2.name, ctx.prefix))
return False
async def verify_game_results(self, ctx, game, blue_team_wins, orange_team_wins, verifier: discord.Member):
msg = await ctx.send("{0} Please verify the results:\n**{1}** {2} - {3} **{4}**".format(verifier.mention, game.blue.name,
blue_team_wins, orange_team_wins, game.orange.name))
start_adding_reactions(msg, ReactionPredicate.YES_OR_NO_EMOJIS)
game.scoreReported = True
pred = ReactionPredicate.yes_or_no(msg, verifier)
try:
await ctx.bot.wait_for("reaction_add", check=pred, timeout=verify_timeout)
if pred.result is True:
return True
else:
game.scoreReported = False
await ctx.send(":x: Ladder game result not verified. To report the result you will need to use the `{0}lr` command again.".format(ctx.prefix))
return False
except asyncio.TimeoutError:
game.scoreReported = False
await ctx.send(":x: Ladder game result not verified in time. To report the result you will need to use the `{0}lr` command again.\n"
"**If one of the captains is afk, have someone from that team use the command.**".format(ctx.prefix))
return False
async def finish_game(self, ctx, game, blue_team_wins, orange_team_wins):
blue_team = game.blue
orange_team = game.orange
blue_team_new_elo, orange_team_new_elo = self.update_elo(blue_team.elo_rating, orange_team.elo_rating, blue_team_wins / (blue_team_wins + orange_team_wins))
await ctx.send(embed=self.embed_game_results(blue_team, blue_team_wins, orange_team_wins, orange_team, blue_team_new_elo, orange_team_new_elo))
self.update_team_info(blue_team, blue_team_wins, orange_team_wins, blue_team_new_elo)
self.update_team_info(orange_team, orange_team_wins, blue_team_wins, orange_team_new_elo)
await self._save_teams(ctx, self.teams)
await self._save_games_played(ctx, (await self._games_played(ctx)) + blue_team_wins + orange_team_wins)
async def remove_game(self, ctx, game):
self.games.remove(game)
await self._save_games(ctx, self.games)
await asyncio.sleep(30)
await ctx.channel.delete()
for vc in game.voiceChannels:
await vc.delete()
def get_opposing_captain(self, ctx, game):
return self.get_opposing_captain_by_teams(ctx, game.blue, game.orange)
def get_opposing_captain_by_teams(self, ctx, team_1, team_2):
opposing_captain = None
if ctx.author in team_1.players:
opposing_captain = team_2.captain
elif ctx.author in team_2.players:
opposing_captain = team_1.captain
return opposing_captain
def update_team_info(self, team, wins, losses, elo_rating):
team.wins += wins
team.losses += losses
team.elo_rating = elo_rating
def update_elo(self, team_1_elo, team_2_elo, result):
"""Calculates and returns the new Elo ratings for the two teams based on their match results and the K-factor.
Result param should be a decimal between 0 and 1 relating to the match results for team 1, i.e. a result of 1
means team 1 won all the games in the match, a result of .25 means team 1 won 25% of the games in the match."""
elo_dif = team_1_elo - team_2_elo
exponent = -1 * (elo_dif / 100)
expectation = 1 / (1 + pow(10, exponent))
team_1_new_elo = round(team_1_elo + (k_factor * (result - expectation)))
team_2_new_elo = round(team_2_elo + (k_factor * ((1 - result) - (1 - expectation))))
return team_1_new_elo, team_2_new_elo
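    # Worked example: with both teams at 1500 Elo, elo_dif is 0, so the
    # expectation is 1 / (1 + 10**0) = 0.5. If team 1 sweeps the series
    # (result = 1.0), team 1 gains k_factor * 0.5 = 25 points (1500 -> 1525)
    # and team 2 loses 25 (1500 -> 1475).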
def embed_team_comparison(self, team_1, team_2):
embed = discord.Embed(title="{0} vs. {1} Team Comparison".format(team_1.name, team_2.name), color=discord.Colour.blue())
embed.add_field(name="Players", value="**{0}**: {1}\n**{2}**: {3}\n".format(team_1.name, ", ".join([player.mention for player in team_1.players]),
team_2.name, ", ".join([player.mention for player in team_2.players])), inline=False)
embed.add_field(name="Records", value="**{0}**: {1} - {2}\n**{3}**: {4} - {5}\n".format(team_1.name, team_1.wins, team_1.losses, team_2.name, team_2.wins, team_2.losses), inline=False)
embed.add_field(name="Elo Ratings", value="**{0}**: {1}\n**{2}**: {3}\n".format(team_1.name, team_1.elo_rating, team_2.name, team_2.elo_rating), inline=False)
return embed
async def send_game_info(self, ctx, game):
helper_role = await self._helper_role(ctx)
await game.textChannel.send("{}\n".format(", ".join([player.mention for player in game.players])))
embed = discord.Embed(title="{0} vs. {1} Ladder Game Info".format(game.blue.name, game.orange.name), color=discord.Colour.blue())
embed.add_field(name="Blue Team", value="**{0}**: {1}\n".format(game.blue.name, ", ".join([player.mention for player in game.blue.players])), inline=False)
embed.add_field(name="Orange Team", value="**{0}**: {1}\n".format(game.orange.name, ", ".join([player.mention for player in game.orange.players])), inline=False)
embed.add_field(name="Captains", value="**Blue:** {0}\n**Orange:** {1}".format(game.blue.captain.mention, game.orange.captain.mention), inline=False)
embed.add_field(name="Lobby Info", value="**Name:** {0}\n**Password:** {1}".format(game.roomName, game.roomPass), inline=False)
embed.add_field(name="Additional Info", value="Feel free to play whatever type of series you want, whether a bo3, bo5, or any other.\n\n"
"When you are done playing with the current teams please report the results using the command `{0}lr [blue_team_wins] [orange_team_wins]` where both "
"the `blue_team_wins` and `orange_team_wins` parameters are the number of wins each team had. Both teams will need to verify the results.\n\nIf you wish to cancel "
"the game you can use the `{0}clg` command. Both teams will need to verify that they wish to cancel the game.".format(ctx.prefix), inline=False)
help_message = "If you think the bot isn't working correctly or have suggestions to improve it, please contact adammast."
if helper_role:
help_message = "If you need any help or have questions please contact someone with the {0} role. ".format(helper_role.mention) + help_message
embed.add_field(name="Help", value=help_message, inline=False)
await game.textChannel.send(embed=embed)
def embed_game_results(self, team_1, team_1_wins: int, team_2_wins: int, team_2, team_1_new_elo, team_2_new_elo):
embed = discord.Embed(title="{0} vs. {1}".format(team_1.name, team_2.name), color=discord.Colour.blue())
embed.add_field(name="Players", value="**{0}**: {1}\n**{2}**: {3}\n".format(team_1.name, ", ".join([player.mention for player in team_1.players]),
team_2.name, ", ".join([player.mention for player in team_2.players])), inline=False)
embed.add_field(name="Result", value="**{0}** {1} - {2} **{3}**\n".format(team_1.name, team_1_wins, team_2_wins, team_2.name), inline=False)
embed.add_field(name="Updated Elo Rating", value="**{0}** = {1} ({2})\n**{3}** = {4} ({5})\n".format(team_1.name, team_1_new_elo, team_1_new_elo - team_1.elo_rating,
team_2.name, team_2_new_elo, team_2_new_elo - team_2.elo_rating), inline=False)
return embed
def embed_team_player_info(self, team):
embed = discord.Embed(title="{0}".format(team.name), color=discord.Colour.blue())
embed.add_field(name="Captain", value="{}\n".format(team.captain.mention), inline=False)
embed.add_field(name="Players", value="{}\n".format(", ".join([player.mention for player in team.players])), inline=False)
return embed
def embed_team_info(self, team):
embed = self.embed_team_player_info(team)
embed.add_field(name="Games Played", value="{}\n".format(team.wins + team.losses), inline=False)
embed.add_field(name="Record", value="{0} - {1}\n".format(team.wins, team.losses), inline=False)
embed.add_field(name="Elo Rating", value="{}\n".format(team.elo_rating), inline=False)
return embed
def embed_leaderboard(self, ctx, sorted_teams, games_played):
embed = discord.Embed(title="{0} Ladder Leaderboard".format(ctx.guild.name), color=discord.Colour.blue())
embed.add_field(name="Total Games Played", value="{}\n".format(games_played), inline=True)
index = 1
message = ""
for team in sorted_teams:
message += "`{0}` __**{1}:**__ **Elo Rating:** {2} **Record:** {3} - {4} **Games Played:** {5}\n".format(index, team.name, team.elo_rating,
team.wins, team.losses, team.wins + team.losses)
index += 1
if index > 10:
break
embed.add_field(name="Highest Elo Rating", value=message, inline=False)
return embed
async def load_teams(self, ctx, force_load = False):
if self.teams is None or self.teams == [] or force_load:
teams = await self._teams(ctx)
team_list = []
for key, value in teams.items():
name = value["Name"]
captain = ctx.guild.get_member(value["Captain"])
players = [ctx.guild.get_member(x) for x in value["Players"]]
wins = value["Wins"]
losses = value["Losses"]
elo_rating = value["EloRating"]
approved = value["Approved"]
team = Team(name, captain, players, wins, losses, elo_rating, approved)
team.id = int(key)
team_list.append(team)
self.teams = team_list
async def _teams(self, ctx):
return await self.config.guild(ctx.guild).Teams()
async def _save_teams(self, ctx, teams):
team_dict = {}
for team in teams:
team_dict[team.id] = team._to_dict()
await self.config.guild(ctx.guild).Teams.set(team_dict)
async def load_games(self, ctx, force_load = False):
if self.games is None or self.games == [] or force_load:
            await self.load_teams(ctx, force_load)
games = await self._games(ctx)
game_list = []
for key, value in games.items():
text_channel = ctx.guild.get_channel(value["TextChannel"])
voice_channels = [ctx.guild.get_channel(x) for x in value["VoiceChannels"]]
blue_team_id = value["Blue"]
orange_team_id = value["Orange"]
blue_team = next(x for x in self.teams if x.id == blue_team_id)
orange_team = next(x for x in self.teams if x.id == orange_team_id)
game = Game(blue_team, orange_team, text_channel, voice_channels)
game.id = int(key)
game.roomName = value["RoomName"]
game.roomPass = value["RoomPass"]
game.scoreReported = value["ScoreReported"]
game_list.append(game)
self.games = game_list
async def _games(self, ctx):
return await self.config.guild(ctx.guild).Games()
async def _save_games(self, ctx, games):
game_dict = {}
for game in games:
game_dict[game.id] = game._to_dict()
await self.config.guild(ctx.guild).Games.set(game_dict)
async def _scores(self, ctx):
return await self.config.guild(ctx.guild).Scores()
async def _save_scores(self, ctx, scores):
await self.config.guild(ctx.guild).Scores.set(scores)
async def _games_played(self, ctx):
return await self.config.guild(ctx.guild).GamesPlayed()
async def _save_games_played(self, ctx, games_played):
await self.config.guild(ctx.guild).GamesPlayed.set(games_played)
async def _category(self, ctx):
return ctx.guild.get_channel(await self.config.guild(ctx.guild).CategoryChannel())
async def _save_category(self, ctx, category):
await self.config.guild(ctx.guild).CategoryChannel.set(category)
async def _text_channel(self, ctx):
return ctx.guild.get_channel(await self.config.guild(ctx.guild).TextChannel())
async def _save_text_channel(self, ctx, text_channel):
await self.config.guild(ctx.guild).TextChannel.set(text_channel)
async def _helper_role(self, ctx):
return ctx.guild.get_role(await self.config.guild(ctx.guild).HelperRole())
async def _save_helper_role(self, ctx, helper_role):
await self.config.guild(ctx.guild).HelperRole.set(helper_role)
class Team:
def __init__(self, name, captain, players, wins, losses, elo_rating, approved):
self.id = uuid.uuid4().int
self.name = name
self.captain = captain
self.players = set(players)
self.wins = wins
self.losses = losses
self.elo_rating = elo_rating
self.approved = approved
def _to_dict(self):
return {
"Name": self.name,
"Captain": self.captain.id,
"Players": [x.id for x in self.players],
"Wins": self.wins,
"Losses": self.losses,
"EloRating": self.elo_rating,
"Approved": self.approved
}
class Game:
def __init__(self, blue_team: Team, orange_team: Team, text_channel, voice_channels):
self.id = uuid.uuid4().int
self.captains = [blue_team.captain, orange_team.captain]
self.players = blue_team.players.union(orange_team.players)
self.blue = blue_team
self.orange = orange_team
self.roomName = self._generate_name_pass()
self.roomPass = self._generate_name_pass()
self.textChannel = text_channel
self.voiceChannels = voice_channels #List of voice channels: [Blue, Orange]
self.scoreReported = False
def _to_dict(self):
return {
"Blue": self.blue.id,
"Orange": self.orange.id,
"RoomName": self.roomName,
"RoomPass": self.roomPass,
"TextChannel": self.textChannel.id,
"VoiceChannels": [x.id for x in self.voiceChannels],
"ScoreReported": self.scoreReported
}
    def _generate_name_pass(self):
        return random.choice(room_pass)
# TODO: Load from file?
room_pass = [
'octane', 'takumi', 'dominus', 'hotshot', 'batmobile', 'mantis',
'paladin', 'twinmill', 'centio', 'breakout', 'animus', 'venom',
'xdevil', 'endo', 'masamune', 'merc', 'backfire', 'gizmo',
'roadhog', 'armadillo', 'hogsticker', 'luigi', 'mario', 'samus',
'sweettooth', 'cyclone', 'imperator', 'jager', 'mantis', 'nimbus',
'samurai', 'twinzer', 'werewolf', 'maverick', 'artemis', 'charger',
'skyline', 'aftershock', 'boneshaker', 'delorean', 'esper',
'fast4wd', 'gazella', 'grog', 'jeep', 'marauder', 'mclaren',
'mr11', 'proteus', 'ripper', 'scarab', 'tumbler', 'triton',
'vulcan', 'zippy',
'aquadome', 'beckwith', 'champions', 'dfh', 'mannfield',
'neotokyo', 'saltyshores', 'starbase', 'urban', 'utopia',
'wasteland', 'farmstead', 'arctagon', 'badlands', 'core707',
'dunkhouse', 'throwback', 'underpass', 'badlands',
'20xx', 'biomass', 'bubbly', 'chameleon', 'dissolver', 'heatwave',
'hexed', 'labyrinth', 'parallax', 'slipstream', 'spectre',
'stormwatch', 'tora', 'trigon', 'wetpaint',
'ara51', 'ballacarra', 'chrono', 'clockwork', 'cruxe',
'discotheque', 'draco', 'dynamo', 'equalizer', 'gernot', 'hikari',
'hypnotik', 'illuminata', 'infinium', 'kalos', 'lobo', 'looper',
'photon', 'pulsus', 'raijin', 'reactor', 'roulette', 'turbine',
'voltaic', 'wonderment', 'zomba',
'unranked', 'prospect', 'challenger', 'risingstar', 'allstar',
'superstar', 'champion', 'grandchamp', 'bronze', 'silver', 'gold',
'platinum', 'diamond',
'dropshot', 'hoops', 'soccar', 'rumble', 'snowday', 'solo',
'doubles', 'standard', 'chaos',
'armstrong', 'bandit', 'beast', 'boomer', 'buzz', 'cblock',
'casper', 'caveman', 'centice', 'chipper', 'cougar', 'dude',
'foamer', 'fury', 'gerwin', 'goose', 'heater', 'hollywood',
'hound', 'iceman', 'imp', 'jester', 'junker', 'khan', 'marley',
'maverick', 'merlin', 'middy', 'mountain', 'myrtle', 'outlaw',
'poncho', 'rainmaker', 'raja', 'rex', 'roundhouse', 'sabretooth',
'saltie', 'samara', 'scout', 'shepard', 'slider', 'squall',
'sticks', 'stinger', 'storm', 'sultan', 'sundown', 'swabbie',
'tex', 'tusk', 'viper', 'wolfman', 'yuri'
]
|
1636671
|
import torch.nn as nn
class SubCellFNN(nn.Module):
    # FNN for subcellular localization: a 10-state location prediction plus
    # a binary classification into membrane-bound vs. soluble
def __init__(self, use_batch_norm=True):
super(SubCellFNN, self).__init__()
# Linear layer, taking embedding dimension 1024 to make predictions:
if use_batch_norm:
self.layer = nn.Sequential(
nn.Linear(1024, 32), # in, out
nn.Dropout(0.25), # dropout
nn.ReLU(),
nn.BatchNorm1d(32)
)
else:
self.layer = nn.Sequential(
nn.Linear(1024, 32), # in, out
nn.Dropout(0.25), # dropout
nn.ReLU(),
)
self.loc_classifier = nn.Linear(32, 10)
self.mem_classifier = nn.Linear(32, 2)
def forward(self, x):
out = self.layer(x) # map 1024-dimensional ELMo vector to 32-dims
# based on 32 dims, predict localization and membrane-bound
Yhat_loc = self.loc_classifier(out)
Yhat_mem = self.mem_classifier(out)
return Yhat_loc, Yhat_mem
class SecStructCNN(nn.Module):
# Convolutional neural network for prediction of Sec.Struct. in 3- & 8-states and disorder
def __init__(self):
super(SecStructCNN, self).__init__()
self.elmo_feature_extractor = nn.Sequential(
nn.Conv2d(1024, 32, kernel_size=(7, 1), padding=(3, 0)),
nn.ReLU(),
nn.Dropout(0.25),
)
self.dssp3_classifier = nn.Sequential(
nn.Conv2d(32, 3, kernel_size=(7, 1), padding=(3, 0))
)
self.dssp8_classifier = nn.Sequential(
nn.Conv2d(32, 8, kernel_size=(7, 1), padding=(3, 0))
)
self.diso_classifier = nn.Sequential(
nn.Conv2d(32, 2, kernel_size=(7, 1), padding=(3, 0))
)
def forward(self, x):
x = self.elmo_feature_extractor(x) # compress ELMo features to 32-dims
d3_Yhat = self.dssp3_classifier(x)
d8_Yhat = self.dssp8_classifier(x)
diso_Yhat = self.diso_classifier(x)
return d3_Yhat, d8_Yhat, diso_Yhat
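# Minimal shape check (a sketch; batch size and sequence length below are
# arbitrary). SubCellFNN takes per-protein 1024-dim embeddings as (B, 1024);
# SecStructCNN takes per-residue embeddings as (B, 1024, L, 1).
if __name__ == "__main__":
    import torch
    fnn = SubCellFNN()
    fnn.eval()  # disable dropout and batch-norm updates for the check
    loc, mem = fnn(torch.randn(4, 1024))
    assert loc.shape == (4, 10) and mem.shape == (4, 2)
    cnn = SecStructCNN()
    d3, d8, diso = cnn(torch.randn(2, 1024, 100, 1))
    assert d3.shape == (2, 3, 100, 1) and d8.shape == (2, 8, 100, 1) and diso.shape == (2, 2, 100, 1)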
|
1636685
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from kubernetes.stream import stream
class K8sController():
def __init__(self, kube_config=None):
if not kube_config:
config.load_incluster_config()
else:
config.load_kube_config(config_file=kube_config)
self._appv1api = client.AppsV1Api()
self._corev1api = client.CoreV1Api()
self._networkv1api = client.NetworkingV1beta1Api()
self._extendv1api = client.ExtensionsV1beta1Api()
self._batchv1api = client.BatchV1Api()
def deployment_ready(self, deploy_name, namespace='default'):
ret = self._appv1api.read_namespaced_deployment(deploy_name, namespace)
return ret.status.ready_replicas == ret.status.replicas
def job_ready(self, job_name, namespace='default'):
pod_names = self.get_job_pod_names(job_name, namespace=namespace)
return len(pod_names) > 0
def get_job_status(self, job_name, namespace='default'):
ret = self._batchv1api.read_namespaced_job(job_name, namespace)
if ret.status.succeeded is not None:
return 'Success'
pods = self._corev1api.list_namespaced_pod(
namespace=namespace,
label_selector='job-name={}'.format(job_name))
        # a job should have exactly one pod here; zero pods would otherwise
        # raise an IndexError below
        if len(pods.items) != 1:
            return 'Error'
        return pods.items[0].status.phase
def get_job_pod_names(self, job_name, namespace='default', get_all_pods=False):
pod_names = []
pods = self._corev1api.list_namespaced_pod(
namespace=namespace,
label_selector='job-name={}'.format(job_name))
for pod in pods.items:
if get_all_pods or pod.status.phase == "Running":
pod_names.append(pod.metadata.name)
return pod_names
def get_pod_log(self, pod_name, namespace='default'):
return self._corev1api.read_namespaced_pod_log(pod_name, namespace)
def get_deployment_replica_set(self, deploy_name, namespace='default'):
ret = self._appv1api.read_namespaced_deployment(deploy_name, namespace)
if ret.status.ready_replicas == ret.status.replicas:
            # the latest condition message looks like
            # 'ReplicaSet "<name>" has successfully progressed.';
            # extract the quoted ReplicaSet name
            appid = ret.status.conditions[-1].message.split(" ")[1][1:-1]
return appid
else:
            raise RuntimeError(
                "Deployment is pending, ready pods: {}/{}".format(ret.status.ready_replicas, ret.status.replicas))
def get_deployment_available_pod_names(self, deploy_name, namespace='default'):
appid = self.get_deployment_replica_set(deploy_name, namespace=namespace)
pod_names = []
pod_ret = self._corev1api.list_namespaced_pod(namespace)
for i in pod_ret.items:
if appid in i.metadata.name and i.metadata.deletion_timestamp is None:
pod_names.append(i.metadata.name)
return pod_names
def get_pod_ip(self, pod_name, namespace='default'):
pod_ret = self._corev1api.read_namespaced_pod(pod_name, namespace)
return pod_ret.status.pod_ip
def execute_pod(self, pod_name, cmd_str, namespace='default'):
os.system("kubectl exec -it {} -n {} -- {}".format(pod_name, namespace, cmd_str))
def create_service(self, metadata, spec, namespace='default'):
request = client.V1Service(api_version='v1',
kind='Service',
metadata=metadata,
spec=spec)
self._corev1api.create_namespaced_service(
namespace, request)
def create_ingress(self, metadata, spec, namespace='default'):
request = client.NetworkingV1beta1Ingress(
api_version='networking.k8s.io/v1beta1',
kind='Ingress',
metadata=metadata,
spec=spec)
self._networkv1api.create_namespaced_ingress(
namespace, request)
def create_deployment(self, body, namespace='default'):
self._appv1api.create_namespaced_deployment(
body=body,
namespace=namespace)
def create_job(self, body, namespace='default'):
self._batchv1api.create_namespaced_job(
body=body,
namespace=namespace)
def delete_service(self, service_name, namespace='default'):
try:
self._corev1api.delete_namespaced_service(service_name, namespace=namespace)
except ApiException as e:
print("Service {} delete failed, err msg: {}".format(service_name, e))
def delete_ingress(self, ingress_name, namespace='default'):
try:
self._extendv1api.delete_namespaced_ingress(ingress_name, namespace=namespace)
except ApiException as e:
print("Ingress {} delete failed, err msg: {}".format(ingress_name, e))
def delete_deployment(self, deploy_name, namespace='default'):
try:
self._appv1api.delete_namespaced_deployment(deploy_name, namespace=namespace)
except ApiException as e:
print("Deployment {} delete failed, err msg: {}".format(deploy_name, e))
def delete_job(self, job_name, namespace='default'):
try:
self._batchv1api.delete_namespaced_job(job_name, namespace=namespace, propagation_policy='Background')
except ApiException as e:
print("Job {} delete failed, err msg: {}".format(job_name, e))
|
1636690
|
import mtalg
def get_num_threads():
"""Get number of threads for MRNGs and algebra functions
Args:
num_threads: Number of threads
"""
return mtalg.core.threads._global_num_threads
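# Companion usage sketch (assumes mtalg's public API exposes set_num_threads):
#   mtalg.set_num_threads(4)   # before creating multithreaded RNGs
#   get_num_threads()          # -> 4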
|
1636702
|
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import pandas as pd
NUM_NAMESPACES = 1
TOTAL_GOAL_PODS = 10001
GOAL_PODS = TOTAL_GOAL_PODS/NUM_NAMESPACES
fig = plt.figure()
ax1 = fig.add_subplot(2,1,1)
ax2 = fig.add_subplot(2,1,2)
time_elapsed_text = ax2.text(0.1, 0.25, '', fontsize=15)
running_pods_text = ax2.text(0.1, 0.5, '', fontsize=15)
completion_estimate_text = ax2.text(0.1, 0.75, '', fontsize=15)
def animate(i):
df = pd.read_csv('./progress.csv')
df.columns = df.columns.str.replace(' ', '')
df['relative-timestamp'] = df['timestamp'] - df['timestamp'].min()
df.drop('timestamp', axis='columns', inplace=True)
df.set_index('relative-timestamp', inplace=True)
ax1.clear()
ax1.plot(df)
ax1.legend(df.columns, loc='upper left')
    ax1.set_title('Progress Towards 10k!')
ax1.set_xlabel('Time Elapsed (s)')
ax1.set_ylabel('Count')
# Calculate estimated time
minutes_elapsed = df.last_valid_index()/60
current_running_pods = df['running-pods'].max()
    portion_complete = current_running_pods/GOAL_PODS
    # guard against division by zero before the first pods are running
    estimated_completion_minutes = minutes_elapsed / portion_complete if portion_complete else float('inf')
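    # Worked example: with GOAL_PODS = 10001, 2000 running pods after 10
    # minutes gives portion_complete ~= 0.2, i.e. an estimate of ~50 minutes.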
time_elapsed_str = f'Minutes elapsed: {"{0:.2f}".format(minutes_elapsed)} minutes'
time_elapsed_text.set_text(time_elapsed_str)
running_pods_str = f'Number of running pods: {current_running_pods}'
running_pods_text.set_text(running_pods_str)
completion_estimate_str = f'Estimated completion time: {"{0:.2f}".format(estimated_completion_minutes)} minutes'
completion_estimate_text.set_text(completion_estimate_str)
ani = animation.FuncAnimation(fig, animate, interval=2000)
plt.show()
|
1636735
|
import os
import sys
import traceback
from zlib import compress, decompress, error as zlib_error
from cmemcached_imp import *
import cmemcached_imp
import threading
_FLAG_PICKLE = 1 << 0
_FLAG_INTEGER = 1 << 1
_FLAG_LONG = 1 << 2
_FLAG_BOOL = 1 << 3
_FLAG_COMPRESS = 1 << 4
_FLAG_MARSHAL = 1 << 5
VERSION = "0.41-greenify"
def prepare(val, comp_threshold):
val, flag = cmemcached_imp.prepare(val)
if comp_threshold > 0 and val and len(val) > comp_threshold:
val = compress(val)
flag |= _FLAG_COMPRESS
return val, flag
def restore(val, flag):
if val is None:
return val
if flag & _FLAG_COMPRESS:
try:
val = decompress(val)
except zlib_error:
return None
flag &= ~_FLAG_COMPRESS
return cmemcached_imp.restore(val, flag)
class ThreadUnsafe(Exception):
pass
class Client(cmemcached_imp.Client):
"a wraper around cmemcached_imp"
def __init__(self, servers, do_split=1, comp_threshold=0, behaviors={}, logger=None, cas_support=False, *a, **kw):
cmemcached_imp.Client.__init__(self)
self.servers = servers
self.do_split = do_split
self.comp_threshold = comp_threshold
self.behaviors = dict(behaviors.items())
self.add_server(servers)
self.set_behavior(BEHAVIOR_NO_BLOCK, 1) # nonblock
self.set_behavior(BEHAVIOR_TCP_NODELAY, 1) # nonblock
self.set_behavior(BEHAVIOR_TCP_KEEPALIVE, 1)
self.set_behavior(BEHAVIOR_CACHE_LOOKUPS, 1)
# self.set_behavior(BEHAVIOR_BUFFER_REQUESTS, 0) # no request buffer
#self.set_behavior(BEHAVIOR_KETAMA, 1)
self.set_behavior(BEHAVIOR_HASH, HASH_MD5)
self.set_behavior(BEHAVIOR_KETAMA_HASH, HASH_MD5)
self.set_behavior(BEHAVIOR_DISTRIBUTION, DIST_CONSISTENT_KETAMA)
if cas_support:
self.set_behavior(BEHAVIOR_SUPPORT_CAS, 1)
for k, v in behaviors.items():
self.set_behavior(k, v)
self._thread_ident = None
self._created_stack = traceback.extract_stack()
def __reduce__(self):
return (Client, (self.servers, self.do_split, self.comp_threshold, self.behaviors))
def set_behavior(self, k, v):
self.behaviors[k] = v
return cmemcached_imp.Client.set_behavior(self, k, v)
def set(self, key, val, time=0, compress=True):
comp = compress and self.comp_threshold or 0
val, flag = prepare(val, comp)
if val is not None:
return self.set_raw(key, val, time, flag)
else:
print >>sys.stderr, '[cmemcached]', 'serialize %s failed' % key
def set_raw(self, key, val, time, flag):
self._record_thread_ident()
self._check_thread_ident()
return cmemcached_imp.Client.set_raw(self, key, val, time, flag)
def set_multi(self, values, time=0, compress=True, return_failure=False):
self._record_thread_ident()
self._check_thread_ident()
comp = compress and self.comp_threshold or 0
raw_values = dict((k, prepare(v, comp)) for k, v in values.iteritems())
return self.set_multi_raw(raw_values, time, return_failure=return_failure)
def get(self, key):
self._record_thread_ident()
val, flag = cmemcached_imp.Client.get_raw(self, key)
return restore(val, flag)
def get_multi(self, keys):
self._record_thread_ident()
result = cmemcached_imp.Client.get_multi_raw(self, keys)
return dict((k, restore(v, flag))
for k, (v, flag) in result.iteritems())
def gets(self, key):
self._record_thread_ident()
val, flag, cas = cmemcached_imp.Client.gets_raw(self, key)
return restore(val, flag), cas
def get_list(self, keys):
self._record_thread_ident()
result = self.get_multi(keys)
return [result.get(key) for key in keys]
def expire(self, key):
self._record_thread_ident()
return self.touch(key, -1)
def reset(self):
self.clear_thread_ident()
def clear_thread_ident(self):
self._thread_ident = None
self._thread_ident_stack = None
def _record_thread_ident(self):
if self._thread_ident is None:
self._thread_ident = self._get_current_thread_ident()
def _check_thread_ident(self):
if self._get_current_thread_ident() != self._thread_ident:
raise ThreadUnsafe("mc client created in %s\n%s, called in %s" %
(self._thread_ident,
self._created_stack,
self._get_current_thread_ident()))
def _get_current_thread_ident(self):
return (os.getpid(), threading.current_thread().name)
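# Usage sketch (assumes a memcached server reachable at localhost:11211):
#   mc = Client(["localhost:11211"])
#   mc.set("answer", 42)
#   print mc.get("answer")  # -> 42; values are transparently (de)serialized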
|
1636748
|
import subscription.signals
def impossible_downgrade(sender, subscription, **kwargs):
    """change_check receiver: return an error message if moving from
    sender.subscription to `subscription` would be a downgrade, or None
    if the change is allowed."""
    before = sender.subscription
    after = subscription
    if not after.price:
        if before.price:
            return "You cannot downgrade to a free plan."
        return None
    if before.recurrence_unit:
        if not after.recurrence_unit:
            return "You cannot downgrade from recurring subscription to one-time."
        if after.price_per_day() > before.price_per_day():
            return None
        return "You cannot downgrade to a cheaper plan."
    if not after.recurrence_unit:
        if after.price > before.price:
            return None
        return "You cannot downgrade to a cheaper plan."
    # moving from one-time to recurring is always allowed
    return None
__installed = False
def install():
global __installed
if not __installed:
subscription.signals.change_check.connect(impossible_downgrade)
__installed = True
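# Typical wiring (a sketch): call install() once at application startup (e.g.
# from a Django AppConfig.ready()) so the change_check receiver is connected
# exactly once per process.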
|
1636753
|
import scrapy
import logging
from scrapy.crawler import CrawlerProcess
logging.getLogger('scrapy').propagate = False
class Cloner(scrapy.Spider):
name = "test"
custom_settings ={
'LOG_ENABLED': False
}
def parse(self, response):
#filename = response.url.split("/")[-1] + '.html'
with open('core/cloned.html', 'wb') as f:
f.write(response.body)
def Clone(url):
    process = CrawlerProcess()
    process.crawl(Cloner, start_urls=[url])
    process.start()
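# Usage sketch: Clone("https://example.com") fetches the page and writes it to
# core/cloned.html (the core/ directory must already exist). Note that
# CrawlerProcess.start() blocks and can only be called once per process.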
|
1636777
|
import requests
import pandas as pd
from bs4 import BeautifulSoup
import numpy as np
import datetime
pd.set_option('display.expand_frame_repr', False)
import re
def get_financial_statements(code):
    # extract the auth tokens (encparam, id) embedded in the page
re_enc = re.compile("encparam: '(.*)'", re.IGNORECASE)
re_id = re.compile("id: '([a-zA-Z0-9]*)' ?", re.IGNORECASE)
url = "https://companyinfo.stock.naver.com/v1/company/c1010001.aspx?cmp_cd={}".format(code)
html = requests.get(url).text
    search = re_enc.search(html)
    id_search = re_id.search(html)
    if search is None or id_search is None:
        return {}
    encparam = search.group(1)
    encid = id_search.group(1)
    # scrape the financial statements table
url = "https://companyinfo.stock.naver.com/v1/company/ajax/cF1001.aspx?cmp_cd={}&fin_typ=0&freq_typ=A&encparam={}&id={}".format(code, encparam, encid)
headers = {"Referer": "HACK"}
html = requests.get(url, headers=headers).text
soup = BeautifulSoup(html, "html5lib")
dividend = soup.select("table:nth-of-type(2) tr:nth-of-type(33) td span")
years = soup.select("table:nth-of-type(2) th")
dividend_dict = {}
for i in range(len(dividend)):
        # the offset of 3 skips the leading non-year header cells
        dividend_dict[years[i+3].text.strip()[:4]] = dividend[i].text
return dividend_dict
def get_3year_treasury():
url = "http://www.index.go.kr/strata/jsp/showStblGams3.jsp?stts_cd=288401&idx_cd=2884&freq=Y&period=1998:2018"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.select("tr td")
treasury_3year = {}
start_year = 1998
for x in td_data:
treasury_3year[start_year] = x.text
start_year += 1
return treasury_3year
def get_dividend_yield(code):
url = "http://companyinfo.stock.naver.com/company/c1010001.aspx?cmp_cd=" + code
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
dt_data = soup.select("td dl dt")
dividend_yield = dt_data[-2].text
dividend_yield = dividend_yield.split(' ')[1]
dividend_yield = dividend_yield[:-1]
return dividend_yield
def get_estimated_dividend_yield(code):
dividend_yield = get_financial_statements(code)
if len(dividend_yield) == 0:
return 0
dividend_yield = sorted(dividend_yield.items())[-1]
return dividend_yield[1]
def get_current_3year_treasury():
url = "http://finance.naver.com/marketindex/interestDailyQuote.nhn?marketindexCd=IRR_GOVT03Y&page=1"
html = requests.get(url).text
soup = BeautifulSoup(html, 'html5lib')
td_data = soup.select("tr td")
return td_data[1].text
def get_previous_dividend_yield(code):
dividend_yield = get_financial_statements(code)
now = datetime.datetime.now()
cur_year = now.year
previous_dividend_yield = {}
for year in range(cur_year-5, cur_year):
if str(year) in dividend_yield:
previous_dividend_yield[year] = dividend_yield[str(year)]
return previous_dividend_yield
if __name__ == "__main__":
estimated_dividend_yield = get_estimated_dividend_yield("058470")
print(estimated_dividend_yield)
current_3year_treasury = get_current_3year_treasury()
print(current_3year_treasury)
estimated_dividend_to_treasury = float(estimated_dividend_yield) / float(current_3year_treasury)
print(estimated_dividend_to_treasury)
# print(get_estimated_dividend_yield('058470'))
# print(get_current_3year_treasury())
# print(get_previous_dividend_yield('058470'))
|
1636782
|
import torch
import os
import random
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
import sys
import json
from PIL import ImageDraw
from misc.mask_utils import scatterMask
from misc.utils import denorm
import glob
from scipy.io import loadmat
from tqdm import tqdm
module_path = os.path.abspath(os.getcwd())
if module_path not in sys.path:
sys.path.append(module_path)
# ==================================================================#
# == LV-MHP-v2
# ==================================================================#
MASK_LABELS = {
0: 'Background',
1: 'Cap/hat',
2: 'Helmet',
3: 'Face',
4: 'Hair',
5: 'Left-arm',
6: 'Right-arm',
7: 'Left-hand',
8: 'Right-hand',
9: 'Protector',
10: 'Bikini/bra',
11: 'Jacket/windbreaker/hoodie',
12: 'Tee-shirt',
13: 'Polo-shirt',
14: 'Sweater',
15: 'Singlet',
16: 'Torso-skin',
17: 'Pants',
18: 'Shorts/swim-shorts',
19: 'Skirt',
20: 'Stockings',
21: 'Socks',
22: 'Left-boot',
23: 'Right-boot',
24: 'Left-shoe',
25: 'Right-shoe',
26: 'Left-highheel',
27: 'Right-highheel',
28: 'Left-sandal',
29: 'Right-sandal',
30: 'Left-leg',
31: 'Right-leg',
32: 'Left-foot',
33: 'Right-foot',
34: 'Coat',
35: 'Dress',
36: 'Robe',
37: 'Jumpsuit',
38: 'Other-full-body-clothes',
39: 'Headwear',
40: 'Backpack',
41: 'Ball',
42: 'Bats',
43: 'Belt',
44: 'Bottle',
45: 'Carrybag',
46: 'Cases',
47: 'Sunglasses',
48: 'Eyewear',
49: 'Glove',
50: 'Scarf',
51: 'Umbrella',
52: 'Wallet/purse',
53: 'Watch',
54: 'Wristband',
55: 'Tie',
56: 'Other-accessary',
57: 'Other-upper-body-clothes',
58: 'Other-lower-body-clothes',
}
MASK_ATTRS = {value: key for key, value in MASK_LABELS.items()}
# Pose
# 0: Right-ankle
# 1: Right-knee
# 2: Right-hip
# 3: Left-hip
# 4: Left-knee
# 5: Left-ankle
# 6: Pelvis
# 7: Thorax
# 8: Upper-neck
# 9: Head-top
# 10: Right-wrist
# 11: Right-elbow
# 12: Right-shoulder
# 13: Left-shoulder
# 14: Left-elbow
# 15: Left-wrist
# 16: Face-bbox-top-left-corner-point
# 17: Face-bbox-bottom-right-corner-point
# 18: Instance-bbox-top-left-corner-point
# 19: Instance-bbox-bottom-right-corner-point
class LV_MHP_v2(Dataset):
def __init__(self,
image_size,
transform,
mode,
shuffling=False,
all_attr=0,
verbose=False,
sampled=100,
show_attr='',
CREATE_DATASET=False,
**kwargs):
self.image_size = image_size
self.shuffling = shuffling
mode = 'train' if mode == 'train' else 'val'
self.mode = mode
self.name = self.__class__.__name__
self.all_attr = all_attr
self.verbose = verbose
        self.show_attr = show_attr.split(',') if show_attr else []
self.sampled = sampled # How much data to train (percentage)
self.data_dir = 'data/{}'.format(self.name)
ids = os.path.join(self.data_dir, 'list', self.mode + '.txt')
self.ids = [f.strip() for f in open(ids).readlines()]
self.colormap = loadmat(
os.path.join(self.data_dir,
'LV-MHP-v2_colormap.mat'))['MHP_colormap']
        # Colorize is expected to be provided elsewhere in the project
        # (e.g. a colormap utility); it is not imported in this file
        self.colorize = Colorize(self.colormap)
self.data_dir = os.path.join(self.data_dir, self.mode)
self.attr2idx = {}
self.idx2attr = {}
self.mask_label = MASK_LABELS
self.mask_attr = MASK_ATTRS
self.attr2filenames = {}
self.NOTattr2filenames = {}
self.transform_resize_img = transform.resize_rgb
self.transform_resize_mask = transform.resize_mask
self.transform_common = transform.common
self.transform = transform
if 'config' in kwargs.keys():
self.config = kwargs['config']
else:
from types import SimpleNamespace
self.config = SimpleNamespace()
if self.verbose:
print('Start preprocessing %s: %s!' % (self.name, mode))
random.seed(1)
if CREATE_DATASET:
self.create_dataset()
else:
self.preprocess()
self.filenames, self.labels = self.subsample(self.filenames,
self.labels)
if self.verbose:
_str = str(self.num_data)
print('Finished preprocessing %s: %s (%s)!' %
(self.name, mode, _str))
# self.write_lines()
def write_lines(self):
with open('{}/LV_MHP_v2_list_{}.txt'.format(self.data_dir, self.mode),
'w') as f:
for line in self.filenames:
f.writelines(line + '\n')
def histogram(self):
from misc.utils import PRINT
values = np.sum(self.labels, axis=0)
dict_ = {}
# import ipdb; ipdb.set_trace()
for key, value in zip(self.selected_attrs, values):
dict_[key] = value
total = 0
with open('datasets/{}_histogram_attributes.txt'.format(self.name),
'w') as f:
for key, value in sorted(dict_.items(),
key=lambda kv: (kv[1], kv[0]),
reverse=True):
total += value
PRINT(f, '{} {}'.format(key, value))
PRINT(f, 'TOTAL {}'.format(total))
def preprocess(self):
        if self.show_attr:
self.selected_attrs = self.show_attr
self.config.ATTR = self.show_attr
else:
self.selected_attrs = [
'NOT_Cap/hat',
'Cap/hat',
'NOT_Jacket/windbreaker/hoodie',
'Jacket/windbreaker/hoodie',
]
for i, attr in enumerate(self.selected_attrs):
self.attr2idx[attr] = i
self.idx2attr[i] = attr
self.attr2filenames[attr] = []
self.NOTattr2filenames[attr] = []
# lines = self.subsample(lines)
# if self.shuffling:
# random.shuffle(self.lines)
# random.shuffle(self.lines)
self.filenames = []
self.labels = []
self.segs = []
self.pose = []
no_pose = 0
for i, line in enumerate(tqdm(self.ids, leave=False)):
filename = os.path.join(self.data_dir, 'images', line + '.jpg')
pose = os.path.join(self.data_dir, 'pose_annos', line + '.mat')
segs = sorted(
glob.glob(
os.path.join(self.data_dir, 'parsing_annos', line + '_*')))
# import ipdb; ipdb.set_trace()
no_show_attr = True
# import ipdb; ipdb.set_trace()
for seg in segs:
person_id = int(
os.path.splitext(os.path.basename(seg))[0].split('_')
[-1]) - 1 # starts from 0
# segmap = self.get_mask_from_file(seg, no_show=True)
values_sem = self.get_mask_from_file(seg, label=True)
values_sem = values_sem.unique()
label = []
for attr in self.selected_attrs:
selected_value = self.get_value(values_sem, attr)
if selected_value >= 1:
label.append(selected_value)
                        self.attr2filenames[attr].append(line)
no_show_attr = False
else:
label.append(0)
                        self.NOTattr2filenames[attr].append(line)
if self.show_attr and no_show_attr:
continue
try:
pose_id = loadmat(pose)['person_%d' % person_id]
except BaseException:
# import ipdb; ipdb.set_trace()
no_pose += 1
continue
# import ipdb; ipdb.set_trace()
self.filenames.append(filename)
self.labels.append(label)
self.segs.append(seg)
self.pose.append(pose_id)
print("No pose found:", no_pose)
if not self.show_attr:
self.histogram()
self.num_data = len(self.filenames)
def create_dataset(self):
no_pose = 0
new_images = os.path.join(self.data_dir, 'new_images')
os.makedirs(new_images, exist_ok=True)
new_segs = os.path.join(self.data_dir, 'new_segs')
os.makedirs(new_segs, exist_ok=True)
new_pose = os.path.join(self.data_dir, 'new_pose')
os.makedirs(new_pose, exist_ok=True)
new_labels = os.path.join(self.data_dir, 'new_labels')
os.makedirs(new_labels, exist_ok=True)
self.selected_attrs = self.mask_attr.keys()
for i, line in enumerate(tqdm(self.ids, leave=False)):
filename = os.path.join(self.data_dir, 'images', line + '.jpg')
pose = os.path.join(self.data_dir, 'pose_annos', line + '.mat')
segs = sorted(
glob.glob(
os.path.join(self.data_dir, 'parsing_annos', line + '_*')))
# import ipdb; ipdb.set_trace()
no_show_attr = True
# import ipdb; ipdb.set_trace()
for seg in segs:
person_id = int(
os.path.splitext(os.path.basename(seg))[0].split('_')
[-1]) - 1 # starts from 0
# segmap = self.get_mask_from_file(seg, no_show=True)
segmap = self.get_mask_from_file(seg, label=True)[0]
try:
pose_id = loadmat(pose)['person_%d' % person_id]
except BaseException:
# import ipdb; ipdb.set_trace()
no_pose += 1
continue
                labels = [self.mask_label[int(i)] for i in segmap.unique()]
nonzero = torch.nonzero(segmap)
bbox = nonzero.min(0)[0].tolist()
bbox.extend(nonzero.max(0)[0].tolist()) # x1, y1, x2, y2
self.filenames.append(filename)
                self.labels.append(labels)
self.segs.append(seg)
self.pose.append(pose_id)
print("No pose found:", no_pose)
if not self.show_attr:
self.histogram()
self.num_data = len(self.filenames)
def get_value(self, values, attr):
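        # Return 1 if `attr`'s index in mask_label occurs in `values`;
        # a 'NOT_' prefix on the attribute name inverts the result.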
NOT = False
if 'NOT_' in attr:
NOT = True
attr = attr.replace('NOT_', '')
index = list(self.mask_label.values()).index(attr)
value = int(index in values)
if NOT:
value = 1 - value
return value
def get_data(self):
return self.filenames, self.labels
def get_mask_from_file(self, maskname, no_show=False, label=False):
mask = Image.open(maskname).convert('RGB')
mask = self.transform_resize_mask(mask)
mask = self.transform_common(mask)[0] * 255. # 0, 255
if self.show_attr and not no_show:
labels_real = self.get_partial_mask(mask).unsqueeze(0)
elif label:
labels_real = mask
else:
labels_real = scatterMask(mask, num_channels=len(self.mask_label))
# labels_real: C x size x size
return labels_real # 59 attrs
def get_partial_mask(self, mask):
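        # Zero out every pixel except those carrying the label value of a
        # selected attribute.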
new_mask = torch.zeros_like(mask)
for attr in self.selected_attrs:
label = self.mask_attr[attr]
new_mask[mask == label] = label
return new_mask
def __getitem__(self, index):
filename = self.filenames[index]
seg = self.segs[index]
label = self.labels[index]
pose = self.pose[index]
image = Image.open(filename)
if self.show_attr:
image = image.convert('RGBA')
img2 = image.copy()
zero_seg = np.zeros((image.size[::-1])).astype(np.uint8)
org_seg = self.get_mask_from_file(seg, no_show=True)
for idx, attr in enumerate(self.attributes):
_label = label[idx]
if _label == 1:
zero_seg += org_seg[self.mask_attr[attr]]
zero_seg = self.colorize(zero_seg) / 255.
else:
image = image.convert('RGB')
seg = np.zeros((1 + len(self.attr2idx.keys()),
*image.size[::-1])).astype(np.uint8)
for label, segs in self.segs[index].items():
img_temp = Image.new('L', image.size, 0)
draw = ImageDraw.Draw(img_temp)
draw.polygon(segs, outline=1, fill=1)
img_temp = np.array(img_temp)
seg[label + 1][img_temp == 1] = 1
seg[0][seg.sum(0) == 0] = 1 # background
# seg = seg[None,:,:,:].repeat(3,0).transpose(1,0,2,3)
# to match the transform variable
bbox = self.bbox[index] # x1,y1,x2,y2
margin = (0.075, 0.075)
width_ = image.size[0] * margin[0]
height_ = image.size[1] * margin[1]
bbox[0] = max(0, bbox[0] - width_)
bbox[1] = max(0, bbox[1] - height_)
bbox[2] = min(image.size[0], bbox[2] + width_)
bbox[3] = min(image.size[1], bbox[3] + height_)
image = image.crop(bbox)
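        # `keyp` is assumed to be a keypoint image (PIL) rendered from `pose`;
        # its construction is elided in this snippet.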
keyp = keyp.crop(bbox)
keyp = self.transform_resize_img(keyp)
keyp = self.transform_common(keyp)[0].unsqueeze(0)
seg = [Image.fromarray(i).crop(bbox).convert('RGB') for i in seg]
seg = [self.transform_resize_mask(i) for i in seg]
seg = [self.transform_common(i)[0] for i in seg]
seg = torch.stack(seg, dim=0) * 255
# seg = scatterMask(seg, num_channels=1+len(self.attr2idx.keys()))
image = self.transform_resize_img(image)
image = self.transform_common(image)
image = self.transform.norm(image)
if self.show_attr:
alpha = 0.4
image = (alpha * image) + (1 - alpha) * keyp
label = torch.FloatTensor(self.labels[index])
if self.config.TRAIN_MASK: # or self.config.ONLY_GEN:
_seg = image
image = seg
seg = _seg
return image, label, seg, keyp
def __len__(self):
return self.num_data
def shuffle(self, seed):
random.seed(seed)
random.shuffle(self.filenames)
random.seed(seed)
random.shuffle(self.labels)
def show_me(args):
from data_loader import get_transformations
from torch.utils.data import DataLoader
from torchvision.utils import make_grid
from misc.utils import denorm
import numpy as np
import matplotlib.pyplot as plt
attrs = args.attr # .split(',')
mode = 'train'
transform = get_transformations(mode='test', image_size=256)
data = LV_MHP_v2(256,
transform,
mode,
show_attr=attrs,
CREATE_DATASET=args.CREATE_DATASET,
verbose=True)
data_loader = DataLoader(dataset=data,
batch_size=64,
shuffle=False,
num_workers=4)
for i, (data, label, *_) in enumerate(data_loader):
data = denorm(data)
data = make_grid(data).numpy()
plt.figure(figsize=(20, 20))
plt.imshow(np.transpose(data, (1, 2, 0)), interpolation='nearest')
plt.show(block=True)
class Colorize(object):
def __init__(self, cmap):
# self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(cmap)
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
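# Usage sketch (illustrative values): `cmap` is assumed to be an (N, 3) uint8
# array mapping label index -> RGB, and `gray_image` a 1 x H x W tensor of
# integer labels.
#
#   cmap = np.array([[0, 0, 0], [255, 0, 0], [0, 255, 0]], dtype=np.uint8)
#   labels = torch.zeros(1, 4, 4, dtype=torch.uint8)
#   labels[0, 1:3, 1:3] = 1
#   rgb = Colorize(cmap)(labels)  # ByteTensor of shape (3, 4, 4)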
if __name__ == '__main__':
# ipython datasets/DeepFashion2.py -- --attr=vest
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--attr', type=str, default='all')
parser.add_argument('--CREATE_DATASET', action='store_true', default=False)
args = parser.parse_args()
# train_inception()
show_me(args)
|
1636789
|
from __future__ import absolute_import
from . import backend
from . import datasets
from . import layers
from . import preprocessing
from . import utils
from . import wrappers
from . import callbacks
from . import constraints
from . import initializers
from . import metrics
from . import losses
from . import optimizers
from . import regularizers
__version__ = '0.0.2'
|
1636816
|
import random
from jinja2 import Environment, PackageLoader
env = Environment(loader=PackageLoader('generate_test', '.'))
template = env.get_template('template.jinja')
def answer(n):
s = 0
p = 0
n = str(n)
for c in n:
if int(c) % 2 == 1:
s += 1
if p == 0:
p = int(c)
else:
p = p * int(c)
return s, p
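# Worked example: answer(1234) finds two odd digits (1 and 3), whose product
# is 3, so it returns (2, 3).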
def gen_test():
n = random.randint(1, 10000)
ans = answer(n)
return {"x": n, "a": ans[0], "b": ans[1]}
tests = [gen_test() for _ in range(1000)]
for i, test in enumerate(tests):
test["i"] = i
with open("autotest.c", "w") as f:
render = template.render(tests=tests)
f.write(render)
|
1636854
|
import attr
@attr.s(slots=True)
class Booking:
id: int = attr.ib()
name: str = attr.ib()
is_active: bool = attr.ib()
asdict = attr.asdict
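# Usage sketch (illustrative values): because `asdict` is bound at class
# level, attr.asdict receives the instance as its argument.
#
#   b = Booking(id=1, name="demo", is_active=True)
#   b.asdict()  # -> {'id': 1, 'name': 'demo', 'is_active': True}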
|
1636863
|
from photons_app.errors import ApplicationCancelled, ApplicationStopped
from photons_app.errors import UserQuit
from photons_app import helpers as hp
import platform
import asyncio
import logging
import signal
import sys
log = logging.getLogger("photons_app.tasks.runner")
class Runner:
def __init__(self, task, kwargs):
self.task = task
self.kwargs = kwargs
def run_loop(self):
photons_app = self.task.photons_app
target_register = self.task.collector.configuration["target_register"]
self.Run(self.task.run(**self.kwargs), photons_app, target_register).run()
class Run:
def __init__(self, coro, photons_app, target_register):
self.coro = coro
self.photons_app = photons_app
self.target_register = target_register
self.loop = self.photons_app.loop
@property
def significant_future(self):
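        """The graceful final future when it has been set up, otherwise the
        application's final future."""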
graceful_future = self.photons_app.graceful_final_future
if graceful_future.setup:
return graceful_future
return self.photons_app.final_future
def run(self):
self.photons_app.final_future.add_done_callback(hp.silent_reporter)
self.significant_future.add_done_callback(hp.silent_reporter)
self.register_sigterm_handler(self.significant_future)
task, waiter = self.make_waiter()
override = None
graceful = self.significant_future is self.photons_app.graceful_final_future
try:
self.loop.run_until_complete(waiter)
except KeyboardInterrupt as error:
override = self.got_keyboard_interrupt(error)
except asyncio.CancelledError as error:
override = self.got_cancelled(error)
except:
override = sys.exc_info()[1]
log.debug("CLEANING UP")
try:
self.final(task, waiter)
finally:
self.final_close()
if isinstance(override, ApplicationStopped) and graceful:
return
if override is not None:
raise override from None
def register_sigterm_handler(self, final_future):
if platform.system() != "Windows":
def stop_final_fut():
if not final_future.done():
final_future.set_exception(ApplicationStopped())
self.loop.add_signal_handler(signal.SIGTERM, stop_final_fut)
async def wait(self, task):
wait = [self.photons_app.final_future, self.significant_future, task]
await hp.wait_for_first_future(*wait, name="||run>wait[wait_for_program_exit]")
if task.done():
await task
if self.photons_app.final_future.done():
await self.photons_app.final_future
if self.significant_future.done():
await self.significant_future
def make_waiter(self):
task = self.loop.create_task(self.coro)
task.add_done_callback(hp.silent_reporter)
waiter = self.loop.create_task(self.wait(task))
waiter.add_done_callback(hp.silent_reporter)
return task, waiter
def got_keyboard_interrupt(self, error):
error = UserQuit()
if not self.significant_future.done():
try:
self.significant_future.set_exception(error)
except RuntimeError:
pass
return error
def got_cancelled(self, error):
error = ApplicationCancelled()
if not self.significant_future.done():
try:
self.significant_future.set_exception(error)
except RuntimeError:
pass
return error
def transfer_result(self, complete, pending):
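        """Propagate the outcome of `complete` onto `pending`: cancellation
        (or a missing task) cancels it, an exception is set on it, and a
        successful result resolves it with None. `pending` is only touched
        if it does not already have an outcome."""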
if complete is None or complete.cancelled():
if not pending.done():
pending.cancel()
return
if not complete.done():
return
exc = complete.exception()
if exc:
if not pending.done():
pending.set_exception(exc)
return
complete.result()
if not pending.done():
pending.set_result(None)
def final(self, task, waiter):
self.wait_for_main_task(task)
self.wait_for_waiter(waiter)
self.ensure_finished_futures(task, waiter)
self.run_cleanup()
self.ensure_all_tasks_cancelled()
def wait_for_main_task(self, task):
log.debug("Waiting for main task to finish")
# If we exited because final future is done but graceful is not
# Then the task won't end, so let's tell graceful we're done now
if self.photons_app.final_future.done() and not self.significant_future.done():
self.transfer_result(self.photons_app.final_future, self.significant_future)
# If we're not using the graceful future then we assume the task won't stop by itself
# The graceful future is about saying the task will stop by itself when you resolve graceful
if not self.photons_app.graceful_final_future.setup:
task.cancel()
try:
self.loop.run_until_complete(
asyncio.tasks.gather(task, loop=self.loop, return_exceptions=True)
)
except KeyboardInterrupt:
pass
except:
pass
finally:
task.cancel()
def wait_for_waiter(self, waiter):
log.debug("Waiting for waiter task to finish")
waiter.cancel()
try:
self.loop.run_until_complete(
asyncio.tasks.gather(waiter, loop=self.loop, return_exceptions=True)
)
except:
pass
def run_cleanup(self):
log.debug("Running cleaners")
targets = self.target_register.used_targets
self.loop.run_until_complete(self.photons_app.cleanup(targets))
def ensure_finished_futures(self, task, waiter):
self.transfer_result(None if not task.done() else task, self.photons_app.final_future)
if not self.significant_future.done():
self.significant_future.cancel()
if self.photons_app.graceful_final_future.setup:
if self.significant_future.cancelled() or isinstance(
self.significant_future.exception(),
(UserQuit, ApplicationStopped, ApplicationCancelled),
):
self.photons_app.final_future.cancel()
self.transfer_result(self.significant_future, self.photons_app.final_future)
def ensure_all_tasks_cancelled(self):
log.debug("Cancelling tasks and async generators")
self.cancel_all_tasks()
self.loop.run_until_complete(self.shutdown_asyncgens())
def final_close(self):
self.loop.close()
del self.photons_app.loop
del self.photons_app.final_future
del self.photons_app.graceful_final_future
def cancel_all_tasks(self):
if hasattr(asyncio.tasks, "all_tasks"):
to_cancel = asyncio.tasks.all_tasks(self.loop)
else:
to_cancel = asyncio.Task.all_tasks(self.loop)
to_cancel = [t for t in to_cancel if not t.done()]
if not to_cancel:
return
for task in to_cancel:
task.cancel()
gathered = asyncio.tasks.gather(*to_cancel, loop=self.loop, return_exceptions=True)
self.loop.run_until_complete(gathered)
for task in to_cancel:
if task.cancelled():
continue
if task.exception() is not None:
self.loop.call_exception_handler(
{
"message": "unhandled exception during shutdown",
"exception": task.exception(),
"task": task,
}
)
async def shutdown_asyncgens(self):
if not len(self.loop._asyncgens):
return
closing_agens = list(self.loop._asyncgens)
self.loop._asyncgens.clear()
# I would do an asyncio.tasks.gather but it would appear that just causes
# the asyncio loop to think it's shutdown, so I have to do them one at a time
for ag in closing_agens:
try:
await hp.stop_async_generator(
ag, name="||shutdown_asyncgens[wait_for_closing_agens]"
)
except asyncio.CancelledError:
pass
except:
exc = sys.exc_info()[1]
self.loop.call_exception_handler(
{
"message": "an error occurred during closing of asynchronous generator",
"exception": exc,
"asyncgen": ag,
}
)
|
1636876
|
from networkx.algorithms.assortativity import *
from networkx.algorithms.asteroidal import *
from networkx.algorithms.boundary import *
from networkx.algorithms.bridges import *
from networkx.algorithms.chains import *
from networkx.algorithms.centrality import *
from networkx.algorithms.chordal import *
from networkx.algorithms.cluster import *
from networkx.algorithms.clique import *
from networkx.algorithms.communicability_alg import *
from networkx.algorithms.components import *
from networkx.algorithms.coloring import *
from networkx.algorithms.core import *
from networkx.algorithms.covering import *
from networkx.algorithms.cycles import *
from networkx.algorithms.cuts import *
from networkx.algorithms.d_separation import *
from networkx.algorithms.dag import *
from networkx.algorithms.distance_measures import *
from networkx.algorithms.distance_regular import *
from networkx.algorithms.dominance import *
from networkx.algorithms.dominating import *
from networkx.algorithms.efficiency_measures import *
from networkx.algorithms.euler import *
from networkx.algorithms.graphical import *
from networkx.algorithms.hierarchy import *
from networkx.algorithms.hybrid import *
from networkx.algorithms.link_analysis import *
from networkx.algorithms.link_prediction import *
from networkx.algorithms.lowest_common_ancestors import *
from networkx.algorithms.isolate import *
from networkx.algorithms.matching import *
from networkx.algorithms.minors import *
from networkx.algorithms.mis import *
from networkx.algorithms.moral import *
from networkx.algorithms.non_randomness import *
from networkx.algorithms.operators import *
from networkx.algorithms.planarity import *
from networkx.algorithms.planar_drawing import *
from networkx.algorithms.reciprocity import *
from networkx.algorithms.regular import *
from networkx.algorithms.richclub import *
from networkx.algorithms.shortest_paths import *
from networkx.algorithms.similarity import *
from networkx.algorithms.graph_hashing import *
from networkx.algorithms.simple_paths import *
from networkx.algorithms.smallworld import *
from networkx.algorithms.smetric import *
from networkx.algorithms.structuralholes import *
from networkx.algorithms.sparsifiers import *
from networkx.algorithms.summarization import *
from networkx.algorithms.swap import *
from networkx.algorithms.traversal import *
from networkx.algorithms.triads import *
from networkx.algorithms.vitality import *
from networkx.algorithms.voronoi import *
from networkx.algorithms.wiener import *
# Make certain subpackages available to the user as direct imports from
# the `networkx` namespace.
from networkx.algorithms import approximation
from networkx.algorithms import assortativity
from networkx.algorithms import bipartite
from networkx.algorithms import node_classification
from networkx.algorithms import centrality
from networkx.algorithms import chordal
from networkx.algorithms import cluster
from networkx.algorithms import clique
from networkx.algorithms import components
from networkx.algorithms import connectivity
from networkx.algorithms import community
from networkx.algorithms import coloring
from networkx.algorithms import flow
from networkx.algorithms import isomorphism
from networkx.algorithms import link_analysis
from networkx.algorithms import lowest_common_ancestors
from networkx.algorithms import operators
from networkx.algorithms import shortest_paths
from networkx.algorithms import tournament
from networkx.algorithms import traversal
from networkx.algorithms import tree
# Make certain functions from some of the previous subpackages available
# to the user as direct imports from the `networkx` namespace.
from networkx.algorithms.bipartite import complete_bipartite_graph
from networkx.algorithms.bipartite import is_bipartite
from networkx.algorithms.bipartite import project
from networkx.algorithms.bipartite import projected_graph
from networkx.algorithms.connectivity import all_pairs_node_connectivity
from networkx.algorithms.connectivity import all_node_cuts
from networkx.algorithms.connectivity import average_node_connectivity
from networkx.algorithms.connectivity import edge_connectivity
from networkx.algorithms.connectivity import edge_disjoint_paths
from networkx.algorithms.connectivity import k_components
from networkx.algorithms.connectivity import k_edge_components
from networkx.algorithms.connectivity import k_edge_subgraphs
from networkx.algorithms.connectivity import k_edge_augmentation
from networkx.algorithms.connectivity import is_k_edge_connected
from networkx.algorithms.connectivity import minimum_edge_cut
from networkx.algorithms.connectivity import minimum_node_cut
from networkx.algorithms.connectivity import node_connectivity
from networkx.algorithms.connectivity import node_disjoint_paths
from networkx.algorithms.connectivity import stoer_wagner
from networkx.algorithms.flow import capacity_scaling
from networkx.algorithms.flow import cost_of_flow
from networkx.algorithms.flow import gomory_hu_tree
from networkx.algorithms.flow import max_flow_min_cost
from networkx.algorithms.flow import maximum_flow
from networkx.algorithms.flow import maximum_flow_value
from networkx.algorithms.flow import min_cost_flow
from networkx.algorithms.flow import min_cost_flow_cost
from networkx.algorithms.flow import minimum_cut
from networkx.algorithms.flow import minimum_cut_value
from networkx.algorithms.flow import network_simplex
from networkx.algorithms.isomorphism import could_be_isomorphic
from networkx.algorithms.isomorphism import fast_could_be_isomorphic
from networkx.algorithms.isomorphism import faster_could_be_isomorphic
from networkx.algorithms.isomorphism import is_isomorphic
from networkx.algorithms.tree.branchings import maximum_branching
from networkx.algorithms.tree.branchings import maximum_spanning_arborescence
from networkx.algorithms.tree.branchings import minimum_branching
from networkx.algorithms.tree.branchings import minimum_spanning_arborescence
from networkx.algorithms.tree.branchings import ArborescenceIterator
from networkx.algorithms.tree.coding import *
from networkx.algorithms.tree.decomposition import *
from networkx.algorithms.tree.mst import *
from networkx.algorithms.tree.operations import *
from networkx.algorithms.tree.recognition import *
|
1636940
|
import click
import src.cli.console as console
from src.cli.context import show_context
from src.graphql import GraphQL
from src.local.providers.helper import get_cluster_or_exit
from src.local.system import Telepresence
from src.storage.user import get_local_storage_user
@click.command()
@click.pass_obj
def ps(ctx, **kwargs):
"""
Displays the current process state.
"""
# cluster
cluster_list = ctx.cluster_manager.get_cluster_list(ready=True)
cluster_id_list = [cluster.id for cluster in cluster_list]
# GraphQL
try:
graph_ql = GraphQL(authentication=ctx.auth)
data = graph_ql.query(
"""
query {
allProjects {
results {
title
id
description
}
}
}
""",
)
project_list = data["allProjects"]["results"]
except Exception as e:
console.debug(e)
console.exit_generic_error()
cluster_data = []
for project in project_list:
if project["id"] in cluster_id_list:
cluster_data.append(project)
console.info("Project:")
console.table(
data={
"id": [cluster["id"] for cluster in cluster_data],
"title": [cluster["title"] for cluster in cluster_data],
"description": [cluster["description"] for cluster in cluster_data],
},
headers=["cluster: id", "name", "description"],
)
console.echo("")
# switch
intercept_count = 0
if cluster_data:
cluster = get_cluster_or_exit(ctx, cluster_data[0]["id"])
provider_data = cluster.storage.get()
telepresence = Telepresence(provider_data)
intercept_count = telepresence.intercept_count()
    if not intercept_count:
console.info("No app switched!")
else:
console.info(f"Apps switched: #{intercept_count}")
console.echo("")
# context
local_storage_user = get_local_storage_user()
user_data = local_storage_user.get()
show_context(user_data.context)
|
1636941
|
from datetime import datetime
from django.db.models import Count
import olympia.core.logger
from olympia.amo.celery import task
from olympia.amo.decorators import use_primary_db
from .models import Collection, CollectionAddon
log = olympia.core.logger.getLogger('z.task')
@task
@use_primary_db
def collection_meta(*ids, **kw):
log.info(f'[{len(ids)}@{collection_meta.rate_limit}] Updating collection metadata.')
qs = CollectionAddon.objects.filter(collection__in=ids).values_list('collection')
counts = dict(qs.annotate(Count('id')))
now = datetime.now()
for collection_id, old_count in Collection.objects.filter(id__in=ids).values_list(
'pk', 'addon_count'
):
addon_count = counts.get(collection_id, 0)
if addon_count == old_count:
continue
# We want to set addon_count & modified without triggering post_save
# as it would cause an infinite loop (this task is called on
        # post_save). So we use queryset.update() and set modified ourselves
# instead of relying on auto_now behaviour.
Collection.objects.filter(id=collection_id).update(
addon_count=addon_count, modified=now
)
|
1636942
|
from collections import defaultdict
class Solution(object):
    def longestPalindrome(self, s):
        """
        :type s: str
        :rtype: str
        """
        if not s:
            return ""
        # matrix[(i, j)] is True when s[i..j] is a palindrome
        matrix = defaultdict(bool)
        # initialize: every single character is a palindrome
        for i in range(len(s)):
            matrix[(i, i)] = True
        # N*N: grow palindromes by the distance between their endpoints
        maxlen = 0
        result = s[0]
        for diff in range(1, len(s)):
            for x in range(len(s) - diff):
                if s[x] == s[x + diff] and (diff == 1 or matrix[(x + 1, x + diff - 1)]):
                    matrix[(x, x + diff)] = True
                    if diff > maxlen:
                        maxlen = diff
                        result = s[x : x + diff + 1]
        return result
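# Quick check (illustrative): Solution().longestPalindrome("babad") -> "bab",
# and Solution().longestPalindrome("cbbd") -> "bb".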
|
1636949
|
from numba import jit
import numpy as np
import cv2
random = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
class Camera:
def _resize_frame(self, frame, dst, flip=0):
frame_shape = np.shape(frame)
frame_crop_height = int(frame_shape[1] / self._ratio)
crop_offset = (frame_shape[0] - frame_crop_height) // 2
if crop_offset > 0:
cropped_frame = frame[crop_offset:-crop_offset, :, :]
else:
cropped_frame = frame
if flip == 1: # horizontal
cv2.resize(cv2.flip(cropped_frame, 1), self._size, dst=dst)
        elif flip == 2:  # vertical
cv2.resize(cv2.flip(cropped_frame, 0), self._size, dst=dst)
elif flip == 3: # both
cv2.resize(cv2.flip(cropped_frame, -1), self._size, dst=dst)
else:
cv2.resize(cropped_frame, self._size, dst=dst)
def __init__(self, size=(640,360), camera_index=0, no_cam_allowed=False):
self._no_cam_allowed = no_cam_allowed
self._cap = cv2.VideoCapture(camera_index)
self._size = size
self._ratio = size[0] / size[1]
self._fgbg = cv2.createBackgroundSubtractorKNN()
self._mask = np.zeros(self._size[::-1], dtype=np.uint8)
self._input_frame = np.zeros((*self._size[::-1], 3), dtype=np.uint8)
self._hsv_field = np.zeros((*self._size[::-1], 3), dtype=np.uint8)
self._last_grey = np.zeros(self._size[::-1], dtype=np.uint8)
self._current_grey = np.zeros(self._size[::-1], dtype=np.uint8)
if not self._cap.isOpened():
# random = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
self._resize_frame(random, dst=self._input_frame)
''' HSV test image
test_image = np.zeros_like(self._input_frame, dtype=np.uint8)
x = np.linspace(0, 255, size[0], dtype=np.uint8)
y = np.linspace(255, 0, size[1], dtype=np.uint8)
XX, YY = np.meshgrid(x, y)
test_image[:, :, 1] = XX
test_image[:, :, 2] = YY
self._input_frame = cv2.cvtColor(test_image, cv2.COLOR_HSV2BGR)
'''
def __del__(self):
self._cap.release()
@jit
def update(self, bg_option, mirror_screen, mask_level, mask_width):
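        """Grab a frame (or the random fallback image) and recompute the mask.

        bg_option selects how foreground pixels are masked: 0 assumes a white
        background (mask saturated/dark pixels), 2 assumes a colored
        background (mask pixels whose hue differs from mask_level by more
        than mask_width), 3 uses KNN background subtraction, and anything
        else assumes a black background (mask bright pixels). mirror_screen
        is forwarded to _resize_frame as the flip mode.
        """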
if self._cap.isOpened():
# update frame if webcam is active
ret, frame = self._cap.read()
if ret:
self._resize_frame(frame, self._input_frame, mirror_screen)
else:
# else use a random image
self._resize_frame(random, self._input_frame, mirror_screen)
self._last_grey[:] = self._current_grey
cv2.cvtColor(self._input_frame, cv2.COLOR_BGR2GRAY, dst=self._current_grey)
if bg_option == 3: # background subtraction
self._mask[:] = self._fgbg.apply(self._input_frame, learningRate=0.003)
else:
self._mask[:] = 0
cv2.cvtColor(self._input_frame, cv2.COLOR_BGR2HSV, dst=self._hsv_field)
        if bg_option == 2:  # hue
            x = np.abs(np.array(self._hsv_field[:, :, 0], float) / 180 - mask_level)
            self._mask[x > mask_width] = 255
        elif bg_option == 0:  # white
            x = np.array(self._hsv_field[:, :, 1], float) / 255
            x = 1 / mask_width * x * x + mask_level
            y = np.array(self._hsv_field[:, :, 2], float) / 255
            self._mask[y <= x] = 255
        else:  # black
            self._mask[self._hsv_field[:, :, 2] > (255 * (1 - mask_level))] = 255
def reset(self):
if not self._cap.isOpened():
random[:] = np.array(np.power(np.random.rand(16, 8, 3), 3) * 255, dtype=np.uint8)
@property
def active(self):
return self._cap.isOpened() or self._no_cam_allowed
@property
def shape(self):
return self._size
@property
def input_frame(self):
return self._input_frame
@property
def mask(self):
return self._mask
@property
def current_grey(self):
return self._current_grey
@property
def last_grey(self):
return self._last_grey
@jit
def get_mask(self, size, transpose):
        resized = cv2.resize(self._mask, size)
        return resized.T if transpose else resized
|
1636966
|
from django.db import models
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
# Create your models here.
class Feature(models.Model):
object_content_type = models.ForeignKey(ContentType, related_name='features', on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
object = GenericForeignKey('object_content_type', 'object_id')
feature = models.CharField(max_length=1000)
date_observed = models.DateTimeField(auto_now=True)
|
1636974
|
import nbformat as nbf
from astropy.table import Table
#oof = nbf.read('magical_transofrms.ipynb', as_version=4)
# gool = '06.01-Initial-reduction.ipynb'
gool = 'magical_transofrms.ipynb'
def markdown_cells(nb):
"""
Iterator for markdown cells in notebook.
"""
for cell in nb['cells']:
if cell['cell_type'] == "markdown":
yield cell
def link_fix(text, name_dict):
"""
Replace old file names with new in markdown links.
"""
new_text = text
for old, new in name_dict.items():
new_text = new_text.replace(f']({old})', f']({new})')
return new_text
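# For example (hypothetical names):
#   link_fix('see [intro](old.ipynb)', {'old.ipynb': 'new.ipynb'})
#   -> 'see [intro](new.ipynb)'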
if __name__ == '__main__':
names = {k: v for k, v in Table.read('old-and-new-names.csv')}
for notebook_name in names.values():
try:
notebook = nbf.read(notebook_name, as_version=4)
except FileNotFoundError:
continue
for cell in markdown_cells(notebook):
new_source = link_fix(cell['source'], names)
if new_source != cell['source']:
print(f'fixed link in {notebook_name}')
cell['source'] = new_source
with open(notebook_name, 'w') as f:
nbf.write(notebook, f)
|
1636980
|
import os
import sys
STRICTDOC_ROOT_PATH = os.path.abspath(
os.path.join(__file__, "../../../../strictdoc")
)
assert os.path.exists(STRICTDOC_ROOT_PATH), "does not exist: {}".format(
STRICTDOC_ROOT_PATH
)
sys.path.append(STRICTDOC_ROOT_PATH)
|
1636993
|
import unittest
import unittest.mock
from programy.clients.render.html import HtmlRenderer
class MockHtmlBotClient(object):
def __init__(self):
self._response = None
self.configuration = unittest.mock.Mock()
self.configuration.host = "127.0.0.1"
self.configuration.port = "6666"
self.configuration.api = "/api/web/v1.0/ask"
def process_response(self, client_context, response):
self._response = response
class HtmlRendererTests(unittest.TestCase):
def test_create_postback_url(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
postback = renderer.create_postback_url()
self.assertIsNotNone(postback)
self.assertEqual(postback, "#")
def test_text_only(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "Hello world")
self.assertEqual(mock_console._response, "Hello world")
def test_url_button(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
        renderer.render("testuser", '<button><text>Hello</text><url>http://click.me</url></button>')
self.assertEqual(mock_console._response, '<a class="programy" href="http://click.me">Hello</a>')
def test_url_button_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<button class="class1" id="id1"><text>Hello</text><url>http://click.me</url></button>')
self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" href="http://click.me">Hello</a>')
def test_postback_button(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<button><text>Hello</text><postback>HELLO</postback></button>")
self.assertEqual(mock_console._response, '<a class="programy" postback="HELLO" href="#">Hello</a>')
def test_postback_button_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<button class="class1" id="id1"><text>Hello</text><postback>HELLO</postback></button>')
self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="HELLO" href="#">Hello</a>')
def test_link(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<link><text>Hello</text><url>http://click.me</url></link>")
self.assertEqual(mock_console._response, '<a class="programy" href="http://click.me">Hello</a>')
def test_link_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<link class="class1" id="id1"><text>Hello</text><url>http://click.me</url></link>')
self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" href="http://click.me">Hello</a>')
def test_image(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<image>http://servusai.com/aiml.png</image>')
self.assertEqual(mock_console._response, '<img class="programy" src="http://servusai.com/aiml.png" />')
def test_image_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<image class="class1" id="id1">http://servusai.com/aiml.png</image>')
self.assertEqual(mock_console._response, '<img class="class1 programy" id="id1" src="http://servusai.com/aiml.png" />')
def test_video(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<video>http://servusai.com/aiml.mov</video>")
self.assertEqual(mock_console._response, """<video class="programy" src="http://servusai.com/aiml.mov">
Sorry, your browser doesn't support embedded videos,
but don't worry, you can <a href="http://servusai.com/aiml.mov">download it</a>
and watch it with your favorite video player!
</video>""")
def test_video_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<video class="class1" id="id1">http://servusai.com/aiml.mov</video>')
self.assertEqual(mock_console._response, """<video class="class1 programy" id="id1" src="http://servusai.com/aiml.mov">
Sorry, your browser doesn't support embedded videos,
but don't worry, you can <a href="http://servusai.com/aiml.mov">download it</a>
and watch it with your favorite video player!
</video>""")
def test_card(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>')
self.assertEqual(mock_console._response, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>')
def test_card_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<card class="class1" id="id1"><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>')
self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>')
def test_carousel(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<carousel><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card></carousel>")
self.assertEqual(mock_console._response, '<div class="programy"><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div></div>')
def test_carousel_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<carousel class="class1" id="id1"><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card></carousel>')
self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1"><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div></div>')
def test_reply_with_postback(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<reply><text>Hello</text><postback>HELLO</postback></reply>")
self.assertEqual(mock_console._response, '<a class="programy" postback="HELLO" href="#">Hello</a>')
def test_reply_with_postback_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<reply class="class1" id="id1"><text>Hello</text><postback>HELLO</postback></reply>')
self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="HELLO" href="#">Hello</a>')
def test_reply_without_postback(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<reply><text>Hello</text></reply>")
self.assertEqual(mock_console._response, '<a class="programy" postback="Hello" href="#">Hello</a>')
def test_reply_without_postback_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<reply class="class1" id="id1"><text>Hello</text></reply>')
self.assertEqual(mock_console._response, '<a class="class1 programy" id="id1" postback="Hello" href="#">Hello</a>')
def test_delay(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<delay><seconds>0</seconds></delay>")
self.assertEqual(mock_console._response, '<div class="programy">...</div>')
def test_delay_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<delay class="class1" id="id1" ><seconds>0</seconds></delay>')
self.assertEqual(mock_console._response, '<div class="class1 programy" id="id1">...</div>')
def test_split(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<split />")
self.assertEqual(mock_console._response, '<br class="programy" />')
def test_split_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<split class="class1" id="id1"/>')
self.assertEqual(mock_console._response, '<br class="class1 programy" id="id1" />')
def test_list(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<list><item>Item1</item><item>Item2</item></list>')
self.assertEqual(mock_console._response, '<ul class="programy"><li>Item1</li><li>Item2</li></ul>')
def test_list_with_children(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
        renderer.render("testuser", '<list><item>Hi</item><item><button><text>Hello</text><url>http://click.me</url></button></item><item><button><text>Goodbye</text><url>http://click.me</url></button></item></list>')
self.assertEqual(mock_console._response, '<ul class="programy"><li>Hi</li><li><a class="programy" postback="Hello" href="#">Hello</a></li><li><a class="programy" postback="Goodbye" href="#">Goodbye</a></li></ul>')
def test_list_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<list class="class1" id="id1"><item>Item1</item><item>Item2</item></list>')
self.assertEqual(mock_console._response, '<ul class="class1 programy" id="id1"><li>Item1</li><li>Item2</li></ul>')
def test_olist(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<olist><item>Item1</item><item>Item2</item></olist>")
self.assertEqual(mock_console._response, '<ol class="programy"><li>Item1</li><li>Item2</li></ol>')
def test_olist_with_children(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
        renderer.render("testuser", '<olist><item>Hi</item><item><button><text>Hello</text><url>http://click.me</url></button></item><item><button><text>Goodbye</text><url>http://click.me</url></button></item></olist>')
self.assertEqual(mock_console._response, '<ol class="programy"><li>Hi</li><li><a class="programy" postback="Hello" href="#">Hello</a></li><li><a class="programy" postback="Goodbye" href="#">Goodbye</a></li></ol>')
def test_olist_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<olist class="class1" id="id1"><item>Item1</item><item>Item2</item></olist>')
self.assertEqual(mock_console._response, '<ol class="class1 programy" id="id1"><li>Item1</li><li>Item2</li></ol>')
def test_location(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", "<location />")
self.assertEqual(mock_console._response, "")
def test_location_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
renderer.render("testuser", '<location class="class1 programy" id="id1"/>')
self.assertEqual(mock_console._response, "")
def test_tts(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<tts>Some speech</tts>')
self.assertEqual(rendered, '')
def test_tts_with_class_and_id(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<tts class="class1 programy" id="id1">Some speech</tts>')
self.assertEqual(rendered, '')
def test_card_with_xml_at_front(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<something>Some speech</something><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>')
self.assertEqual(rendered, '<something>Some speech</something><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>')
def test_card_with_xml_at_end(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card><something>Some speech</something>')
self.assertEqual(rendered, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div><something>Some speech</something>')
def test_card_with_xml_at_front_and_end(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<something>Some speech</something><card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card><something>Some speech</something>')
self.assertEqual(rendered, '<something>Some speech</something><div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div><something>Some speech</something>')
def test_card_with_text_at_front(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", 'Hello<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>')
self.assertEqual(rendered, 'Hello<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>')
def test_card_with_text_at_end(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", '<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>Hello')
self.assertEqual(rendered, '<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>Hello')
def test_card_with_text_at_front_and_end(self):
mock_console = MockHtmlBotClient()
renderer = HtmlRenderer(mock_console)
self.assertIsNotNone(renderer)
rendered = renderer.render("testuser", 'Hello<card><image>http://servusai.com/aiml.png</image><title>Servusai</title><subtitle>Home of ProgramY</subtitle><button><text>Hello</text><url>http://click.me</url></button></card>Hello')
self.assertEqual(rendered, 'Hello<div class="programy"><img src="http://servusai.com/aiml.png" /><h1>Servusai</h1><h2>Home of ProgramY</h2><a href="http://click.me">Hello</a></div>Hello')
|
1637002
|
import sys, cgi
import re
import keyword, token, tokenize
import string, StringIO
SIKULI_KEYWORDS = [
"find", "wait",
"click", "clickAll", "repeatClickAll", "doubleClick",
"doubleClickAll", "repeatDoubleClickAll", "rightClick",
"dragDrop", "type", "sleep", "popup", "capture", "input",
"assertExist", "assertNotExist" ]
HEADER = """
<pre class="sikuli-code">
"""
FOOTER = """
</pre>
"""
_KEYWORD = token.NT_OFFSET + 1
_SIKULI_KEYWORD = token.NT_OFFSET + 3
_colors = {
token.NUMBER: 'dig',
token.STRING: 'str',
tokenize.COMMENT: 'cmt',
_KEYWORD: 'kw',
_SIKULI_KEYWORD: 'skw',
}
if locals().has_key('local_convert'):
LOCAL_CONVERT = True
else:
LOCAL_CONVERT = False
class Parser:
def __init__(self, raw, out = sys.stdout):
self.raw = string.strip(raw.expandtabs(4))
self.out = out
def printLineNo(self, lineno):
self.out.write("<span class='lineno'>%d</span>" % lineno)
def format(self, srcdir, destdir):
global HEADER
self.srcdir = srcdir
self.destdir = destdir
# store line offsets in self.lines
self.lines = [0, 0]
pos = 0
while 1:
pos = string.find(self.raw, '\n', pos) + 1
if not pos: break
self.lines.append(pos)
self.lines.append(len(self.raw))
# parse the source and write it
self.pos = 0
text = StringIO.StringIO(self.raw)
#HEADER = HEADER.replace("$FILE", filename)
#if LOCAL_CONVERT:
# HEADER = HEADER.replace("$HIDE_INFO", "display: none;")
self.out.write(HEADER)
self.printLineNo(1)
try:
tokenize.tokenize(text.readline, self)
except tokenize.TokenError, ex:
msg = ex[0]
line = ex[1][0]
self.out.write("<h3>ERROR: %s</h3>%s\n" % (
msg, self.raw[self.lines[line]:]))
self.out.write('</font></pre>')
self.out.write(FOOTER)
def __call__(self, toktype, toktext, (srow,scol), (erow,ecol), line):
if 0:
print "type", toktype, token.tok_name[toktype], "text", toktext,
print "start", srow,scol, "end", erow,ecol, "<br>"
# calculate new positions
oldpos = self.pos
newpos = self.lines[srow] + scol
self.pos = newpos + len(toktext)
#print "%d-%d" % (oldpos, newpos)
# handle newlines
if toktype in [token.NEWLINE, tokenize.NL]:
self.out.write('\n')
lineno = srow + 1
self.printLineNo(lineno)
return
# send the original whitespace, if needed
if newpos > oldpos:
self.out.write(self.raw[oldpos:newpos])
# skip indenting tokens
# hack to force tabspace = 4
if toktype in [token.INDENT, token.DEDENT]:
#self.pos =
#newpos = newpos/2
#newpos = self.pos - len(toktext)/2
newpos = self.pos
self.out.write(self.raw[oldpos:newpos])
#print "[I]%d-%d" % (oldpos, newpos)
return
# map token type to a color group
if token.LPAR <= toktype and toktype <= token.OP:
toktype = token.OP
elif toktype == token.NAME and keyword.iskeyword(toktext):
toktype = _KEYWORD
elif toktype == token.NAME and toktext in SIKULI_KEYWORDS:
toktype = _SIKULI_KEYWORD
color = ''
if toktype in _colors:
color = _colors.get(toktype)
if toktype == token.STRING and toktext.endswith(".png\""):
m = re.search('[\'\"](.*)[\'\"]',toktext)
filename = m.group(1)
src = "%s/%s" % (self.srcdir, filename)
print "copy image %s to %s" % (src, self.destdir)
import shutil
shutil.copy(src,self.destdir)
self.out.write('<img src="' + filename + '"/>')
return
if color:
self.out.write('<span class="%s">' % (color))
self.out.write(cgi.escape(toktext))
self.out.write('</span>')
else:
self.out.write(cgi.escape(toktext))
from sphinx.util.compat import Directive
from docutils import nodes
from docutils.utils import relative_path
import os.path
import os
class SikuliCodeDirective(Directive):
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = True
#option_spec = dict([(key, directives.flag) for key in VARIANTS])
has_content = True
def run(self):
self.assert_has_content()
#txt = "<b>%s</b>" % self.content
#print ' '.join(self.content)
#x = self.content[0]
#x = x.encode('ascii'
#print self.state.document.__dict__
#print self.state.document.settings.env.__dict__
env = self.state.document.settings.env
root = env.srcdir
src = self.state.document.settings._source
srcdir = os.path.dirname(src)
#print env.__dict__
relpath = relative_path(root, src)
#print relpath
dest = os.path.join(root, "../build/html", relpath)
destdir = os.path.dirname(dest)
#print destdir
if not os.path.exists(destdir):
os.makedirs(destdir)
dir = os.path.dirname(src)
#print os.path.basename(src)
#print os.path.join(dir, "../build/html")
txtout = StringIO.StringIO()
txtin = '\n'.join(self.content)
p = Parser(txtin,txtout)
p.format(srcdir, destdir)
#print txti
#print txtout.getvalue()
txt = txtout.getvalue()
return [nodes.raw('', txt, format='html')]
def setup(app):
app.add_directive('sikulicode', SikuliCodeDirective)
|
1637011
|
from enum import Enum
from typing import Generator, NamedTuple
import pytest
from pms import SensorWarning
from pms.core import Sensor, Supported
@pytest.mark.parametrize("sensor", Supported)
@pytest.mark.parametrize("attr", ["Message", "Data", "Commands"])
def test_sensor_attrs(sensor, attr):
assert getattr(Sensor[sensor], attr)
@pytest.mark.parametrize("sensor", Supported)
@pytest.mark.parametrize("command", "passive_mode passive_read active_mode sleep wake".split())
def test_commands(sensor, command):
assert Sensor[sensor].command(command)
@pytest.mark.parametrize("sensor", Supported)
def test_baud(sensor):
baud = 9600 if sensor != "SPS30" else 115200
assert Sensor[sensor].baud == baud
@pytest.mark.parametrize("sensor", Supported)
def test_pre_heat(sensor):
pre_heat = 0 if sensor != "MHZ19B" else 180
assert Sensor[sensor].pre_heat == pre_heat
class RawData(NamedTuple):
hex: str
raw: tuple
id: str = "good data"
@property
def msg(self) -> bytes:
return bytes.fromhex(self.hex)
@property
def long_buffer(self) -> "RawData":
buffer = self.hex * 2
return self._replace(hex=buffer[6:], id="data at the end of the buffer")
class GoodData(Enum):
PMSx003 = RawData(
"424d001c0005000d00160005000d001602fd00fc001d000f00060006970003c5",
(5, 13, 22, 5, 13, 22, 765, 252, 29, 15, 6, 6),
)
PMS3003 = RawData(
"424d00140051006A007700350046004F33D20F28003F041A",
(81, 106, 119, 53, 70, 79),
)
SDS01x = RawData(
"AAC0D4043A0AA1601DAB",
(1236, 2618),
)
SDS198 = RawData(
"AACF0C001600E90510AB",
(22,),
)
HPMA115S0 = RawData(
"4005040030003156",
(48, 49),
)
HPMA115C0 = RawData(
"400D04003000310032003300000000E9",
(48, 49, 50, 51),
)
SPS30 = RawData(
"7E0003002842280000422800004228000042280000422800004228000042280000422800004228000042280000B07E",
(42.0, 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, 42.0, 42.0),
"fake data",
)
MCU680 = RawData(
"5A5A3F0F0835198A01885430D200032BE1004A1A",
(2101, 6538, 392, 84, 12498, 207841, 74),
)
MHZ19B = RawData(
"FF8601F40000000085",
(500,),
)
ZH0xx = RawData(
"FF86008500960065FA",
(133, 150, 101),
)
@classmethod
def test_param(cls) -> Generator[pytest.param, None, None]: # type: ignore[valid-type]
for sensor in cls:
data = sensor.value
yield pytest.param(sensor.name, data.msg, data.raw, id=f"{sensor.name} {data.id}")
data = data.long_buffer
yield pytest.param(sensor.name, data.msg, data.raw, id=f"{sensor.name} {data.id}")
@classmethod
def test_obs(cls, secs: int = 1567201793) -> Generator[pytest.param, None, None]: # type: ignore[valid-type]
for sensor in cls:
obs = Sensor[sensor.name].decode(sensor.value.msg, time=secs)
yield pytest.param(obs, id=sensor.name)
@pytest.mark.parametrize("sensor,msg,raw", GoodData.test_param())
def test_check(sensor, msg, raw, secs=1567201793):
assert Sensor[sensor].check(msg, "passive_read")
for other in Sensor:
if other.name == sensor:
continue
if sensor == "PMSx003" and other.name in ["PMS5003", "PMS5003S", "PMS5003T"]:
continue
if sensor in ["MHZ19B", "ZH0xx"] and other.name in ["MHZ19B", "ZH0xx"]:
continue
assert not other.check(msg, "passive_read")
@pytest.mark.parametrize(
"sensor,hex",
[
pytest.param(
"PMSx003",
"424d001c000000000000000000000000000000000000000000000000000000ab",
id="PMSx003 empty message",
),
pytest.param(
"PMS3003",
"424d001400000000000000000000000000000000000000a3",
id="PMS3003 empty message",
),
pytest.param(
"SDS01x",
"AAC000000000000000AB",
id="SDS01x empty message",
),
pytest.param(
"HPMA115S0",
"40050400000000B7",
id="HPMA115S0 empty message",
),
pytest.param(
"SPS30",
"7E0003002800000000000000000000000000000000000000000000000000000000000000000000000000000000D47E",
id="SPS30 empty message",
),
],
)
def test_check_warming_up(sensor: str, hex: str):
assert Sensor[sensor].check(bytes.fromhex(hex), "passive_read")
@pytest.mark.parametrize("sensor,msg,raw", GoodData.test_param())
def test_decode(sensor, msg, raw, secs=1567201793):
assert Sensor[sensor].decode(msg, time=secs) == Sensor[sensor].Data(secs, *raw)
@pytest.mark.parametrize("obs", GoodData.test_obs())
def test_obs_prop(obs):
prop = dict(
pm01=lambda x: x.pm01 == x.pm1,
pm25=lambda x: x.pm25 == x.pm2_5,
pm04=lambda x: x.pm04 == x.pm4,
raw01=lambda x: x.raw01 == x.raw1,
raw25=lambda x: x.raw25 == x.raw2_5,
cf01=lambda x: x.cf01 == x.cf1,
cf25=lambda x: x.cf25 == x.cf2_5,
)
for field, check in prop.items():
if getattr(obs, field, None) is None:
continue
assert check(obs)
@pytest.mark.parametrize(
"sensor,hex,error",
[
pytest.param(
"PMSx003",
"424d001c0000000a00200000000a002000000000000000000000000097000196",
"inconsistent obs: PM10=32 and N0.3=0.0",
id="PMSx003",
),
pytest.param(
"PMS5003T",
"424d001c0000000a00200000000a002000000000000000000000000097000196",
"inconsistent obs: PM10=32 and N0.3=0.0",
id="PMS5003T",
),
pytest.param(
"PMS5003S",
"424d001c0000000a00200000000a002000000000000000000000000097000196",
"inconsistent obs: PM10=32 and N0.3=0.0",
id="PMS5003S",
),
pytest.param(
"PMS5003ST",
"424d00240000000a00200000000a002000000000000000000000000000000000000000009700019E",
"inconsistent obs: PM10=32 and N0.3=0.0",
id="PMS5003ST",
),
],
)
def test_decode_error(sensor, hex, error, secs=1567201793):
with pytest.raises(SensorWarning) as e:
Sensor[sensor].decode(bytes.fromhex(hex), time=secs)
assert str(e.value) == error
@pytest.mark.parametrize(
"sensor,command,hex,length",
[
pytest.param("PMSx003", "passive_mode", "424DE100000170", 8, id="PMSx003 passive"),
pytest.param("PMSx003", "passive_read", "424DE200000171", 32, id="PMSx003 read"),
pytest.param("PMSx003", "sleep", "424DE400000173", 8, id="PMSx003 sleep"),
pytest.param("PMSx003", "wake", "424DE400010174", 32, id="PMSx003 wake"),
pytest.param("PMS3003", "passive_mode", "", 24, id="PMS3003 passive"),
pytest.param(
"SDS01x",
"passive_mode",
"AAB402010100000000000000000000FFFF02AB",
10,
id="SDS01x passive",
),
pytest.param(
"SDS01x", "passive_read", "AAB404000000000000000000000000FFFF02AB", 10, id="SDS01x read"
),
pytest.param(
"SDS01x", "sleep", "AAB406010000000000000000000000FFFF05AB", 10, id="SDS01x sleep"
),
pytest.param(
"SDS01x", "wake", "AAB406010100000000000000000000FFFF06AB", 10, id="SDS01x wake"
),
pytest.param("HPMA115S0", "passive_read", "68010493", 8, id="HPMA115S0 read"),
pytest.param("HPMA115S0", "sleep", "68010295", 2, id="HPMA115S0 sleep"),
pytest.param("HPMA115S0", "wake", "68010196", 2, id="HPMA115S0 wake"),
pytest.param("HPMA115C0", "passive_read", "68010493", 16, id="HPMA115C0 read"),
],
)
def test_command(sensor, command, hex, length):
cmd = Sensor[sensor].command(command)
assert cmd.command == bytes.fromhex(hex)
assert cmd.answer_length == length
|
1637026
|
import logging
from celery import current_app
from django_celery_beat.schedulers import ModelEntry, DatabaseScheduler
from nautobot.extras.models import ScheduledJob, ScheduledJobs
logger = logging.getLogger(__name__)
class NautobotScheduleEntry(ModelEntry):
"""
Nautobot variant of the django-celery-beat ModelEntry which uses the
nautobot.extras.models.ScheduledJob model
"""
def __init__(self, model, app=None):
"""Initialize the model entry."""
self.app = app or current_app._get_current_object()
self.name = "{}_{}".format(model.name, model.pk)
self.task = model.task
self.args = model.args
self.kwargs = model.kwargs
try:
self.schedule = model.schedule
except model.DoesNotExist:
logger.error(
"Disabling schedule %s that was removed from database",
self.name,
)
self._disable(model)
self.options = {}
if model.queue:
self.options["queue"] = model.queue
self.options["headers"] = {}
self.total_run_count = model.total_run_count
self.model = model
if not model.last_run_at:
model.last_run_at = self._default_now()
self.last_run_at = model.last_run_at
class NautobotDatabaseScheduler(DatabaseScheduler):
"""
Nautobot variant of the django-celery-beat DatabaseScheduler which uses the
nautobot.extras.models.ScheduledJob model
"""
Entry = NautobotScheduleEntry
Model = ScheduledJob
Changes = ScheduledJobs
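# A sketch of pointing celery beat at this scheduler; the app name and module
# path below are assumptions, not taken from this file:
#
#   celery -A nautobot beat --scheduler some_module:NautobotDatabaseScheduler
#
# django-celery-beat instantiates the class named by --scheduler, so subclassing
# DatabaseScheduler and swapping in the ScheduledJob/ScheduledJobs models is all
# that is needed to drive beat from Nautobot's own database tables.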
|
1637035
|
from setuptools import setup, Extension, find_packages
setup(name='gitgud',
version='1.1',
author='<NAME>',
author_email="<EMAIL>",
description="Git Gud - a utility for when you are told to 'get good'",
url="https://github.com/fsufitch/git-gud",
package_dir={'':'src'},
packages=['gitgud'],
entry_points = {
"console_scripts": [
"git-gud=gitgud.gitgud:git.gud",
"git-job=gitgud.gitgud:git.job",
"git-rekt=gitgud.gitgud:git.rekt",
"git-spooked=gitgud.gitgud:git.spooked",
"git-money=gitgud.gitgud:git.money",
],
},
install_requires=['argparse', 'pyfiglet'],
)
|
1637048
|
import os
import unittest
import yaml
from jsonasobj import as_json
from biolinkml.meta import SchemaDefinition
from biolinkml.utils.rawloader import load_raw_schema
from biolinkml.utils.yamlutils import DupCheckYamlLoader, as_yaml
from tests.test_utils.environment import env
from tests.utils.test_environment import TestEnvironmentTestCase
class YamlUtilTestCase(TestEnvironmentTestCase):
env = env
def fix_schema_metadata(self, schema: SchemaDefinition) -> SchemaDefinition:
self.assertIsNotNone(schema.generation_date)
schema.source_file = os.path.basename(schema.source_file)
schema.generation_date = "2018-12-31 17:23"
self.assertIsNotNone(schema.metamodel_version)
schema.metamodel_version = "0.5.0"
self.assertIsNotNone(schema.source_file_size)
schema.source_file_size = 259
self.assertIsNotNone(schema.source_file_date)
schema.source_file_date = "2018-12-31 17:23"
return schema
def test_dupcheck_loader(self):
""" Make sure the duplicate checker finds duplicates """
with open(env.input_path('yaml1.yaml')) as f:
y1 = yaml.safe_load(f)
self.assertEqual(17, y1['f1'])
with open(env.input_path('yaml1.yaml')) as f:
with self.assertRaises(ValueError):
yaml.load(f, DupCheckYamlLoader)
with open(env.input_path('yaml2.yaml')) as f:
with self.assertRaises(ValueError):
yaml.load(f, DupCheckYamlLoader)
with open(env.input_path('schema1.yaml')) as f:
s1 = yaml.load(f, DupCheckYamlLoader)
self.assertEqual('schema1', s1['name'])
def test_as_json(self):
schema = self.fix_schema_metadata(load_raw_schema(env.input_path('schema6.yaml')))
env.eval_single_file(env.expected_path('schema6.json'), as_json(schema), filtr=lambda s: s)
def test_as_yaml(self):
""" Test the YAML output representation """
schema = self.fix_schema_metadata(load_raw_schema(env.input_path('schema4.yaml')))
env.eval_single_file(env.expected_path('schema4.yaml'), as_yaml(schema), filtr=lambda s: s)
if __name__ == '__main__':
unittest.main()
|
1637080
|
from pytg import sender
from pytg.exceptions import IllegalResponseException
import os
import logging
import yaml
import datetime
import time
logging.basicConfig(level=logging.INFO)
# Ugly hack: increase timeout for document reception
# Sub hack: use a list to assign a new value
tmp_f = list(sender.functions["load_document"])
tmp_f[sender.FUNC_TIME] = 3600.0
sender.functions["load_document"] = tuple(tmp_f)
x = sender.Sender("127.0.0.1", 4458)
def build_dialogs_list():
"""Return the list of all dialogs"""
base_list = []
res = True
while res:
res = x.dialog_list(100, len(base_list))
base_list += res
return base_list
def work_on_dialog(d):
"""Backup a particular dialog"""
logging.info("Working on %s %s %s", d['type'], d['print_name'], d['id'])
if not d['print_name']:
logging.error("%s has no print_name, cannot continue.", d['id'])
return
working_dir = "logs/by_ids/{}/".format(d['id'])
if not os.path.isdir(working_dir):
logging.debug("Creating working_dir %s", working_dir)
os.mkdir(working_dir)
symlink = "logs/{},{}".format(d['type'], d['print_name'].replace('/', ''))
if not os.path.exists(symlink):
logging.debug("Creating symlink %s", symlink)
os.symlink(working_dir[5:], symlink)
# "Eat" history until the last message, but stop at the last checkpoint
checkpoint_file = "{}/_checkpoint.yaml".format(working_dir)
last_checkpoint = None
if os.path.exists(checkpoint_file):
logging.debug("Loading checkpoing")
with open(checkpoint_file, 'r') as checkpoint_f:
            data = yaml.safe_load(checkpoint_f)
last_checkpoint = data.get('checkpoint', None)
logging.info("Last checkpoint is %s", last_checkpoint)
messages = {}
last_messages = True
while last_messages and last_checkpoint not in messages:
try:
last_messages = x.history(d['print_name'], 250, len(messages), retry_connect=-1)
except IllegalResponseException as e:
last_messages = []
if str(e) == "Result parser does not allow exceptions.":
logging.warning("Slowing down...")
time.sleep(5)
last_messages = True
        if last_messages and last_messages is not True:
for message in last_messages:
messages[message['id']] = message
logging.info("Loading, offset %s", len(messages))
logging.info("Found %s messages to process", len(messages))
# Save messages by date
loaded_data = {}
for id, message in messages.items():
if 'date' not in message:
logging.error("Not date in message %s", message['id'])
continue
date = datetime.datetime.fromtimestamp(message['date'])
file_key = '{}.{}.yaml'.format(date.year, date.month)
if file_key not in loaded_data:
file_key_name = '{}{}'.format(working_dir, file_key)
if os.path.isfile(file_key_name):
with open(file_key_name, 'r') as file_key_f:
                    loaded_data[file_key] = yaml.safe_load(file_key_f)
logging.info("Loaded datafile %s", file_key)
else:
loaded_data[file_key] = {}
logging.info("Created datafile %s", file_key)
if message['id'] not in loaded_data[file_key]:
if message['event'] == 'message':
loaded_data[file_key][message['id']] = {'from': message['from']['print_name'], 'text': message.get('text', ''), 'date': message['date']}
if 'media' in message:
if message['media']['type'] not in ['webpage', 'contact']:
result = x.load_document(message['id'])
if os.path.exists(result['result']):
file_dir = "files_{}_{}/".format(date.year, date.month)
file_dir_full = "{}/{}/".format(working_dir, file_dir)
if not os.path.isdir(file_dir_full):
os.mkdir(file_dir_full)
media_file = "{}/{}.{}".format(file_dir_full, message['id'], result['result'].split('.')[-1].replace('/', ''))
os.rename(result['result'], media_file)
loaded_data[file_key][message['id']]['media'] = '{}{}.{}'.format(file_dir, message['id'], result['result'].split('.')[-1].replace('/', ''))
else:
loaded_data[file_key][message['id']]['media'] = result['result']
elif message['event'] == 'service':
pass
else:
logging.error("Unknow type %s", message['event'])
if not last_checkpoint or last_checkpoint < message['id']:
last_checkpoint = message['id']
# Save messages
for file_key, data in loaded_data.items():
with open('{}/{}'.format(working_dir, file_key), 'w') as file_key_f:
yaml.dump(data, file_key_f, default_flow_style=False)
logging.info("Saved datafile %s", file_key)
# Save checkpoint
with open(checkpoint_file, 'w') as checkpoint_f:
yaml.dump({'checkpoint': last_checkpoint}, checkpoint_f)
logging.info("Saved checkpoint")
return True
for d in build_dialogs_list():
work_on_dialog(d)
|
1637084
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from IPython import embed
class CrossE_Loss(nn.Module):
def __init__(self, args, model):
super(CrossE_Loss, self).__init__()
self.args = args
self.model = model
def forward(self, score, label):
pos = torch.log(torch.clamp(score, 1e-10, 1.0)) * torch.clamp(label, 0.0, 1.0)
neg = torch.log(torch.clamp(1-score, 1e-10, 1.0)) * torch.clamp(-label, 0.0, 1.0)
num_pos = torch.sum(torch.clamp(label, 0.0, 1.0), -1)
num_neg = torch.sum(torch.clamp(-label, 0.0, 1.0), -1)
loss = - torch.sum(torch.sum(pos, -1)/num_pos) - torch.sum(torch.sum(neg, -1)/num_neg)
return loss
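# A small usage sketch (not part of the original file): `score` holds
# probabilities in (0, 1) and `label` marks positives with 1, negatives with -1
# and padding with 0, so each clamp selects one side of the loss. Every row
# below has both positives and negatives, keeping num_pos/num_neg nonzero.
#
#   loss_fn = CrossE_Loss(args=None, model=None)
#   score = torch.sigmoid(torch.randn(4, 10))  # (batch, num_candidates)
#   label = torch.ones(4, 10)
#   label[:, 5:] = -1
#   loss = loss_fn(score, label)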
|
1637106
|
from product_details import product_details
def get_product_details_history():
sections = [
(u'Firefox', 'firefox_history_development_releases'),
(u'Firefox', 'firefox_history_major_releases'),
(u'Firefox', 'firefox_history_stability_releases'),
(u'Firefox for Android', 'mobile_history_development_releases'),
(u'Firefox for Android', 'mobile_history_major_releases'),
(u'Firefox for Android', 'mobile_history_stability_releases')
]
events = []
for product, section in sections:
data = getattr(product_details, section)
events.extend([
{
'product': product,
'date': date,
'version': version
} for version, date in data.items()
])
# Sort the events list by (date, product)
events.sort(key=lambda item: (item['date'], item['product']))
return events
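# A sketch of the returned structure (illustrative values only; dates in
# product_details are 'YYYY-MM-DD' strings, so sorting by (date, product)
# is also chronological):
#
#   >>> get_product_details_history()[0]
#   {'product': u'Firefox', 'date': u'2004-11-09', 'version': u'1.0'}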
|
1637107
|
from dataclasses import dataclass
from typing import Tuple, List, Optional, Any
import numpy as np
import torch
from src.huggingmolecules.configuration.configuration_api import PretrainedConfigMixin
from src.huggingmolecules.featurization.featurization_api import PretrainedFeaturizerMixin, RecursiveToDeviceMixin
from src.huggingmolecules.featurization.featurization_common_utils import stack_y_list
from src.huggingmolecules.models.models_api import PretrainedModelBase
try:
import chemprop
except ImportError:
raise ImportError('Please install chemprop v.1.1.0 (pip install chemprop==1.1.0) '
'from https://github.com/chemprop/chemprop to use ChempropModelWrapper.')
@dataclass
class ChempropConfig(PretrainedConfigMixin):
d_model: int = 300
    features_generators: Optional[List[str]] = None
depth: int = 3
@dataclass
class ChempropBatchEncoding(RecursiveToDeviceMixin):
batch_mol_graph: Any
batch_features: Optional[List[torch.Tensor]]
y: torch.FloatTensor
batch_size: int
def __len__(self):
return self.batch_size
class ChempropFeaturizer(PretrainedFeaturizerMixin[Tuple[dict, float], ChempropBatchEncoding, ChempropConfig]):
def __init__(self, config: ChempropConfig):
super().__init__(config)
self.features_generators = config.features_generators
    def _collate_encodings(self, encodings: List[Tuple[Any, Optional[np.ndarray], float]]) -> ChempropBatchEncoding:
mol_graph_list, features_list, y_list = zip(*encodings)
batch_mol_graph = chemprop.features.BatchMolGraph(mol_graph_list)
if features_list is not None and all(f is not None for f in features_list):
batch_features = [torch.tensor(f).float() for f in features_list]
else:
batch_features = None
return ChempropBatchEncoding(batch_mol_graph=batch_mol_graph,
batch_features=batch_features,
y=stack_y_list(y_list),
batch_size=len(y_list))
    def _encode_smiles(self, smiles: str, y: Optional[float]) -> Tuple[Any, Optional[np.ndarray], float]:
datapoint = chemprop.data.MoleculeDatapoint([smiles], features_generator=self.features_generators)
mol_graph = chemprop.features.MolGraph(datapoint.mol[0])
features = datapoint.features
return mol_graph, features, y
class ChempropModelWrapper(PretrainedModelBase):
def __init__(self, config: ChempropConfig):
super().__init__(config)
args = chemprop.args.TrainArgs()
args.parse_args(args=["--data_path", "non_existent", "--dataset_type", 'regression'])
args.task_names = ["whatever"]
args.depth = config.depth
args.hidden_size = config.d_model
self.model = chemprop.models.MoleculeModel(args)
@classmethod
def get_featurizer_cls(cls):
return ChempropFeaturizer
@classmethod
def get_config_cls(cls):
return ChempropConfig
def forward(self, batch: ChempropBatchEncoding):
return self.model([batch.batch_mol_graph], batch.batch_features)
def parameters(self, **kwargs):
return self.model.parameters(**kwargs)
|
1637137
|
text = """
//------------------------------------------------------------------------------
// Explicit instantiation.
//------------------------------------------------------------------------------
#include "Geometry/Dimension.hh"
#include "RK/ReproducingKernelMethods.cc"
namespace Spheral {
template class ReproducingKernelMethods<Dim<%(ndim)s>>;
}
"""
|
1637187
|
input = """
1 2 0 0
1 3 0 0
1 4 0 0
1 5 0 0
1 6 0 0
1 7 0 0
1 8 0 0
1 9 0 0
1 10 0 0
1 11 0 0
1 12 0 0
1 13 0 0
1 14 0 0
1 15 0 0
1 16 0 0
1 17 0 0
1 18 0 0
1 19 0 0
1 20 2 1 21 22
1 21 2 1 20 22
1 22 0 0
1 23 2 1 24 25
1 24 2 1 23 25
1 25 0 0
1 26 2 1 27 28
1 27 2 1 26 28
1 28 0 0
1 29 2 1 30 31
1 30 2 1 29 31
1 31 0 0
1 20 2 1 21 32
1 21 2 1 20 32
1 32 0 0
1 23 2 1 24 33
1 24 2 1 23 33
1 33 0 0
1 26 2 1 27 34
1 27 2 1 26 34
1 34 0 0
1 29 2 1 30 35
1 30 2 1 29 35
1 35 0 0
1 36 2 1 37 38
1 37 2 1 36 38
1 38 0 0
1 39 2 1 40 41
1 40 2 1 39 41
1 41 0 0
1 42 2 1 43 44
1 43 2 1 42 44
1 44 0 0
1 45 2 1 46 47
1 46 2 1 45 47
1 47 0 0
1 36 2 1 37 48
1 37 2 1 36 48
1 48 0 0
1 39 2 1 40 49
1 40 2 1 39 49
1 49 0 0
1 42 2 1 43 50
1 43 2 1 42 50
1 50 0 0
1 45 2 1 46 51
1 46 2 1 45 51
1 51 0 0
1 52 1 0 20
1 53 1 0 23
1 54 1 0 42
1 55 1 0 39
1 56 2 0 55 36
1 56 2 0 54 45
1 57 2 0 53 26
1 57 2 0 52 29
1 53 2 0 57 26
1 52 2 0 57 29
1 54 2 0 56 45
1 55 2 0 56 36
1 1 1 1 57
1 1 1 1 56
1 1 2 0 45 29
1 1 2 0 42 26
1 1 2 0 39 23
1 1 2 0 36 20
1 1 2 0 29 45
1 1 2 0 26 42
1 1 2 0 23 39
1 1 2 0 20 36
1 1 2 0 36 23
1 1 2 0 20 39
1 1 2 0 39 20
1 1 2 0 23 36
1 1 2 0 42 23
1 1 2 0 26 39
1 1 2 0 45 20
1 1 2 0 29 36
1 1 2 0 36 29
1 1 2 0 20 45
1 1 2 0 39 26
1 1 2 0 23 42
1 1 2 0 42 29
1 1 2 0 26 45
1 1 2 0 45 26
1 1 2 0 29 42
1 1 2 0 57 54
1 1 2 0 56 52
1 1 2 0 54 57
1 1 2 0 52 56
1 58 2 1 53 26
1 58 2 1 52 29
1 58 2 1 57 26
1 58 2 1 57 29
1 58 2 1 55 36
1 58 2 1 56 45
1 58 2 1 56 36
1 58 2 1 54 45
1 59 1 0 45
1 60 1 0 42
1 61 1 0 39
1 62 1 0 36
1 59 1 0 29
1 60 1 0 26
1 61 1 0 23
1 62 1 0 20
0
12 uedge(e_1_0_1_1,v_1_1,v_1_0)
13 uedge(e_0_1_1_1,v_1_1,v_0_1)
14 uedge(e_0_0_0_1,v_0_1,v_0_0)
15 uedge(e_0_0_1_0,v_1_0,v_0_0)
16 uedge(e_1_0_1_1,v_1_0,v_1_1)
17 uedge(e_0_1_1_1,v_0_1,v_1_1)
18 uedge(e_0_0_0_1,v_0_0,v_0_1)
19 uedge(e_0_0_1_0,v_0_0,v_1_0)
6 edge(e_0_0_1_0,v_0_0,v_1_0)
7 edge(e_0_0_0_1,v_0_0,v_0_1)
8 edge(e_0_1_1_1,v_0_1,v_1_1)
9 edge(e_1_0_1_1,v_1_0,v_1_1)
20 linked(e_0_0_1_0,3)
23 linked(e_0_0_0_1,3)
26 linked(e_0_1_1_1,3)
29 linked(e_1_0_1_1,3)
36 linked(e_0_0_1_0,1)
39 linked(e_0_0_0_1,1)
42 linked(e_0_1_1_1,1)
45 linked(e_1_0_1_1,1)
52 path(3,v_0_0,v_1_0)
53 path(3,v_0_0,v_0_1)
54 path(1,v_0_1,v_1_1)
55 path(1,v_0_1,v_0_0)
56 path(1,v_0_1,v_1_0)
57 path(3,v_0_0,v_1_1)
10 connection(1,v_0_1,v_1_0)
11 connection(3,v_0_0,v_1_1)
59 link(e_1_0_1_1)
60 link(e_0_1_1_1)
61 link(e_0_0_0_1)
62 link(e_0_0_1_0)
58 pippo
21 noLinked(e_0_0_1_0,3)
24 noLinked(e_0_0_0_1,3)
27 noLinked(e_0_1_1_1,3)
30 noLinked(e_1_0_1_1,3)
37 noLinked(e_0_0_1_0,1)
40 noLinked(e_0_0_0_1,1)
43 noLinked(e_0_1_1_1,1)
46 noLinked(e_1_0_1_1,1)
2 node(v_0_0)
3 node(v_0_1)
4 node(v_1_0)
5 node(v_1_1)
0
B+
0
B-
1
0
1
"""
output = """
"""
|
1637213
|
import ast
import sys
import dace
from dace.transformation.transformation import Transformation
from dace.transformation.dataflow import MapFission
from typing import Any, Dict, Set
import warnings
from dace import registry, sdfg as sd, symbolic
from dace.properties import make_properties
from dace.sdfg import nodes, utils, propagation
from stencilflow.stencil.stencil import Stencil
class DimensionAdder(ast.NodeTransformer):
""" Adds a dimension in a Python AST to all subscripts of the specified
arrays. """
def __init__(self, names: Set[str], dim_index: int, value: int = 0):
self.names = names
self.dim = dim_index
self.value = value
def visit_Subscript(self, node: ast.Subscript):
if not isinstance(node.value, ast.Name):
raise TypeError('Only subscripts of variables are supported')
varname = node.value.id
# Add dimension to correct location
if varname in self.names:
node.slice.value.elts.insert(
self.dim,
ast.copy_location(
ast.parse(str(self.value)).body[0].value,
node.slice.value.elts[0]))
return node
return self.generic_visit(node)
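# A small illustration (assuming a pre-3.9 AST, where a tuple subscript is an
# ast.Index wrapping an ast.Tuple, which is what `node.slice.value.elts` above
# relies on): rewrite accesses to array `A` to gain a leading dimension.
#
#   tree = ast.parse('A[i, j] = B[i, j]')
#   tree = DimensionAdder({'A'}, dim_index=0, value=0).visit(tree)
#   # A[i, j] becomes A[0, i, j]; subscripts of B are left untouched.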
@registry.autoregister_params(singlestate=True)
@make_properties
class NestK(Transformation):
""" Transformation that nests a one-dimensional map into a stencil,
including it in the computational domain. """
_map_entry = nodes.MapEntry(nodes.Map('', [], []))
_stencil = Stencil('')
@staticmethod
def expressions():
return [utils.node_path_graph(NestK._map_entry, NestK._stencil)]
@staticmethod
def match_to_str(graph, candidate):
map_entry: nodes.MapEntry = graph.node(candidate[NestK._map_entry])
stencil: Stencil = graph.node(candidate[NestK._stencil])
return '%s into %s' % (map_entry.map.label, stencil.label)
@staticmethod
def can_be_applied(graph: dace.SDFGState,
candidate: Dict[Any, int],
expr_index: int,
sdfg: dace.SDFG,
strict=False):
map_entry: nodes.MapEntry = graph.node(candidate[NestK._map_entry])
stencil: Stencil = graph.node(candidate[NestK._stencil])
if len(map_entry.map.params) != 1:
return False
if sd.has_dynamic_map_inputs(graph, map_entry):
return False
pname = map_entry.map.params[0] # Usually "k"
dim_index = None
for edge in graph.out_edges(map_entry):
if edge.dst != stencil:
return False
for edge in graph.all_edges(stencil):
if edge.data.data is None: # Empty memlet
continue
# TODO: Use bitmap to verify lower-dimensional arrays
if len(edge.data.subset) == 3:
for i, rng in enumerate(edge.data.subset.ndrange()):
for r in rng:
if pname in map(str, r.free_symbols):
if dim_index is not None and dim_index != i:
# k dimension must match in all memlets
return False
if str(r) != pname:
if symbolic.issymbolic(
r - symbolic.symbol(pname),
sdfg.constants):
warnings.warn('k expression is nontrivial')
dim_index = i
# No nesting dimension found
if dim_index is None:
return False
# Ensure the stencil shape is 1 for the found dimension
if stencil.shape[dim_index] != 1:
return False
return True
def apply(self, sdfg: dace.SDFG):
graph: dace.SDFGState = sdfg.node(self.state_id)
map_entry: nodes.MapEntry = graph.node(self.subgraph[NestK._map_entry])
stencil: Stencil = graph.node(self.subgraph[NestK._stencil])
# Find dimension index and name
pname = map_entry.map.params[0]
dim_index = None
for edge in graph.all_edges(stencil):
if edge.data.data is None: # Empty memlet
continue
if len(edge.data.subset) == 3:
for i, rng in enumerate(edge.data.subset.ndrange()):
for r in rng:
if (pname in map(str, r.free_symbols)):
dim_index = i
break
if dim_index is not None:
break
if dim_index is not None:
break
###
map_exit = graph.exit_node(map_entry)
# Reconnect external edges directly to stencil node
for edge in graph.in_edges(map_entry):
# Find matching internal edges
tree = graph.memlet_tree(edge)
for child in tree.children:
memlet = propagation.propagate_memlet(graph, child.edge.data,
map_entry, False)
graph.add_edge(edge.src, edge.src_conn, stencil,
child.edge.dst_conn, memlet)
for edge in graph.out_edges(map_exit):
# Find matching internal edges
tree = graph.memlet_tree(edge)
for child in tree.children:
memlet = propagation.propagate_memlet(graph, child.edge.data,
map_entry, False)
graph.add_edge(stencil, child.edge.src_conn, edge.dst,
edge.dst_conn, memlet)
# Remove map
graph.remove_nodes_from([map_entry, map_exit])
# Reshape stencil node computation based on nested map range
stencil.shape[dim_index] = map_entry.map.range.num_elements()
# Add dimensions to access and output fields
add_dims = set()
for edge in graph.in_edges(stencil):
if edge.data.data and len(edge.data.subset) == 3:
if stencil.accesses[edge.dst_conn][0][dim_index] is False:
add_dims.add(edge.dst_conn)
stencil.accesses[edge.dst_conn][0][dim_index] = True
for edge in graph.out_edges(stencil):
if edge.data.data and len(edge.data.subset) == 3:
if stencil.output_fields[edge.src_conn][0][dim_index] is False:
add_dims.add(edge.src_conn)
stencil.output_fields[edge.src_conn][0][dim_index] = True
# Change all instances in the code as well
if stencil.code.language != dace.Language.Python:
raise ValueError(
'For NestK to work, Stencil code language must be Python')
for i, stmt in enumerate(stencil.code.code):
stencil.code.code[i] = DimensionAdder(
add_dims, dim_index).visit(stmt)
if __name__ == '__main__':
from stencilflow.sdfg_to_stencilflow import standardize_data_layout
sdfg: dace.SDFG = dace.SDFG.from_file(sys.argv[1])
sdfg.apply_transformations_repeated([MapFission])
standardize_data_layout(sdfg)
sdfg.apply_transformations_repeated([NestK])
dace.propagate_labels_sdfg(sdfg)
sdfg.apply_strict_transformations()
sdfg.save('nested.sdfg')
# Stencil.default_implementation = 'CPU'
# sdfg.expand_library_nodes()
|
1637214
|
run(args, *, stdin=None, input=None,
stdout=None, stderr=None, shell=False, timeout=None, check=False)
call(args, *, stdin=None, stdout=None,
stderr=None, shell=False, timeout=None)
check_output(args, *, stdin=None, stdout=None,
stderr=None, shell=False, timeout=None)
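A minimal usage sketch (hedged, following standard-library semantics: run()
returns a CompletedProcess, call() returns the exit status, and check_output()
returns captured stdout as bytes, raising CalledProcessError on failure):

import subprocess

result = subprocess.run(['echo', 'hi'], stdout=subprocess.PIPE, check=True)
print(result.returncode, result.stdout)        # 0 b'hi\n'

status = subprocess.call(['echo', 'hi'])       # exit status only
out = subprocess.check_output(['echo', 'hi'])  # b'hi\n'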
|
1637247
|
import copy
import json
import multiprocessing
import os
import random
import shutil
import string
import tempfile
from contextlib import contextmanager
from os import chdir, getcwd, mkdir
from os.path import exists
import pkgpanda.build.constants
import pkgpanda.build.src_fetchers
from pkgpanda import expand_require as expand_require_exceptions
from pkgpanda import Install, PackageId, Repository
from pkgpanda.actions import add_package_file
from pkgpanda.constants import install_root, PKG_DIR, RESERVED_UNIT_NAMES
from pkgpanda.exceptions import FetchError, PackageError, ValidationError
from pkgpanda.subprocess import CalledProcessError, check_call, check_output
from pkgpanda.util import (check_forbidden_services, download_atomic,
hash_checkout, is_windows, load_json, load_string, logger,
make_directory, make_file, make_tar, remove_directory, rewrite_symlinks, write_json,
write_string)
class BuildError(Exception):
"""An error while building something."""
def __init__(self, msg: str):
self.msg = msg
def __str__(self):
return self.msg
class DockerCmd:
def __init__(self):
self.volumes = dict()
self.environment = dict()
self.container = str()
def run(self, name, cmd):
container_name = "{}-{}".format(
name, ''.join(
random.choice(string.ascii_lowercase) for _ in range(10)
)
)
docker = ["docker", "run", "--name={}".format(container_name)]
if is_windows:
# Default number of processes on Windows is 1, so bumping up to use all of them.
            # The default memory allowed on Windows is 1GB. Some packages (mesos is an example)
            # need about 3.5GB to compile a single file. Therefore we need about 4GB per CPU.
numprocs = os.environ.get('NUMBER_OF_PROCESSORS')
docker += ["-m", "{0}gb".format(int(numprocs) * 4), "--cpu-count", numprocs]
for host_path, container_path in self.volumes.items():
docker += ["-v", "{0}:{1}".format(host_path, container_path)]
for k, v in self.environment.items():
docker += ["-e", "{0}={1}".format(k, v)]
docker.append(self.container)
docker += cmd
check_call(docker)
DockerCmd.clean(container_name)
@staticmethod
def clean(name):
"""Cleans up the specified container"""
check_call(["docker", "rm", "-v", name])
def get_variants_from_filesystem(directory, extension):
results = set()
for filename in os.listdir(directory):
# Skip things that don't end in the extension
if not filename.endswith(extension):
continue
variant = filename[:-len(extension)]
# Empty name variant shouldn't have a `.` following it
if variant == '.':
raise BuildError("Invalid filename {}. The \"default\" variant file should be just {}".format(
filename, extension))
# Empty / default variant is represented as 'None'.
if variant == '':
variant = None
else:
# Should be foo. since we've moved the extension.
if variant[-1] != '.':
raise BuildError("Invalid variant filename {}. Expected a '.' separating the "
"variant name and extension '{}'.".format(filename, extension))
variant = variant[:-1]
results.add(variant)
return results
def get_src_fetcher(src_info, cache_dir, working_directory):
try:
kind = src_info['kind']
if kind not in pkgpanda.build.src_fetchers.all_fetchers:
raise ValidationError("No known way to catch src with kind '{}'. Known kinds: {}".format(
kind,
pkgpanda.src_fetchers.all_fetchers.keys()))
args = {
'src_info': src_info,
'cache_dir': cache_dir
}
if src_info['kind'] in ['git_local', 'url', 'url_extract']:
args['working_directory'] = working_directory
return pkgpanda.build.src_fetchers.all_fetchers[kind](**args)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
class TreeInfo:
ALLOWED_TREEINFO_KEYS = {'exclude', 'variants', 'core_package_list', 'bootstrap_package_list'}
def __init__(self, treeinfo_dict):
if treeinfo_dict.keys() > self.ALLOWED_TREEINFO_KEYS:
raise BuildError(
"treeinfo can only include the keys {}. Found {}".format(
self.ALLOWED_TREEINFO_KEYS, treeinfo_dict.keys()))
self.excludes = set(self._get_package_list(treeinfo_dict, 'exclude'))
self.core_package_list = set(self._get_package_list(treeinfo_dict, 'core_package_list', self.excludes))
self.bootstrap_package_list = set(self._get_package_list(
treeinfo_dict,
'bootstrap_package_list',
self.excludes))
# List of mandatory package variants to include in the buildinfo.
self.variants = treeinfo_dict.get('variants', dict())
if not isinstance(self.variants, dict):
raise BuildError("treeinfo variants must be a dictionary of package name to variant name")
@staticmethod
def _get_package_list(treeinfo_dict, key, excludes=None):
"""Return a list of package name strings from treeinfo_dict by key.
If key isn't present in treeinfo_dict, an empty list is returned.
"""
excludes = excludes or list()
package_list = treeinfo_dict.get(key, list())
# Validate package list.
if not isinstance(package_list, list):
raise BuildError("{} must be either null (meaning don't use) or a list of package names.".format(key))
for package_name in package_list:
if not isinstance(package_name, str):
raise BuildError("{} must be a list of strings. Found a {} with the value: {}".format(
key, type(package_name), package_name))
try:
PackageId.validate_name(package_name)
except ValidationError as ex:
raise BuildError("Invalid package name in {}: {}".format(key, package_name)) from ex
if package_name in excludes:
raise BuildError("Package found in both exclude and {}: {}".format(key, package_name))
return package_list
class PackageSet:
def __init__(self, variant, treeinfo, package_store):
self.variant = variant
self.all_packages = self.package_tuples_with_dependencies(
# If core_package_list is empty, default to all non-excluded packages.
treeinfo.core_package_list or (package_store.packages_by_name.keys() - treeinfo.excludes),
treeinfo,
package_store
)
self.validate_package_tuples(self.all_packages, treeinfo, package_store)
if treeinfo.bootstrap_package_list:
self.bootstrap_packages = self.package_tuples_with_dependencies(
treeinfo.bootstrap_package_list,
treeinfo,
package_store
)
self.validate_package_tuples(self.bootstrap_packages, treeinfo, package_store)
else:
self.bootstrap_packages = self.all_packages
# Validate bootstrap packages are a subset of all packages.
for package_name, variant in self.bootstrap_packages:
if (package_name, variant) not in self.all_packages:
raise BuildError("Bootstrap package {} (variant {}) not found in set of all packages".format(
package_name, pkgpanda.util.variant_name(variant)))
@staticmethod
def package_tuples_with_dependencies(package_names, treeinfo, package_store):
package_tuples = set((name, treeinfo.variants.get(name)) for name in set(package_names))
to_visit = list(package_tuples)
while to_visit:
package_tuple = to_visit.pop()
for require in package_store.get_buildinfo(*package_tuple)['requires']:
require_tuple = expand_require(require)
if require_tuple not in package_tuples:
to_visit.append(require_tuple)
package_tuples.add(require_tuple)
return package_tuples
@staticmethod
def validate_package_tuples(package_tuples, treeinfo, package_store):
# Validate that all packages have the variant specified in treeinfo.
print('package_tuples = %r' % package_tuples)
print('treeinfo = %r' % treeinfo.variants)
for package_name, variant in package_tuples:
treeinfo_variant = treeinfo.variants.get(package_name)
if variant != treeinfo_variant:
raise BuildError(
"package {} is supposed to have variant {} included in the tree according to the treeinfo, "
"but variant {} was found.".format(
package_name,
pkgpanda.util.variant_name(treeinfo_variant),
pkgpanda.util.variant_name(variant),
)
)
# Validate that all needed packages are built and not excluded by treeinfo.
for package_name, variant in package_tuples:
if (package_name, variant) not in package_store.packages:
raise BuildError(
"package {} variant {} is needed (explicitly requested or as a requires) "
"but is not in the set of built packages.".format(
package_name,
pkgpanda.util.variant_name(variant),
)
)
if package_name in treeinfo.excludes:
raise BuildError("package {} is needed (explicitly requested or as a requires) "
"but is excluded according to the treeinfo.json.".format(package_name))
class PackageStore:
def __init__(self, packages_dir, repository_url):
self._builders = {}
self._repository_url = repository_url.rstrip('/') if repository_url is not None else None
self._packages_dir = packages_dir.rstrip('/')
# Load all possible packages, making a dictionary from (name, variant) -> buildinfo
self._packages = dict()
self._packages_by_name = dict()
self._package_folders = dict()
# Load an upstream if one exists
# TODO(cmaloney): Allow upstreams to have upstreams
self._package_cache_dir = self._packages_dir + "/cache/packages"
self._upstream_dir = self._packages_dir + "/cache/upstream/checkout"
self._upstream = None
self._upstream_package_dir = self._upstream_dir + "/packages"
# TODO(cmaloney): Make it so the upstream directory can be kept around
remove_directory(self._upstream_dir)
upstream_config = self._packages_dir + '/upstream.json'
if os.path.exists(upstream_config):
try:
self._upstream = get_src_fetcher(
load_optional_json(upstream_config),
self._packages_dir + '/cache/upstream',
packages_dir)
self._upstream.checkout_to(self._upstream_dir)
if os.path.exists(self._upstream_package_dir + "/upstream.json"):
raise Exception("Support for upstreams which have upstreams is not currently implemented")
except Exception as ex:
raise BuildError("Error fetching upstream: {}".format(ex))
        # Iterate through the packages directory finding all packages. Note this
        # packages dir comes first, so duplicate definitions of the same package
        # in any upstream are ignored.
package_dirs = [self._packages_dir]
if self._upstream:
package_dirs.append(self._upstream_package_dir)
for directory in package_dirs:
for name in os.listdir(directory):
package_folder = directory + '/' + name
# Ignore files / non-directories
if not os.path.isdir(package_folder):
continue
# If we've already found this package, it means 1+ versions have been defined. Use
# those and ignore everything in the upstreams.
if name in self._packages_by_name:
continue
if is_windows:
builder_folder = os.path.join(directory, name, 'docker.windows')
else:
builder_folder = os.path.join(directory, name, 'docker')
if os.path.exists(builder_folder):
self._builders[name] = builder_folder
# Search the directory for buildinfo.json files, record the variants
for variant in get_variants_from_filesystem(package_folder, 'buildinfo.json'):
# Only adding the default dictionary once we know we have a package.
self._packages_by_name.setdefault(name, dict())
buildinfo = load_buildinfo(package_folder, variant)
self._packages[(name, variant)] = buildinfo
self._packages_by_name[name][variant] = buildinfo
if name in self._package_folders:
assert self._package_folders[name] == package_folder
else:
self._package_folders[name] = package_folder
def get_package_folder(self, name):
return self._package_folders[name]
def get_bootstrap_cache_dir(self):
return self._packages_dir + "/cache/bootstrap"
def get_complete_cache_dir(self):
return self._packages_dir + "/cache/complete"
def get_buildinfo(self, name, variant):
return self._packages[(name, variant)]
def get_last_complete_set(self, variants):
def get_last_complete(variant):
complete_latest = (
self.get_complete_cache_dir() + '/' + pkgpanda.util.variant_prefix(variant) + 'complete.latest.json')
if not os.path.exists(complete_latest):
raise BuildError("No last complete found for variant {}. Expected to find {} to match "
"{}".format(pkgpanda.util.variant_name(variant), complete_latest,
pkgpanda.util.variant_prefix(variant) + 'treeinfo.json'))
return load_json(complete_latest)
result = {}
if variants is None:
# Get all defined variants.
requested_variants = self.list_trees()
else:
requested_variants = variants
for variant in requested_variants:
result[variant] = get_last_complete(variant)
return result
def get_last_build_filename(self, name, variant):
return self.get_package_cache_folder(name) + '/{}latest'.format(pkgpanda.util.variant_prefix(variant))
def get_package_path(self, pkg_id):
return self.get_package_cache_folder(pkg_id.name) + '/{}.tar.xz'.format(pkg_id)
def get_package_cache_folder(self, name):
directory = self._package_cache_dir + '/' + name
make_directory(directory)
return directory
def list_trees(self):
return get_variants_from_filesystem(self._packages_dir, 'treeinfo.json')
def get_package_set(self, variant):
return PackageSet(variant, TreeInfo(load_config_variant(self._packages_dir, variant, 'treeinfo.json')), self)
def get_all_package_sets(self):
return [self.get_package_set(variant) for variant in sorted(self.list_trees(), key=pkgpanda.util.variant_str)]
@property
def packages(self):
return self._packages
@property
def builders(self):
return self._builders.copy()
@property
def packages_by_name(self):
return self._packages_by_name
@property
def packages_dir(self):
return self._packages_dir
def try_fetch_by_id(self, pkg_id: PackageId):
if self._repository_url is None:
return False
# TODO(cmaloney): Use storage providers to download instead of open coding.
pkg_path = "{}.tar.xz".format(pkg_id)
url = self._repository_url + '/packages/{0}/{1}'.format(pkg_id.name, pkg_path)
try:
directory = self.get_package_cache_folder(pkg_id.name)
# TODO(cmaloney): Move to some sort of logging mechanism?
print("Attempting to download", pkg_id, "from", url, "to", directory)
download_atomic(directory + '/' + pkg_path, url, directory)
assert os.path.exists(directory + '/' + pkg_path)
return directory + '/' + pkg_path
except FetchError:
return False
def try_fetch_bootstrap_and_active(self, bootstrap_id):
if self._repository_url is None:
return False
try:
bootstrap_name = '{}.bootstrap.tar.xz'.format(bootstrap_id)
active_name = '{}.active.json'.format(bootstrap_id)
# TODO(cmaloney): Use storage providers to download instead of open coding.
bootstrap_url = self._repository_url + '/bootstrap/' + bootstrap_name
active_url = self._repository_url + '/bootstrap/' + active_name
print("Attempting to download", bootstrap_name, "from", bootstrap_url)
dest_dir = self.get_bootstrap_cache_dir()
# Normalize to no trailing slash for repository_url
download_atomic(dest_dir + '/' + bootstrap_name, bootstrap_url, self._packages_dir)
print("Attempting to download", active_name, "from", active_url)
download_atomic(dest_dir + '/' + active_name, active_url, self._packages_dir)
return True
except FetchError:
return False
def expand_require(require):
try:
return expand_require_exceptions(require)
except ValidationError as ex:
raise BuildError(str(ex)) from ex
def get_docker_id(docker_name):
return check_output(["docker", "inspect", "-f", "{{ .Id }}", docker_name]).decode('utf-8').strip()
def hash_files_in_folder(directory):
"""Given a relative path, hashes all files inside that folder and subfolders
Returns a dictionary from filename to the hash of that file. If that whole
dictionary is hashed, you get a hash of all the contents of the folder.
This is split out from calculating the whole folder hash so that the
behavior in different walking corner cases can be more easily tested.
"""
assert not directory.startswith('/'), \
"For the hash to be reproducible on other machines relative paths must always be used. " \
"Got path: {}".format(directory)
directory = directory.rstrip('/')
file_hash_dict = {}
# TODO(cmaloney): Disallow symlinks as they're hard to hash, people can symlink / copy in their
# build steps if needed.
for root, dirs, filenames in os.walk(directory):
assert not root.startswith('/')
for name in filenames:
path = root + '/' + name
base = path[len(directory) + 1:]
file_hash_dict[base] = pkgpanda.util.sha1(path)
        # If the directory has files or folders inside it, it is picked up
        # implicitly through them. An empty directory would otherwise be missed,
        # but its existence is important, so record it with a value that no sha1
        # digest can be (the empty string).
if len(filenames) == 0 and len(dirs) == 0:
path = root[len(directory) + 1:]
# Empty path means it is the root directory, in which case we want no entries, not a
# single entry "": ""
if path:
file_hash_dict[root[len(directory) + 1:]] = ""
return file_hash_dict
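# A worked illustration of the mapping (paths and hashes made up): for a tree
# 'pkg/build' containing 'pkg/build/do.sh' and an empty 'pkg/build/logs/',
# hash_files_in_folder('pkg/build') returns
#   {'do.sh': <sha1 of do.sh>, 'logs': ''}
# where the empty directory is recorded with the empty string, a value no
# sha1 digest can collide with.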
@contextmanager
def as_cwd(path):
start_dir = getcwd()
chdir(path)
yield
chdir(start_dir)
def hash_folder_abs(directory, work_dir):
assert directory.startswith(work_dir), "directory must be inside work_dir: {} {}".format(directory, work_dir)
assert not work_dir[-1] == '/', "This code assumes no trailing slash on the work_dir"
with as_cwd(work_dir):
return hash_folder(directory[len(work_dir) + 1:])
def hash_folder(directory):
return hash_checkout(hash_files_in_folder(directory))
# Try to read json from the given file. If it is an empty file, then return an
# empty json dictionary.
def load_optional_json(filename):
try:
with open(filename) as f:
text = f.read().strip()
if text:
return json.loads(text)
return {}
except OSError as ex:
raise BuildError("Failed to open JSON file {}: {}".format(filename, ex))
except ValueError as ex:
raise BuildError("Unable to parse json in {}: {}".format(filename, ex))
def load_config_variant(directory, variant, extension):
assert directory[-1] != '/'
return load_optional_json(directory + '/' + pkgpanda.util.variant_prefix(variant) + extension)
def load_buildinfo(path, variant):
buildinfo = load_config_variant(path, variant, 'buildinfo.json')
# Fill in default / guaranteed members so code everywhere doesn't have to guard around it.
default_build_script = 'build'
if is_windows:
default_build_script = 'build.ps1'
buildinfo.setdefault('build_script', pkgpanda.util.variant_prefix(variant) + default_build_script)
buildinfo.setdefault('docker', 'dcos/dcos-builder:dcos-builder_dockerdir-latest')
buildinfo.setdefault('environment', dict())
buildinfo.setdefault('requires', list())
buildinfo.setdefault('state_directory', False)
return buildinfo
def make_bootstrap_tarball(package_store, packages, variant):
# Convert filenames to package ids
pkg_ids = list()
for pkg_path in packages:
# Get the package id from the given package path
filename = os.path.basename(pkg_path)
if not filename.endswith(".tar.xz"):
raise BuildError("Packages must be packaged / end with a .tar.xz. Got {}".format(filename))
pkg_id = filename[:-len(".tar.xz")]
pkg_ids.append(pkg_id)
bootstrap_cache_dir = package_store.get_bootstrap_cache_dir()
# Filename is output_name.<sha-1>.{active.json|.bootstrap.tar.xz}
bootstrap_id = hash_checkout(pkg_ids)
latest_name = "{}/{}bootstrap.latest".format(bootstrap_cache_dir, pkgpanda.util.variant_prefix(variant))
output_name = bootstrap_cache_dir + '/' + bootstrap_id + '.'
# bootstrap tarball = <sha1 of packages in tarball>.bootstrap.tar.xz
bootstrap_name = "{}bootstrap.tar.xz".format(output_name)
active_name = "{}active.json".format(output_name)
def mark_latest():
# Ensure latest is always written
write_string(latest_name, bootstrap_id)
print("bootstrap: {}".format(bootstrap_name))
print("active: {}".format(active_name))
print("latest: {}".format(latest_name))
return bootstrap_id
if (os.path.exists(bootstrap_name)):
print("Bootstrap already up to date, not recreating")
return mark_latest()
make_directory(bootstrap_cache_dir)
# Try downloading.
if package_store.try_fetch_bootstrap_and_active(bootstrap_id):
print("Bootstrap already up to date, Not recreating. Downloaded from repository-url.")
return mark_latest()
print("Unable to download from cache. Building.")
print("Creating bootstrap tarball for variant {}".format(variant))
work_dir = tempfile.mkdtemp(prefix='mkpanda_bootstrap_tmp')
def make_abs(path):
return os.path.join(work_dir, path)
pkgpanda_root = make_abs("opt/mesosphere")
repository = Repository(os.path.join(pkgpanda_root, "packages"))
# Fetch all the packages to the root
for pkg_path in packages:
filename = os.path.basename(pkg_path)
pkg_id = filename[:-len(".tar.xz")]
def local_fetcher(id, target):
            shutil.unpack_archive(pkg_path, target, "xztar")  # packages are .tar.xz archives
repository.add(local_fetcher, pkg_id, False)
# Activate the packages inside the repository.
# Do generate dcos.target.wants inside the root so that we don't
# try messing with /etc/systemd/system.
install = Install(
root=pkgpanda_root,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
skip_systemd_dirs=True,
manage_users=False,
manage_state_dir=False)
install.activate(repository.load_packages(pkg_ids))
# Mark the tarball as a bootstrap tarball/filesystem so that
# dcos-setup.service will fire.
make_file(make_abs("opt/mesosphere/bootstrap"))
# Write out an active.json for the bootstrap tarball
write_json(active_name, pkg_ids)
# Rewrite all the symlinks to point to /opt/mesosphere
rewrite_symlinks(work_dir, work_dir, "/")
make_tar(bootstrap_name, pkgpanda_root)
remove_directory(work_dir)
# Update latest last so that we don't ever use partially-built things.
write_string(latest_name, bootstrap_id)
print("Built bootstrap")
return mark_latest()
def build_tree_variants(package_store, mkbootstrap):
""" Builds all possible tree variants in a given package store
"""
result = dict()
tree_variants = get_variants_from_filesystem(package_store.packages_dir, 'treeinfo.json')
if len(tree_variants) == 0:
raise Exception('No treeinfo.json can be found in {}'.format(package_store.packages_dir))
for variant in tree_variants:
result[variant] = pkgpanda.build.build_tree(package_store, mkbootstrap, variant)
return result
def build_tree(package_store, mkbootstrap, tree_variants):
"""Build packages and bootstrap tarballs for one or all tree variants.
Returns a dict mapping tree variants to bootstrap IDs.
If tree_variant is None, builds all available tree variants.
"""
    # TODO(cmaloney): Add support for circular dependencies. They are doable as
    # long as there is a pre-built version of enough of the packages.
    # TODO(cmaloney): Make it so that when we're building a treeinfo which has an
    # explicit package list we don't build all the other packages.
build_order = list()
visited = set()
built = set()
def visit(pkg_tuple: tuple):
"""Add a package and its requires to the build order.
Raises AssertionError if pkg_tuple is in the set of visited packages.
If the package has any requires, they're recursively visited and added
to the build order depth-first. Then the package itself is added.
"""
# Visit the node for the first (and only) time.
assert pkg_tuple not in visited
visited.add(pkg_tuple)
# Ensure all dependencies are built. Sorted for stability.
# Requirements may be either strings or dicts, so we convert them all to (name, variant) tuples before sorting.
for require_tuple in sorted(expand_require(r) for r in package_store.packages[pkg_tuple]['requires']):
# If the dependency has already been built, we can move on.
if require_tuple in built:
continue
# If the dependency has not been built but has been visited, then
# there's a cycle in the dependency graph.
if require_tuple in visited:
raise BuildError("Circular dependency. Circular link {0} -> {1}".format(pkg_tuple, require_tuple))
if PackageId.is_id(require_tuple[0]):
raise BuildError("Depending on a specific package id is not supported. Package {} "
"depends on {}".format(pkg_tuple, require_tuple))
if require_tuple not in package_store.packages:
raise BuildError("Package {0} require {1} not buildable from tree.".format(pkg_tuple, require_tuple))
# Add the dependency (after its dependencies, if any) to the build
# order.
visit(require_tuple)
build_order.append(pkg_tuple)
built.add(pkg_tuple)
    # None can't be compared to a string, so expand it into a boolean field,
    # followed by "" for the default (None) variant or the variant string if not.
def key_func(elem):
return elem[0], elem[1] is None, elem[1] or ""
def visit_packages(package_tuples):
for pkg_tuple in sorted(package_tuples, key=key_func):
if pkg_tuple in visited:
continue
visit(pkg_tuple)
if tree_variants:
package_sets = [package_store.get_package_set(v) for v in tree_variants]
else:
package_sets = package_store.get_all_package_sets()
with logger.scope("resolve package graph"):
# Build all required packages for all tree variants.
for package_set in package_sets:
visit_packages(package_set.all_packages)
built_packages = dict()
for (name, variant) in build_order:
built_packages.setdefault(name, dict())
# Run the build, store the built package path for later use.
# TODO(cmaloney): Only build the requested variants, rather than all variants.
built_packages[name][variant] = build(
package_store,
name,
variant,
True)
# Build bootstrap tarballs for all tree variants.
def make_bootstrap(package_set):
with logger.scope("Making bootstrap variant: {}".format(pkgpanda.util.variant_name(package_set.variant))):
package_paths = list()
for name, pkg_variant in package_set.bootstrap_packages:
package_paths.append(built_packages[name][pkg_variant])
if mkbootstrap:
return make_bootstrap_tarball(
package_store,
list(sorted(package_paths)),
package_set.variant)
    # Build bootstraps and package lists for all variants.
# TODO(cmaloney): Allow distinguishing between "build all" and "build the default one".
complete_cache_dir = package_store.get_complete_cache_dir()
make_directory(complete_cache_dir)
results = {}
for package_set in package_sets:
info = {
'bootstrap': make_bootstrap(package_set),
'packages': sorted(
load_string(package_store.get_last_build_filename(*pkg_tuple))
for pkg_tuple in package_set.all_packages)}
write_json(
complete_cache_dir + '/' + pkgpanda.util.variant_prefix(package_set.variant) + 'complete.latest.json',
info)
results[package_set.variant] = info
return results
def assert_no_duplicate_keys(lhs, rhs):
if len(lhs.keys() & rhs.keys()) != 0:
print("ASSERTION FAILED: Duplicate keys between {} and {}".format(lhs, rhs))
assert len(lhs.keys() & rhs.keys()) == 0
# Find all build variants and build them
def build_package_variants(package_store, name, clean_after_build=True, recursive=False):
# Find the packages dir / root of the packages tree, and create a PackageStore
results = dict()
for variant in package_store.packages_by_name[name].keys():
results[variant] = build(
package_store,
name,
variant,
clean_after_build=clean_after_build,
recursive=recursive)
return results
class IdBuilder():
def __init__(self, buildinfo):
self._start_keys = set(buildinfo.keys())
self._buildinfo = copy.deepcopy(buildinfo)
self._taken = set()
def _check_no_key(self, field):
if field in self._buildinfo:
raise BuildError("Key {} shouldn't be in buildinfo, but was".format(field))
def add(self, field, value):
self._check_no_key(field)
self._buildinfo[field] = value
def has(self, field):
return field in self._buildinfo
def take(self, field):
self._taken.add(field)
return self._buildinfo[field]
def replace(self, taken_field, new_field, new_value):
assert taken_field in self._buildinfo
self._check_no_key(new_field)
del self._buildinfo[taken_field]
self._buildinfo[new_field] = new_value
self._taken.add(new_field)
def update(self, field, new_value):
assert field in self._buildinfo
self._buildinfo[field] = new_value
def get_build_ids(self):
# If any keys are left in the buildinfo, error that there were unused keys
remaining_keys = self._start_keys - self._taken
if remaining_keys:
raise BuildError("ERROR: Unknown keys {} in buildinfo.json".format(remaining_keys))
return self._buildinfo
def build(package_store: PackageStore, name: str, variant, clean_after_build, recursive=False):
msg = "Building package {} variant {}".format(name, pkgpanda.util.variant_name(variant))
with logger.scope(msg):
return _build(package_store, name, variant, clean_after_build, recursive)
def _build(package_store, name, variant, clean_after_build, recursive):
assert isinstance(package_store, PackageStore)
tmpdir = tempfile.TemporaryDirectory(prefix="pkgpanda_repo")
repository = Repository(tmpdir.name)
package_dir = package_store.get_package_folder(name)
def src_abs(name):
return package_dir + '/' + name
def cache_abs(filename):
return package_store.get_package_cache_folder(name) + '/' + filename
# Build pkginfo over time, translating fields from buildinfo.
pkginfo = {}
# Build up the docker command arguments over time, translating fields as needed.
cmd = DockerCmd()
assert (name, variant) in package_store.packages, \
"Programming error: name, variant should have been validated to be valid before calling build()."
builder = IdBuilder(package_store.get_buildinfo(name, variant))
final_buildinfo = dict()
builder.add('name', name)
builder.add('variant', pkgpanda.util.variant_str(variant))
# Convert single_source -> sources
if builder.has('sources'):
if builder.has('single_source'):
raise BuildError('Both sources and single_source cannot be specified at the same time')
sources = builder.take('sources')
elif builder.has('single_source'):
sources = {name: builder.take('single_source')}
builder.replace('single_source', 'sources', sources)
else:
builder.add('sources', {})
sources = dict()
print("NOTICE: No sources specified")
final_buildinfo['sources'] = sources
# Construct the source fetchers, gather the checkout ids from them
checkout_ids = dict()
fetchers = dict()
try:
for src_name, src_info in sorted(sources.items()):
# TODO(cmaloney): Switch to a unified top level cache directory shared by all packages
cache_dir = package_store.get_package_cache_folder(name) + '/' + src_name
make_directory(cache_dir)
fetcher = get_src_fetcher(src_info, cache_dir, package_dir)
fetchers[src_name] = fetcher
checkout_ids[src_name] = fetcher.get_id()
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
for src_name, checkout_id in checkout_ids.items():
# NOTE: single_source buildinfo was expanded above so the src_name is
# always correct here.
# Make sure we never accidentally overwrite something which might be
# important. Fields should match if specified (And that should be
# tested at some point). For now disallowing identical saves hassle.
assert_no_duplicate_keys(checkout_id, final_buildinfo['sources'][src_name])
final_buildinfo['sources'][src_name].update(checkout_id)
# Add the sha1 of the buildinfo.json + build file to the build ids
builder.update('sources', checkout_ids)
build_script_file = builder.take('build_script')
# TODO(cmaloney): Change dest name to build_script_sha1
builder.replace('build_script', 'build', pkgpanda.util.sha1(src_abs(build_script_file)))
builder.add('pkgpanda_version', pkgpanda.build.constants.version)
extra_dir = src_abs("extra")
# Add the "extra" folder inside the package as an additional source if it
# exists
if os.path.exists(extra_dir):
extra_id = hash_folder_abs(extra_dir, package_dir)
builder.add('extra_source', extra_id)
final_buildinfo['extra_source'] = extra_id
# Figure out the docker name.
docker_name = builder.take('docker')
cmd.container = docker_name
# Add the id of the docker build environment to the build_ids.
try:
docker_id = get_docker_id(docker_name)
except CalledProcessError:
# docker pull the container and try again
check_call(['docker', 'pull', docker_name])
docker_id = get_docker_id(docker_name)
builder.update('docker', docker_id)
# TODO(cmaloney): The environment variables should be generated during build
# not live in buildinfo.json.
pkginfo['environment'] = builder.take('environment')
    # Whether pkgpanda should make sure a `/var/lib` state directory is available on the host
pkginfo['state_directory'] = builder.take('state_directory')
if pkginfo['state_directory'] not in [True, False]:
raise BuildError("state_directory in buildinfo.json must be a boolean `true` or `false`")
username = None
if builder.has('username'):
username = builder.take('username')
if not isinstance(username, str):
raise BuildError("username in buildinfo.json must be either not set (no user for this"
" package), or a user name string")
try:
pkgpanda.UserManagement.validate_username(username)
except ValidationError as ex:
raise BuildError("username in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['username'] = username
group = None
if builder.has('group'):
group = builder.take('group')
if not isinstance(group, str):
raise BuildError("group in buildinfo.json must be either not set (use default group for this user)"
", or group must be a string")
try:
pkgpanda.UserManagement.validate_group_name(group)
except ValidationError as ex:
raise BuildError("group in buildinfo.json didn't meet the validation rules. {}".format(ex))
pkginfo['group'] = group
# Packages need directories inside the fake install root (otherwise docker
# will try making the directories on a readonly filesystem), so build the
# install root now, and make the package directories in it as we go.
install_dir = tempfile.mkdtemp(prefix="pkgpanda-")
active_packages = list()
active_package_ids = set()
active_package_variants = dict()
auto_deps = set()
# Final package has the same requires as the build.
requires = builder.take('requires')
pkginfo['requires'] = requires
if builder.has("sysctl"):
pkginfo["sysctl"] = builder.take("sysctl")
# TODO(cmaloney): Pull generating the full set of requires a function.
to_check = copy.deepcopy(requires)
    if not isinstance(to_check, list):
raise BuildError("`requires` in buildinfo.json must be an array of dependencies.")
while to_check:
requires_info = to_check.pop(0)
requires_name, requires_variant = expand_require(requires_info)
if requires_name in active_package_variants:
# TODO(cmaloney): If one package depends on the <default>
# variant of a package and 1+ others depends on a non-<default>
# variant then update the dependency to the non-default variant
# rather than erroring.
if requires_variant != active_package_variants[requires_name]:
# TODO(cmaloney): Make this contain the chains of
# dependencies which contain the conflicting packages.
# a -> b -> c -> d {foo}
# e {bar} -> d {baz}
raise BuildError(
"Dependncy on multiple variants of the same package {}. variants: {} {}".format(
requires_name,
requires_variant,
active_package_variants[requires_name]))
            # The package {requires_name, requires_variant} is already a
            # dependency; don't process it again, move on to the next.
continue
active_package_variants[requires_name] = requires_variant
# Figure out the last build of the dependency, add that as the
# fully expanded dependency.
requires_last_build = package_store.get_last_build_filename(requires_name, requires_variant)
if not os.path.exists(requires_last_build):
if recursive:
# Build the dependency
build(package_store, requires_name, requires_variant, clean_after_build, recursive)
else:
raise BuildError("No last build file found for dependency {} variant {}. Rebuild "
"the dependency".format(requires_name, requires_variant))
try:
pkg_id_str = load_string(requires_last_build)
auto_deps.add(pkg_id_str)
pkg_buildinfo = package_store.get_buildinfo(requires_name, requires_variant)
pkg_requires = pkg_buildinfo['requires']
pkg_path = repository.package_path(pkg_id_str)
pkg_tar = pkg_id_str + '.tar.xz'
if not os.path.exists(package_store.get_package_cache_folder(requires_name) + '/' + pkg_tar):
raise BuildError(
"The build tarball {} refered to by the last_build file of the dependency {} "
"variant {} doesn't exist. Rebuild the dependency.".format(
pkg_tar,
requires_name,
requires_variant))
active_package_ids.add(pkg_id_str)
# Mount the package into the docker container.
cmd.volumes[pkg_path] = install_root + "/packages/{}:ro".format(pkg_id_str)
os.makedirs(os.path.join(install_dir, "packages/{}".format(pkg_id_str)))
# Add the dependencies of the package to the set which will be
# activated.
# TODO(cmaloney): All these 'transitive' dependencies shouldn't
# be available to the package being built, only what depends on
# them directly.
to_check += pkg_requires
except ValidationError as ex:
raise BuildError("validating package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
except PackageError as ex:
raise BuildError("loading package needed as dependency {0}: {1}".format(requires_name, ex)) from ex
# Add requires to the package id, calculate the final package id.
# NOTE: active_packages isn't fully constructed here since we lazily load
# packages not already in the repository.
builder.update('requires', list(active_package_ids))
version_extra = None
if builder.has('version_extra'):
version_extra = builder.take('version_extra')
build_ids = builder.get_build_ids()
version_base = hash_checkout(build_ids)
version = None
if builder.has('version_extra'):
version = "{0}-{1}".format(version_extra, version_base)
else:
version = version_base
pkg_id = PackageId.from_parts(name, version)
    # Everything must have been extracted by now. If it wasn't, we would have
    # raised a hard error that a key was set but not used, and it wouldn't have
    # been included in the calculation of the PackageId.
builder = None
    # Save the build_ids. Useful for verifying exactly what went into the
    # package build hash.
final_buildinfo['build_ids'] = build_ids
final_buildinfo['package_version'] = version
# Save the package name and variant. The variant is used when installing
# packages to validate dependencies.
final_buildinfo['name'] = name
final_buildinfo['variant'] = variant
# If the package is already built, don't do anything.
pkg_path = package_store.get_package_cache_folder(name) + '/{}.tar.xz'.format(pkg_id)
# Done if it exists locally
if exists(pkg_path):
print("Package up to date. Not re-building.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
return pkg_path
# Try downloading.
dl_path = package_store.try_fetch_by_id(pkg_id)
if dl_path:
print("Package up to date. Not re-building. Downloaded from repository-url.")
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
print(dl_path, pkg_path)
assert dl_path == pkg_path
return pkg_path
# Fall out and do the build since it couldn't be downloaded
print("Unable to download from cache. Proceeding to build")
print("Building package {} with buildinfo: {}".format(
pkg_id,
json.dumps(final_buildinfo, indent=2, sort_keys=True)))
# Clean out src, result so later steps can use them freely for building.
def clean():
# Run a docker container to remove src/ and result/
cmd = DockerCmd()
cmd.volumes = {
package_store.get_package_cache_folder(name): PKG_DIR + "/:rw",
}
if is_windows:
cmd.container = "microsoft/windowsservercore:1709"
filename = PKG_DIR + "\\src"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
filename = PKG_DIR + "\\result"
cmd.run("package-cleaner",
["cmd.exe", "/c", "if", "exist", filename, "rmdir", "/s", "/q", filename])
else:
cmd.container = "ubuntu:14.04.4"
cmd.run("package-cleaner", ["rm", "-rf", PKG_DIR + "/src", PKG_DIR + "/result"])
clean()
# Only fresh builds are allowed which don't overlap existing artifacts.
result_dir = cache_abs("result")
if exists(result_dir):
raise BuildError("result folder must not exist. It will be made when the package is "
"built. {}".format(result_dir))
# 'mkpanda add' all implicit dependencies since we actually need to build.
for dep in auto_deps:
print("Auto-adding dependency: {}".format(dep))
# NOTE: Not using the name pkg_id because that overrides the outer one.
id_obj = PackageId(dep)
add_package_file(repository, package_store.get_package_path(id_obj))
package = repository.load(dep)
active_packages.append(package)
# Check out all the sources into their respective 'src/' folders.
try:
src_dir = cache_abs('src')
if os.path.exists(src_dir):
raise ValidationError(
"'src' directory already exists, did you have a previous build? " +
"Currently all builds must be from scratch. Support should be " +
"added for re-using a src directory when possible. src={}".format(src_dir))
os.mkdir(src_dir)
for src_name, fetcher in sorted(fetchers.items()):
root = cache_abs('src/' + src_name)
os.mkdir(root)
fetcher.checkout_to(root)
except ValidationError as ex:
raise BuildError("Validation error when fetching sources for package: {}".format(ex))
# Activate the packages so that we have a proper path, environment
# variables.
# TODO(cmaloney): RAII-type thing for the temporary directory so that
# things get cleaned up if we don't make it all the way through?
install = Install(
root=install_dir,
config_dir=None,
rooted_systemd=True,
manage_systemd=False,
block_systemd=True,
fake_path=True,
manage_users=False,
manage_state_dir=False)
install.activate(active_packages)
# Rewrite all the symlinks inside the active path because we will
# be mounting the folder into a docker container, and the absolute
# paths to the packages will change.
# TODO(cmaloney): This isn't very clean, it would be much nicer to
# just run pkgpanda inside the package.
rewrite_symlinks(install_dir, repository.path, install_root + "/packages/")
print("Building package in docker")
# TODO(cmaloney): Run as a specific non-root user, make it possible
# for non-root to cleanup afterwards.
# Run the build, prepping the environment as necessary.
mkdir(cache_abs("result"))
# Copy the build info to the resulting tarball
write_json(cache_abs("src/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/buildinfo.full.json"), final_buildinfo)
write_json(cache_abs("result/pkginfo.json"), pkginfo)
# Make the folder for the package we are building. If docker does it, it
# gets auto-created with root permissions and we can't actually delete it.
os.makedirs(os.path.join(install_dir, "packages", str(pkg_id)))
# TODO(cmaloney): Disallow writing to well-known files and directories?
# Source we checked out
cmd.volumes.update({
# TODO(cmaloney): src should be read only...
# Source directory
cache_abs("src"): PKG_DIR + "/src:rw",
# Getting the result out
cache_abs("result"): install_root + "/packages/{}:rw".format(pkg_id),
# The build script directory
package_dir: PKG_DIR + "/build:ro"
})
if is_windows:
cmd.volumes.update({
# TODO: This is a temporary workaround until Windows RS4 ships with a
# fix that allows overlapping mount directories. We should not replicate
# this on Linux, as it would likely break a bunch of things unnecessarily
# that would only need to be undone in the future.
install_dir: install_root + "/install_dir:ro"
})
else:
cmd.volumes.update({
install_dir: install_root + ":ro"
})
if os.path.exists(extra_dir):
cmd.volumes[extra_dir] = PKG_DIR + "/extra:ro"
cmd.environment = {
"PKG_VERSION": version,
"PKG_NAME": name,
"PKG_ID": pkg_id,
"PKG_PATH": install_root + "/packages/{}".format(pkg_id),
"PKG_VARIANT": variant if variant is not None else "<default>",
"NUM_CORES": multiprocessing.cpu_count()
}
try:
# TODO(cmaloney): Run a wrapper which sources
# /opt/mesosphere/environment then runs a build. Also should fix
# ownership of /opt/mesosphere/packages/{pkg_id} post build.
command = [PKG_DIR + "/build/" + build_script_file]
cmd.run("package-builder", command)
except CalledProcessError as ex:
raise BuildError("docker exited non-zero: {}\nCommand: {}".format(ex.returncode, ' '.join(ex.cmd)))
# Clean up the temporary install dir used for dependencies.
# TODO(cmaloney): Move to an RAII wrapper.
remove_directory(install_dir)
with logger.scope("Build package tarball"):
# Check for forbidden services before packaging the tarball:
try:
check_forbidden_services(cache_abs("result"), RESERVED_UNIT_NAMES)
except ValidationError as ex:
raise BuildError("Package validation failed: {}".format(ex))
# TODO(cmaloney): Updating / filling last_build should be moved out of
# the build function.
write_string(package_store.get_last_build_filename(name, variant), str(pkg_id))
# Bundle the artifacts into the pkgpanda package
tmp_name = pkg_path + "-tmp.tar.xz"
make_tar(tmp_name, cache_abs("result"))
os.replace(tmp_name, pkg_path)
print("Package built.")
if clean_after_build:
clean()
return pkg_path
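# A hedged call sketch (not part of the original module; the signature is
# inferred from the recursive call above, and "mesos" is a placeholder name):
#
#   pkg_path = build(package_store, "mesos", None, clean_after_build=True, recursive=True)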
|
1637253
|
from collections import defaultdict
from typing import List
class Solution:
def replaceWords(self, dictionary: List[str], sentence: str) -> str:
Trie = lambda : defaultdict(Trie)
trie = Trie()
END = True
for word in dictionary:
current = trie
for char in word:
current = current.setdefault(char, Trie())
current[END] = word
def replace(word):
current = trie
for char in word:
if char not in current or END in current:
break
current = current[char]
return current.get(END, word)
return ' '.join(map(replace, sentence.split(' ')))
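# A minimal usage sketch (the inputs below are illustrative, not part of the
# original solution): each word in the sentence is replaced by the shortest
# dictionary root that prefixes it.
if __name__ == "__main__":
    solution = Solution()
    print(solution.replaceWords(["cat", "bat", "rat"],
                                "the cattle was rattled by the battery"))
    # -> "the cat was rat by the bat"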
|
1637271
|
import numpy
import theano
from nose.plugins.skip import SkipTest
from theano.tests.unittest_tools import verify_grad
try:
from pylearn2.sandbox.cuda_convnet.response_norm import (
CrossMapNorm,
CrossMapNormUndo
)
from theano.sandbox.cuda import CudaNdarrayType, CudaNdarray
from theano.sandbox.cuda import gpu_from_host
from theano.sandbox.cuda import ftensor4 as cuda_ftensor4
from theano.sandbox.cuda.basic_ops import gpu_contiguous
except ImportError:
raise SkipTest('cuda not available')
if theano.config.mode == 'FAST_COMPILE':
mode_with_gpu = theano.compile.mode.get_mode('FAST_RUN').including('gpu')
else:
mode_with_gpu = theano.compile.mode.get_default_mode().including('gpu')
def test_cross_map_norm_simple():
op = CrossMapNorm(16, 15. / 16., 1., True)
x = CudaNdarray(numpy.ones((16, 2, 2, 2), dtype='float32'))
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], op(x_)[0])
numpy.testing.assert_allclose(f(x), 0.0625)
def test_cross_map_norm_grad_simple():
rng = numpy.random.RandomState([2013, 2, 10])
op = CrossMapNorm(16, 15/16., 1, True)
make_graph = lambda inp: op(gpu_from_host(inp))[0]
verify = lambda array: verify_grad(make_graph, [array])
inputs = [numpy.ones((16, 1, 1, 1), dtype='float32'),
rng.normal(size=(32, 5, 5, 10)).astype('float32')]
for arr in inputs:
yield verify, arr
def test_cross_map_norm_noncontiguous_grad():
# Check the case reported at https://groups.google.com/d/topic/pylearn-users/KxIYc3hczf4/discussion
x = cuda_ftensor4('x')
x_shuffled = x.dimshuffle(1, 2, 3, 0)
x_shuffled = gpu_contiguous(x_shuffled)
response_norm = CrossMapNorm(
size_f=16, add_scale=(15. / 16.), pow_scale=1, blocked=True)
output_shuffled = response_norm(x_shuffled)[0]
output = output_shuffled.dimshuffle(3, 0, 1, 2)
cost = output.sum()
cost.name = 'cost'
grad_x = theano.grad(cost, x)
f = theano.function([x], grad_x, mode=mode_with_gpu)
x_val = CudaNdarray(numpy.ones((2, 16, 2, 2), dtype='float32'))
f(x_val)
def test_optimization():
op = CrossMapNorm(16, 15./16., 1, True)
x_ = theano.tensor.TensorVariable(CudaNdarrayType([False] * 4))
f = theano.function([x_], theano.grad(op(x_)[0].sum(), x_))
nodes = [x for x in f.maker.fgraph.apply_nodes
if type(x.op) == CrossMapNormUndo]
assert len(nodes) == 1
assert nodes[0].op.inplace
|
1637325
|
from math import ceil, log2
def clog2(x):
return int(ceil(log2(x)))
def flatten(l):
return [item for sublist in l for item in sublist]
def has_kratos_runtime():
try:
import kratos_runtime
return True
except ImportError:
return False
def is_valid_file_mode(file_mode):
'''Return True if the given "file_mode" represents a valid file I/O mode'''
return file_mode in {'r', 'w', 'rb', 'wb', 'a', 'ab', 'r+', 'rb+', 'w+',
'wb+', 'a+', 'ab+'}
def file_mode_allows_reading(file_mode):
'''Return True if the given "file_mode" allows reading'''
return file_mode in {'r', 'rb', 'r+', 'rb+', 'w+', 'wb+', 'a+', 'ab+'}
def file_mode_allows_writing(file_mode):
'''Return True if the given "file_mode" allows writing'''
return file_mode in {'w', 'wb', 'a', 'ab', 'r+', 'rb+', 'w+', 'wb+', 'a+',
'ab+'}
|
1637376
|
from abc import abstractmethod
from datetime import datetime
from typing import TYPE_CHECKING, Dict, List, Optional, Type, cast
import pandas as pd
import pyarrow
from google.protobuf.json_format import MessageToJson
from feast.data_source import DataSource
from feast.dqm.profilers.profiler import Profile, Profiler
from feast.protos.feast.core.SavedDataset_pb2 import SavedDataset as SavedDatasetProto
from feast.protos.feast.core.SavedDataset_pb2 import SavedDatasetMeta, SavedDatasetSpec
from feast.protos.feast.core.SavedDataset_pb2 import (
SavedDatasetStorage as SavedDatasetStorageProto,
)
from feast.protos.feast.core.ValidationProfile_pb2 import (
ValidationReference as ValidationReferenceProto,
)
if TYPE_CHECKING:
from feast.infra.offline_stores.offline_store import RetrievalJob
class _StorageRegistry(type):
classes_by_proto_attr_name: Dict[str, Type["SavedDatasetStorage"]] = {}
def __new__(cls, name, bases, dct):
kls = type.__new__(cls, name, bases, dct)
if dct.get("_proto_attr_name"):
cls.classes_by_proto_attr_name[dct["_proto_attr_name"]] = kls
return kls
class SavedDatasetStorage(metaclass=_StorageRegistry):
_proto_attr_name: str
@staticmethod
def from_proto(storage_proto: SavedDatasetStorageProto) -> "SavedDatasetStorage":
proto_attr_name = cast(str, storage_proto.WhichOneof("kind"))
return _StorageRegistry.classes_by_proto_attr_name[proto_attr_name].from_proto(
storage_proto
)
@abstractmethod
def to_proto(self) -> SavedDatasetStorageProto:
...
@abstractmethod
def to_data_source(self) -> DataSource:
...
class SavedDataset:
name: str
features: List[str]
join_keys: List[str]
full_feature_names: bool
storage: SavedDatasetStorage
tags: Dict[str, str]
feature_service_name: Optional[str] = None
created_timestamp: Optional[datetime] = None
last_updated_timestamp: Optional[datetime] = None
min_event_timestamp: Optional[datetime] = None
max_event_timestamp: Optional[datetime] = None
_retrieval_job: Optional["RetrievalJob"] = None
def __init__(
self,
name: str,
features: List[str],
join_keys: List[str],
storage: SavedDatasetStorage,
full_feature_names: bool = False,
tags: Optional[Dict[str, str]] = None,
feature_service_name: Optional[str] = None,
):
self.name = name
self.features = features
self.join_keys = join_keys
self.storage = storage
self.full_feature_names = full_feature_names
self.tags = tags or {}
self.feature_service_name = feature_service_name
self._retrieval_job = None
def __repr__(self):
items = (f"{k} = {v}" for k, v in self.__dict__.items())
return f"<{self.__class__.__name__}({', '.join(items)})>"
def __str__(self):
return str(MessageToJson(self.to_proto()))
def __hash__(self):
return hash((self.name))
def __eq__(self, other):
if not isinstance(other, SavedDataset):
raise TypeError(
"Comparisons should only involve SavedDataset class objects."
)
if (
self.name != other.name
or sorted(self.features) != sorted(other.features)
or sorted(self.join_keys) != sorted(other.join_keys)
or self.storage != other.storage
or self.full_feature_names != other.full_feature_names
or self.tags != other.tags
or self.feature_service_name != other.feature_service_name
):
return False
return True
@staticmethod
def from_proto(saved_dataset_proto: SavedDatasetProto):
"""
Converts a SavedDatasetProto to a SavedDataset object.
Args:
saved_dataset_proto: A protobuf representation of a SavedDataset.
"""
ds = SavedDataset(
name=saved_dataset_proto.spec.name,
features=list(saved_dataset_proto.spec.features),
join_keys=list(saved_dataset_proto.spec.join_keys),
full_feature_names=saved_dataset_proto.spec.full_feature_names,
storage=SavedDatasetStorage.from_proto(saved_dataset_proto.spec.storage),
tags=dict(saved_dataset_proto.spec.tags.items()),
)
if saved_dataset_proto.spec.feature_service_name:
ds.feature_service_name = saved_dataset_proto.spec.feature_service_name
if saved_dataset_proto.meta.HasField("created_timestamp"):
ds.created_timestamp = (
saved_dataset_proto.meta.created_timestamp.ToDatetime()
)
if saved_dataset_proto.meta.HasField("last_updated_timestamp"):
ds.last_updated_timestamp = (
saved_dataset_proto.meta.last_updated_timestamp.ToDatetime()
)
if saved_dataset_proto.meta.HasField("min_event_timestamp"):
ds.min_event_timestamp = (
saved_dataset_proto.meta.min_event_timestamp.ToDatetime()
)
if saved_dataset_proto.meta.HasField("max_event_timestamp"):
ds.max_event_timestamp = (
saved_dataset_proto.meta.max_event_timestamp.ToDatetime()
)
return ds
def to_proto(self) -> SavedDatasetProto:
"""
Converts a SavedDataset to its protobuf representation.
Returns:
A SavedDatasetProto protobuf.
"""
meta = SavedDatasetMeta()
if self.created_timestamp:
meta.created_timestamp.FromDatetime(self.created_timestamp)
if self.min_event_timestamp:
meta.min_event_timestamp.FromDatetime(self.min_event_timestamp)
if self.max_event_timestamp:
meta.max_event_timestamp.FromDatetime(self.max_event_timestamp)
spec = SavedDatasetSpec(
name=self.name,
features=self.features,
join_keys=self.join_keys,
full_feature_names=self.full_feature_names,
storage=self.storage.to_proto(),
tags=self.tags,
)
if self.feature_service_name:
spec.feature_service_name = self.feature_service_name
saved_dataset_proto = SavedDatasetProto(spec=spec, meta=meta)
return saved_dataset_proto
def with_retrieval_job(self, retrieval_job: "RetrievalJob") -> "SavedDataset":
self._retrieval_job = retrieval_job
return self
def to_df(self) -> pd.DataFrame:
if not self._retrieval_job:
raise RuntimeError(
"To load this dataset use FeatureStore.get_saved_dataset() "
"instead of instantiating it directly."
)
return self._retrieval_job.to_df()
def to_arrow(self) -> pyarrow.Table:
if not self._retrieval_job:
raise RuntimeError(
"To load this dataset use FeatureStore.get_saved_dataset() "
"instead of instantiating it directly."
)
return self._retrieval_job.to_arrow()
def as_reference(self, name: str, profiler: "Profiler") -> "ValidationReference":
return ValidationReference.from_saved_dataset(
name=name, profiler=profiler, dataset=self
)
def get_profile(self, profiler: Profiler) -> Profile:
return profiler.analyze_dataset(self.to_df())
class ValidationReference:
name: str
dataset_name: str
description: str
tags: Dict[str, str]
profiler: Profiler
_profile: Optional[Profile] = None
_dataset: Optional[SavedDataset] = None
def __init__(
self,
name: str,
dataset_name: str,
profiler: Profiler,
description: str = "",
tags: Optional[Dict[str, str]] = None,
):
"""
Validation reference combines a reference dataset (currently only a saved dataset object can be used as
a reference) and a profiler function to generate a validation profile.
The validation profile can be cached in this object, and in this case
the saved dataset retrieval and the profiler call will happen only once.
The validation reference is stored in the Feast registry and can be retrieved by its name, which
must be unique within one project.
Args:
name: the unique name for validation reference
dataset_name: the name of the saved dataset used as a reference
description: a human-readable description
tags: a dictionary of key-value pairs to store arbitrary metadata
profiler: the profiler function used to generate profile from the saved dataset
"""
self.name = name
self.dataset_name = dataset_name
self.profiler = profiler
self.description = description
self.tags = tags or {}
@classmethod
def from_saved_dataset(cls, name: str, dataset: SavedDataset, profiler: Profiler):
"""
Internal constructor to create validation reference object with actual saved dataset object
(regular constructor requires only its name).
"""
ref = ValidationReference(name, dataset.name, profiler)
ref._dataset = dataset
return ref
@property
def profile(self) -> Profile:
if not self._profile:
if not self._dataset:
raise RuntimeError(
"In order to calculate a profile validation reference must be instantiated from a saved dataset. "
"Use ValidationReference.from_saved_dataset constructor or FeatureStore.get_validation_reference "
"to get validation reference object."
)
self._profile = self.profiler.analyze_dataset(self._dataset.to_df())
return self._profile
@classmethod
def from_proto(cls, proto: ValidationReferenceProto) -> "ValidationReference":
profiler_attr = proto.WhichOneof("profiler")
if profiler_attr == "ge_profiler":
from feast.dqm.profilers.ge_profiler import GEProfiler
profiler = GEProfiler.from_proto(proto.ge_profiler)
else:
raise RuntimeError("Unrecognized profiler")
profile_attr = proto.WhichOneof("cached_profile")
if profile_attr == "ge_profile":
from feast.dqm.profilers.ge_profiler import GEProfile
profile = GEProfile.from_proto(proto.ge_profile)
elif not profile_attr:
profile = None
else:
raise RuntimeError("Unrecognized profile")
ref = ValidationReference(
name=proto.name,
dataset_name=proto.reference_dataset_name,
profiler=profiler,
description=proto.description,
tags=dict(proto.tags),
)
ref._profile = profile
return ref
def to_proto(self) -> ValidationReferenceProto:
from feast.dqm.profilers.ge_profiler import GEProfile, GEProfiler
proto = ValidationReferenceProto(
name=self.name,
reference_dataset_name=self.dataset_name,
tags=self.tags,
description=self.description,
ge_profiler=self.profiler.to_proto()
if isinstance(self.profiler, GEProfiler)
else None,
ge_profile=self._profile.to_proto()
if isinstance(self._profile, GEProfile)
else None,
)
return proto
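# Illustrative flow (names like `dataset` and `my_profiler` are assumptions,
# not part of this module): a saved dataset plus a profiler yields a reusable
# validation reference whose profile is computed lazily and cached.
#
#   reference = dataset.as_reference(name="my_reference", profiler=my_profiler)
#   profile = reference.profile  # first access runs the profiler on the dataset
#   profile = reference.profile  # later accesses reuse the cached profile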
|
1637394
|
from .__info__ import __authors__, __version__
from .analysis import DataAnalyzer
from .core import MadMiner
from .delphes import DelphesReader
from .fisherinformation import (
FisherInformation,
InformationGeometry,
profile_information,
project_information,
)
from .lhe import LHEReader
from .likelihood import (
HistoLikelihood,
NeuralLikelihood,
fix_params,
project_log_likelihood,
profile_log_likelihood,
)
from .limits import AsymptoticLimits
from .ml import (
ParameterizedRatioEstimator,
DoubleParameterizedRatioEstimator,
LikelihoodEstimator,
ScoreEstimator,
MorphingAwareRatioEstimator,
Ensemble,
load_estimator,
)
from .plotting import (
plot_uncertainty,
plot_systematics,
plot_pvalue_limits,
plot_distribution_of_information,
plot_fisher_information_contours_2d,
plot_fisherinfo_barplot,
plot_nd_morphing_basis_slices,
plot_nd_morphing_basis_scatter,
plot_2d_morphing_basis,
plot_histograms,
plot_distributions,
)
from .sampling import (
SampleAugmenter,
combine_and_shuffle,
benchmark,
benchmarks,
morphing_point,
morphing_points,
random_morphing_points,
iid_nuisance_parameters,
nominal_nuisance_parameters,
)
import logging
logging.getLogger("madminer").addHandler(logging.NullHandler())
logger = logging.getLogger(__name__)
logger.info("")
logger.info("|{}|".format("-" * 74))
logger.info("|{}|".format(" " * 74))
logger.info("|{}|".format(" MadMiner v{}".format(__version__).ljust(74)))
logger.info("|{}|".format(" " * 74))
logger.info("|{}|".format(" {}".format(__authors__).ljust(74)))
logger.info("|{}|".format(" " * 74))
logger.info("|{}|".format("-" * 74))
logger.info("")
|
1637412
|
from django.db.models.sql.where import (
WhereNode,
EverythingNode
)
class CQLWhereNode(WhereNode):
def as_cql(
self,
qn,
connection
):
return self.as_sql(
qn,
connection
)
class CQLEverythingNode(EverythingNode):
pass
|
1637420
|
from binding import *
from ..namespace import llvm
from src.Pass import ImmutablePass
TargetLibraryInfo = llvm.Class(ImmutablePass)
LibFunc = llvm.Namespace('LibFunc')
LibFunc.Enum('Func', '''
ZdaPv, ZdlPv, Znaj, ZnajRKSt9nothrow_t,
Znam, ZnamRKSt9nothrow_t, Znwj, ZnwjRKSt9nothrow_t,
Znwm, ZnwmRKSt9nothrow_t, cxa_atexit, cxa_guard_abort,
cxa_guard_acquire, cxa_guard_release, memcpy_chk,
acos, acosf, acosh, acoshf,
acoshl, acosl, asin, asinf,
asinh, asinhf, asinhl, asinl,
atan, atan2, atan2f, atan2l,
atanf, atanh, atanhf, atanhl,
atanl, calloc, cbrt, cbrtf,
cbrtl, ceil, ceilf, ceill,
copysign, copysignf, copysignl, cos,
cosf, cosh, coshf, coshl,
cosl, exp, exp10, exp10f,
exp10l, exp2, exp2f, exp2l,
expf, expl, expm1, expm1f,
expm1l, fabs, fabsf, fabsl,
fiprintf,
floor, floorf, floorl, fmod,
fmodf, fmodl, fputc,
fputs, free, fwrite, iprintf,
log, log10, log10f, log10l,
log1p, log1pf, log1pl, log2,
log2f, log2l, logb, logbf,
logbl, logf, logl, malloc,
memchr, memcmp, memcpy, memmove,
memset, memset_pattern16, nearbyint, nearbyintf,
nearbyintl, posix_memalign, pow, powf,
powl, putchar, puts,
realloc, reallocf, rint, rintf,
rintl, round, roundf, roundl,
sin, sinf, sinh, sinhf,
sinhl, sinl, siprintf,
sqrt, sqrtf, sqrtl, stpcpy,
strcat, strchr, strcmp, strcpy,
strcspn, strdup, strlen, strncat,
strncmp, strncpy, strndup, strnlen,
strpbrk, strrchr, strspn, strstr,
strtod, strtof, strtol, strtold,
strtoll, strtoul, strtoull, tan,
tanf, tanh, tanhf, tanhl,
tanl, trunc, truncf,
truncl, valloc, NumLibFuncs''')
# not in llvm-3.2 abs, ffs, ffsl, ffsll, fprintf, isascii,
# isdigit, labs, llabs, printf, sprintf, toascii
from src.ADT.Triple import Triple
from src.ADT.StringRef import StringRef
@TargetLibraryInfo
class TargetLibraryInfo:
_include_ = 'llvm/Target/TargetLibraryInfo.h'
new = Constructor()
new |= Constructor(ref(Triple))
delete = Destructor()
has = Method(cast(bool, Bool), LibFunc.Func)
hasOptimizedCodeGen = Method(cast(bool, Bool), LibFunc.Func)
getName = Method(cast(str, StringRef), LibFunc.Func)
setUnavailable = Method(Void, LibFunc.Func)
setAvailable = Method(Void, LibFunc.Func)
setAvailableWithName = Method(Void, LibFunc.Func, cast(str, StringRef))
disableAllFunctions = Method()
|
1637470
|
import emacspy, socket, tempfile, queue, threading
from emacspy import sym
from typing import Optional
import concurrent.futures, traceback
_call_soon_queue: queue.Queue = queue.Queue(0)
_wakeup_conn: Optional[socket.socket] = None
_emacs_thread = threading.current_thread()
def call_soon_in_main_thread(f):
_call_soon_queue.put(f)
if _wakeup_conn:
_wakeup_conn.send(b'x')
def run_in_main_thread_future(f):
fut: concurrent.futures.Future = concurrent.futures.Future()
def wrapper():
try:
fut.set_result(f())
except Exception as exc:
traceback.print_exc()
fut.set_exception(exc)
call_soon_in_main_thread(wrapper)
return fut
def run_in_main_thread(f):
if _emacs_thread == threading.current_thread():
raise Exception('already on emacs main thread')
return run_in_main_thread_future(f).result()
@emacspy.defun('emacspy-threads/wakeup')
def wakeup(p, data):
while True:
try:
f = _call_soon_queue.get_nowait()
except queue.Empty:
break
f()
def init():
with tempfile.TemporaryDirectory() as dir:
path = dir + '/socket'
s = socket.socket(socket.AF_UNIX)
s.bind(path)
s.listen(1)
# this is "self-pipe trick"
emacspy.f.make_network_process(
sym(":name"), "emacspy-wakeup",
sym(":remote"), path,
sym(":filter"), sym('emacspy-threads/wakeup'))
global _wakeup_conn
_wakeup_conn, _ = s.accept()
wakeup(None, None)
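# A hedged usage sketch (the worker body is an assumption): once init() has
# run on the Emacs main thread, background threads can marshal work onto it.
#
#   def worker():
#       # Blocks until the main thread drains the queue via the wakeup socket.
#       name = run_in_main_thread(lambda: emacspy.f.buffer_name())
#
#   threading.Thread(target=worker).start()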
|
1637481
|
import os
class Card:
suits = ["clubs", "diamonds", "hearts", "spades"]
def __init__(self, suit: str, value: int, down=False):
self.suit = suit
self.value = value
self.down = down
self.symbol = self.name[0].upper()
@property
def name(self) -> str:
"""The name of the card value."""
if self.value <= 10: return str(self.value)
else: return {
11: 'jack',
12: 'queen',
13: 'king',
14: 'ace',
}[self.value]
@property
def image(self):
return (
f"{self.symbol if self.name != '10' else '10'}"\
f"{self.suit[0].upper()}.png" \
if not self.down else "red_back.png"
)
def flip(self):
self.down = not self.down
return self
def __str__(self) -> str:
return f'{self.name.title()} of {self.suit.title()}'
def __repr__(self) -> str:
return str(self)
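# A short usage sketch (hypothetical values, not part of the original module):
if __name__ == "__main__":
    card = Card("hearts", 13)
    print(card)                # King of Hearts
    print(card.image)          # KH.png
    print(card.flip().image)   # red_back.png once the card is face-down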
|
1637485
|
from typing import NamedTuple
import pandas as pd
from pandas import DataFrame
from dbnd import task
@task(result=("features", "scores"))
def f_returns_two_dataframes_v1(p: int) -> (DataFrame, DataFrame):
return (
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
)
@task(result="features,scores")
def f_returns_two_dataframes_v2(p: int) -> (DataFrame, DataFrame):
return (
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
)
@task
def f_returns_two_dataframes_no_hint(p: int) -> (DataFrame, DataFrame):
return (
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
)
FeatureStore = NamedTuple("FeatureStore", features=DataFrame, scores=DataFrame)
@task
def f_returns_two_dataframes_named_tuple_v1(p: int) -> FeatureStore:
return FeatureStore(
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
)
@task
def f_returns_two_dataframes_named_tuple_v2(
p: int,
) -> NamedTuple(
"FeatureStore", fields=[("features", DataFrame), ("scores", DataFrame)]
):
return (
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[p, 1]], columns=["c1", "c2"]),
)
@task
def f_multiple_outputs_py2():
# type: ()-> (pd.DataFrame, pd.DataFrame)
return (
pd.DataFrame(data=[[1, 1]], columns=["c1", "c2"]),
pd.DataFrame(data=[[1, 1]], columns=["c1", "c2"]),
)
@task
def f_returns_huge_dataframe(p: int) -> DataFrame:
return pd.DataFrame(data=[[p] * 100000])
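# Invocation sketch (illustrative): called directly, the decorated functions
# behave like plain callables and return the tuple; inside a dbnd run, the
# two outputs are bound to the names given in `result`.
#
#   features, scores = f_returns_two_dataframes_v1(p=1)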
|
1637508
|
from chainerrl_visualizer.utils.string_generators import generate_timestamp, generate_random_string # NOQA
from chainerrl_visualizer.utils.jsonize_datetime import jsonize_datetime # NOQA
|
1637540
|
import astropy.units as u
import exifread
import matplotlib
import numpy as np
import scipy.ndimage as ndimage
from skimage.transform import hough_circle, hough_circle_peaks
from sunpy.map import GenericMap
import eclipse.meta as m
__all__ = ['find_sun_center_and_radius', 'eclipse_image_to_map']
def find_sun_center_and_radius(im):
"""
Given an image of the eclipsed Sun, find the center and radius of the
solar disk in the image.
Parameters
----------
im : `numpy.ndarray`
The image.
Returns
-------
im_cx : `astropy.units.Quantity`
The x coordinate of the centre of the disk.
im_cy : `astropy.units.Quantity`
The y coordinate of the centre of the disk.
im_radius : `astropy.units.Quantity`
The radius of the disk.
"""
blur_im = ndimage.gaussian_filter(im, 8)
mask = blur_im > blur_im.mean() * 3
# the following code limits the region to search for the circle of the Sun
label_im, nb_labels = ndimage.label(mask)
slice_x, slice_y = ndimage.find_objects(label_im == 1)[0]
roi = blur_im[slice_x, slice_y]
# take the derivative of the image to find the edges of the Sun
sx = ndimage.sobel(roi, axis=0, mode='constant')
sy = ndimage.sobel(roi, axis=1, mode='constant')
sob = np.hypot(sx, sy)
hough_radii = np.arange(np.floor(np.mean(sob.shape) / 4),
np.ceil(np.mean(sob.shape) / 2), 10)
hough_res = hough_circle(sob > (sob.mean() * 5), hough_radii)
# Select the most prominent circle
accums, cy, cx, radius = hough_circle_peaks(hough_res, hough_radii,
total_num_peaks=1)
im_cx = (cx + slice_x.start) * u.pix
im_cy = (cy + slice_y.start) * u.pix
im_radius = radius * u.pix
return im_cx, im_cy, im_radius
def eclipse_image_to_map(filename):
"""
Given the filename to a photo, convert it to a `sunpy.map.GenericMap` object.
Parameters
----------
filename : `str`
The filename of the image.
Returns
-------
sunpymap : `sunpy.map.GenericMap`
A SunPy map with valid metadata for the image.
"""
# load the image data
im_rgb = np.flipud(matplotlib.image.imread(filename))
# remove the color information
im = np.average(im_rgb, axis=2)
# find the sun center and radius
im_cx, im_cy, im_radius = find_sun_center_and_radius(im)
tags = exifread.process_file(open(filename, 'rb'))
time = m.get_image_time(tags)
###############################################################################
# With the time and the radius of the solar disk we can calculate the plate
# scale.
plate_scale = m.get_plate_scale(time, im_radius)
###############################################################################
# We can now build a WCS object and a meta dictionary. We then append a few
# more meta tags to the meta dictionary.
wcs = m.build_wcs(im_cx, im_cy, plate_scale)
meta = m.build_meta(wcs, tags)
return GenericMap(data=im, header=meta)
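# A hedged usage sketch (the file name is a placeholder):
#
#   smap = eclipse_image_to_map("totality.jpg")
#   smap.peek()  # quick-look plot via sunpy's GenericMap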
|
1637543
|
import sys, boto3, json
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
from pyspark.sql.functions import *
def load_state_information():
"""
Reads the workflow state information (distinct dates) from S3.
:return: the parsed state information dict
"""
s3 = boto3.resource('s3', region_name=args['region'])
state_information_file = s3.Object(args['temp_workflow_bucket'], 'glue_workflow_distinct_dates')
return json.load(state_information_file.get()['Body'])
def aggregate_and_write_data_to_s3(bucket_path, push_down_predicate=""):
"""
If provided, uses the push-down predicate to select exactly the data that needs to be aggregated.
Otherwise the whole data set gets aggregated.
:param bucket_path:
:param push_down_predicate:
"""
meter_data_to_aggregate = glueContext.create_dynamic_frame.from_catalog(database=args['db_name'], \
table_name="daily", \
transformation_ctx="meter_data_to_aggregate", \
push_down_predicate=push_down_predicate)
daily_aggregated_interval_reads = meter_data_to_aggregate.toDF() \
.groupby('meter_id', 'date_str') \
.agg(sum("reading_value").alias("aggregated_consumption"))
daily_aggregated_interval_reads \
.repartition("date_str") \
.write \
.mode("overwrite") \
.option("compression", "snappy") \
.partitionBy("date_str") \
.parquet(bucket_path)
## @params: [JOB_NAME, db_name, business_zone_bucket, temp_workflow_bucket]
args = getResolvedOptions(sys.argv, ['JOB_NAME', 'db_name', 'business_zone_bucket', 'temp_workflow_bucket', 'region'])
sc = SparkContext()
sc._jsc.hadoopConfiguration().set("fs.s3a.endpoint", "s3."+args['region']+".amazonaws.com.cn")
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
# read date information to know which data should be aggregated or re-aggregated
state_information = load_state_information()
business_zone_bucket_path = "s3a://{}/aggregated/daily".format(args['business_zone_bucket'])
if state_information["first_run"]:
aggregate_and_write_data_to_s3(business_zone_bucket_path)
else:
dates_to_process = state_information["dates"]
if dates_to_process:
for date in dates_to_process:
aggregate_and_write_data_to_s3(business_zone_bucket_path, "(reading_type == 'INT' and date_str == '{}')".format(date))
job.commit()
|
1637555
|
from plex.objects.core.base import Descriptor, Property
class Director(Descriptor):
id = Property(type=int)
tag = Property
@classmethod
def from_node(cls, client, node):
return cls.construct(client, cls.helpers.find(node, 'Director'), child=True)
|
1637560
|
import torch
import Corr2D_ext
def int_2_tensor(intList):
return torch.tensor(intList, dtype=torch.int, requires_grad=False)
def tensor_2_int(t):
assert len(t.size()) == 1
assert t.size()[0] == 5
assert t.dtype == torch.int
return t.tolist()
class Corr2DF(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x1, maxDisplacement, \
padding=1, kernelSize=3, strideK=1, strideD=1):
ctx.maxDisplacement = maxDisplacement
ctx.padding = padding
ctx.kernelSize = kernelSize
ctx.strideK = strideK
ctx.strideD = strideD
out = Corr2D_ext.forward(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD)
ctx.save_for_backward(x0, x1)
return out[0]
@staticmethod
def backward(ctx, grad):
x0, x1 = ctx.saved_tensors
output = Corr2D_ext.backward( grad, x0, x1,
ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD )
return output[0], output[1], None, None, None, None, None
class Corr2DM(torch.nn.Module):
def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1):
super(Corr2DM, self).__init__()
assert maxDisplacement > 0
assert kernelSize > 0
assert kernelSize % 2 == 1
assert strideK > 0
assert strideD > 0
self.maxDisplacement = maxDisplacement
self.padding = padding
self.kernelSize = kernelSize
self.strideK = strideK
self.strideD = strideD
def forward(self, x0, x1):
return Corr2DF.apply( x0, x1, self.maxDisplacement, \
self.padding, self.kernelSize, self.strideK, self.strideD )
class Corr2DZNF(torch.autograd.Function):
@staticmethod
def forward(ctx, x0, x1, maxDisplacement, \
padding=1, kernelSize=3, strideK=1, strideD=1):
ctx.maxDisplacement = maxDisplacement
ctx.padding = padding
ctx.kernelSize = kernelSize
ctx.strideK = strideK
ctx.strideD = strideD
out = Corr2D_ext.forward_zn(x0, x1, padding, kernelSize, maxDisplacement, strideK, strideD)
ctx.save_for_backward(x0, x1, out[0], out[1], out[2])
return out[0]
@staticmethod
def backward(ctx, grad):
x0, x1, C, L0, L1 = ctx.saved_tensors
output = Corr2D_ext.backward_zn( grad, x0, x1, C, L0, L1,
ctx.padding, ctx.kernelSize, ctx.maxDisplacement, ctx.strideK, ctx.strideD )
return output[0], output[1], None, None, None, None, None
class Corr2DZNM(torch.nn.Module):
def __init__(self, maxDisplacement, padding=1, kernelSize=3, strideK=1, strideD=1):
super(Corr2DZNM, self).__init__()
assert maxDisplacement > 0
assert kernelSize > 0
assert kernelSize % 2 == 1
assert strideK > 0
assert strideD > 0
self.maxDisplacement = maxDisplacement
self.padding = padding
self.kernelSize = kernelSize
self.strideK = strideK
self.strideD = strideD
def forward(self, x0, x1):
return Corr2DZNF.apply( x0, x1, self.maxDisplacement, \
self.padding, self.kernelSize, self.strideK, self.strideD )
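# A minimal forward/backward sketch (shapes are illustrative; this assumes the
# compiled Corr2D_ext extension is importable and tensors live on the GPU):
#
#   corr = Corr2DM(maxDisplacement=4, padding=1, kernelSize=3)
#   x0 = torch.randn(1, 32, 64, 64, device='cuda', requires_grad=True)
#   x1 = torch.randn(1, 32, 64, 64, device='cuda', requires_grad=True)
#   cost = corr(x0, x1).sum()
#   cost.backward()  # gradients flow through Corr2DF.backward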
|
1637576
|
import typing
from anchorpy.error import ProgramError
class TileOutOfBounds(ProgramError):
def __init__(self) -> None:
super().__init__(6000, None)
code = 6000
name = "TileOutOfBounds"
msg = None
class TileAlreadySet(ProgramError):
def __init__(self) -> None:
super().__init__(6001, None)
code = 6001
name = "TileAlreadySet"
msg = None
class GameAlreadyOver(ProgramError):
def __init__(self) -> None:
super().__init__(6002, None)
code = 6002
name = "GameAlreadyOver"
msg = None
class NotPlayersTurn(ProgramError):
def __init__(self) -> None:
super().__init__(6003, None)
code = 6003
name = "NotPlayersTurn"
msg = None
class GameAlreadyStarted(ProgramError):
def __init__(self) -> None:
super().__init__(6004, None)
code = 6004
name = "GameAlreadyStarted"
msg = None
CustomError = typing.Union[
TileOutOfBounds, TileAlreadySet, GameAlreadyOver, NotPlayersTurn, GameAlreadyStarted
]
CUSTOM_ERROR_MAP: dict[int, CustomError] = {
6000: TileOutOfBounds(),
6001: TileAlreadySet(),
6002: GameAlreadyOver(),
6003: NotPlayersTurn(),
6004: GameAlreadyStarted(),
}
def from_code(code: int) -> typing.Optional[CustomError]:
maybe_err = CUSTOM_ERROR_MAP.get(code)
if maybe_err is None:
return None
return maybe_err
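# A small sanity check of the lookup table (illustrative only):
if __name__ == "__main__":
    assert isinstance(from_code(6003), NotPlayersTurn)
    assert from_code(9999) is None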
|
1637580
|
import zipfile
from matplotlib.ticker import FormatStrFormatter
import matplotlib.ticker as tick
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import logging
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator,MaxNLocator
from matplotlib.offsetbox import TextArea, VPacker, AnnotationBbox,HPacker,PackerBase,PaddedBox
logging.basicConfig(level=logging.INFO)
# NOTE: this rebinds the name `logging` to a logger instance, so every
# `logging.info(...)`/`logging.error(...)` call below goes through this logger.
logging = logging.getLogger("ActionEventPlotter")
#os.chdir("/opt/biogears/core/build/runtime/")
class ActionEventPlotter():
def __init__(self):
self.events = []
self.data = []
self.timeData = []
self.actions = []
def plot(self, job):
"""
Plot
_________________
Takes a job object and fills in its data, log, and scenario paths
with defaults where they are missing. Files are looked for first in
the Runtime directory (for csv files), then in the Scenarios folder
inside the Runtime directory, and finally in the baselines directory.
"""
if job.dataPath == None :
job.dataPath = os.path.join("Scenarios",job.verificationDirectory,"baselines")
if job.logPath == None:
job.logPath = os.path.join("Scenarios",job.verificationDirectory,"baselines")
if job.scenarioPath == None:
job.scenarioPath = os.path.join("Scenarios",job.verificationDirectory)
if job.dataFile == None:
job.dataFile=job.name+"Results.csv"
if job.logFile == None:
job.logFile = job.name + "Results.zip"
if job.scenarioFile == None:
job.scenarioFile = job.name + ".xml"
if job.outputFilename==None:
job.outputFilename=job.titleOverride+".jpg"
if len(job.outputFilename.split("."))==1:
job.outputFilename+=".jpg"
if job.imageWidth==None and job.imageHeight==None:
job.imageWidth=1600
job.imageHeight=800
if not os.path.exists(job.dataPath):
job.dataPath = os.path.join(job.basedir,job.dataPath)
if not os.path.exists(job.logPath):
job.logPath = os.path.join(job.basedir, job.logPath)
if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):
job.dataFile = job.name + "Results.zip"
if not os.path.isfile(os.path.join(job.dataPath,job.dataFile)):
job.dataPath = os.path.join(job.basedir,job.dataPath)
if not job.skipAllEvents:
self.events = self.getEventsFromLog(os.path.join(job.logPath, job.logFile),job)
if not job.skipAllActions:
self.actions = self.getActionsFromLog(os.path.join(job.logPath,job.logFile),job)
if len(self.events)>0 and len(self.actions)>0:
for i in self.events:
self.timeData.append(i["time"])
self.data.append("Event:"+i["text"])
for i in self.actions:
self.timeData.append(i["time"])
self.data.append("Actions:"+i["text"])
elif len(self.events)>0 and not len(self.actions)>0:
for i in self.events:
self.timeData.append(i["time"])
self.data.append("Event:"+i["text"])
elif not len(self.events)>0 and len(self.actions)>0:
for i in self.actions:
self.timeData.append(i["time"])
self.data.append("Actions:"+i["text"])
if not os.path.exists(os.path.dirname(job.outputDir)):
os.mkdir(os.path.dirname(job.outputDir))
if not job.fontSize:
job.fontSize=22
if job.log>=2:
logging.info("Name of Plot" +job.name)
logging.info("Input File: "+ os.path.join(job.dataPath,job.dataFile))
logging.info("Output File: "+ os.path.join(job.outputDir,job.outputFilename))
self.drawgraph(self.events,self.actions,job,os.path.join(job.dataPath,job.dataFile),
os.path.join(job.outputDir,job.outputFilename))
def getActionsFromLog(self,file_,job):
"""
getActionsFromLog
_________________
Extracts every Action and its associated text from the log file
inside the data path, optionally echoing each one to the console.
"""
fin=None
actions = []
flag=0
txt=""
try:
if file_.endswith(".zip"):
try:
zf = zipfile.ZipFile(file_,'r')
for i in zf.filelist:
if i.filename.endswith(".log"):
fin = zf.open(i.filename,'r')
break
# We expect results zips to only contain 1 text file
except IOError as e:
logging.error("ActionEventPlotter couldn't read the log file " + file_)
except IOError as e:
logging.error("Zip file not found " +file_)
if not fin:
return actions
for line in fin:
line=line.decode("utf-8")
if len(line)==0:
continue
if "[Action]" in line.split():
Action = {}
ActionText =line.split("[Action]",1)[1].strip()
ActionTimeIndex = ActionText.find("(s)")
if ActionTimeIndex == -1:
ActionTimeIndex = ActionText.find(",")
try:
Action["time"] = float(ActionText[0:ActionTimeIndex].strip())
except ValueError:
    logging.error("Couldn't correctly parse log file time to float")
Action["text"] = ActionText[ActionText.find(",") + 1:].strip()
flag=1
txt+=ActionText[ActionText.find(",") + 1:].strip()
elif flag==1 and line.startswith("\t"):
txt+=line
elif flag==1 and not line.startswith("\t"):
txt=txt.replace("\t","\n\t",1)
Action["text"]=txt
if job.logger==True and job.log>2:
logging.info("Adding Action:" + Action["text"])
actions.append(Action)
txt=""
flag=0
fin.close()
return actions
def getEventsFromLog(self, file_,job):
"""
getEventsFromLog
__________________
Extracts each event's text and time from the log file inside
the data path so they can be plotted on the graph.
"""
events = []
fin=None
try:
if file_.endswith(".zip"):
try:
zf = zipfile.ZipFile(file_,'r')
for i in zf.filelist:
if i.filename.endswith(".log"):
fin = zf.open(i.filename,'r')
break
except IOError as e:
logging.error("ActionEventPlotter couldn't read the log file " + file_)
except IOError as e:
logging.error("Zip File not found " +file_)
if not fin:
return events
for line in fin:
line=line.decode("utf-8")
if len(line)==0:
continue
if "[Event]" not in line.split():
continue
else:
event = {}
eventText =line.split("[Event]",1)[1].strip()
endTimeIndex = eventText.find("(s)")
if endTimeIndex == -1:
endTimeIndex = eventText.find(",")
try:
event["time"] = float(eventText[0:endTimeIndex].strip())
except ValueError:
    logging.error("Couldn't correctly parse log file time to float")
event["text"] = eventText[eventText.find(",") + 1:].strip()
if job.logger==True and job.log>2:
logging.info("Adding Event:" + event["text"])
events.append(event)
fin.close()
return events
def y_fmt(self, x, y):
    """
    y_fmt
    ________
    Formats a tick value as compact scientific notation, e.g.
    22.0 -> '2.2E1', without dropping significant digits.
    """
    mantissa, exponent = '{:2.2e}'.format(x).split('e')
    return '{}E{}'.format(mantissa.rstrip('0').rstrip('.'), int(exponent))
def drawgraph(self,events,actions,job,input_zip,output_file):
"""
draw_graph
________________
Opens the input data (a csv directly, or the first csv found
inside a zip) and passes the resulting DataFrame to plotting().
"""
my_dpi=96
col=["red","yellow","green","blue","orange","lime","magenta","violet"
,"black","purple","0.1","0.2","0.75","0.8","0.9","pink"]
try:
if input_zip.endswith(".csv"):
df = pd.read_csv(input_zip,low_memory=False)
try:
self.plotting(events,actions,job,input_zip,output_file,df,my_dpi,col)
except IOError:
logging.error("File Not found at:"+input_zip)
except Exception as e:
logging.error("Exception occured when plotting header \"" + job.headers[0] + "\": " + str(e))
elif input_zip.endswith(".zip"):
zf=zipfile.ZipFile(input_zip)
for i in zf.filelist:
if i.filename.endswith(".csv"):
df = pd.read_csv(zf.open(i.filename),low_memory=False)
try:
self.plotting(events,actions,job,input_zip,output_file,df,my_dpi,col)
except IOError:
logging.error("File Not found at:"+input_zip)
except Exception as e:
logging.error("Exception occured when plotting header \"" + job.headers[0] + "\": " + str(e))
except IOError:
logging.error("Zip file Not found at :"+input_zip)
def plotting(self,events,actions,job,input_zip,output_file,df,my_dpi,col):
"""
plotting
________________
Driver that performs the actual matplotlib plotting. The commands and
parameters in the config file determine which if-else branches below
are executed.
"""
X=df.iloc[:,0].values[::20]
Y=df.loc[:,job.headers[0]].values[::20]
df2 = None
Xexp = None
Yexp = None
plotExperimentalData = False
try:
if job.experimentalData is not None:
df2 = pd.read_csv(job.experimentalData)
Xexp = df2.iloc[:,0]
Yexp = df2.iloc[:,1]
plotExperimentalData = True
except Exception as e:
logging.info("Exception occured when opening Experimental Data: " + str(e))
if job.legendOnly:
if not os.path.exists(job.outputDir):
os.mkdir(job.outputDir)
colors =["red","yellow","green","blue","orange","lime","magenta",
"violet","black","purple","0.1","0.2","0.75","0.8","0.9","pink"]
f = lambda m,c: plt.plot([],[],marker=m, color=c, ls="none")[0]
handles = [f("_", colors[i]) for i in range(0,len(colors))]
labels = [i.replace("\t"," ") for i in self.data]
legend = plt.legend(handles, labels, loc=3, ncol=3, framealpha=1, frameon=False, fontsize=12)
plt.axis('off')
def export_legend(legend, filename=os.path.join(job.outputDir,job.outputFilename), expand=[-50,-50,50,50]):
fig = legend.figure
fig.canvas.draw()
bbox = legend.get_window_extent()
bbox = bbox.from_extents(*(bbox.extents + np.array(expand)))
bbox = bbox.transformed(fig.dpi_scale_trans.inverted())
fig.savefig(filename, dpi="figure", bbox_inches=bbox,pad_inches=0)
export_legend(legend)
if job.log>0:
logging.info("Creating Graph:"+job.outputFilename.split(".")[0])
plt.close("all")
else:
fig,ax = plt.subplots()
fig.set_size_inches(w=job.imageWidth/my_dpi+1,h=job.imageHeight/my_dpi+1)
if not os.path.exists(job.outputDir):
os.mkdir(job.outputDir)
if job.logAxis:
ax.set_yscale("log")
ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))
ax.yaxis.set_ticks_position("both")
ax.yaxis.set_tick_params(labelright=True)
plt.xlabel("Time(s)",fontsize=job.fontSize)
plt.ylabel(job.headers[0],fontsize=job.fontSize)
if job.titleOverride==None:
plt.title(job.headers[0]+"_vs_Time_Action_Event_Plot",fontsize=job.fontSize)
if job.log>0:
logging.info("Creating Graph:"+job.headers[0]+"_vs_Time_Action_Event_Plot")
elif job.titleOverride=="None":
if job.log>0:
logging.info("Creating Graph:"+job.outputFilename.split(".")[0])
else:
plt.title(job.titleOverride,fontsize=job.fontSize)
if job.log>0:
logging.info("Creating Graph:"+job.titleOverride)
plt.xlim(0,max(X))
plt.plot(X, Y)
if (plotExperimentalData):
plt.plot(Xexp, Yexp)
for i in range(0,min(len(self.timeData), len(col))):
plt.axvline(self.timeData[i],color=col[i])
if job.showGridLines:
plt.grid(b=True, which='major', color='r', linestyle='--')
if not job.hideAELegend and not job.removeAllLegends:
legendEntries = job.headers
if (plotExperimentalData):
legendEntries.append("Experimental Data")
plt.legend(legendEntries)
if "(" and ")" in job.outputFilename:
job.outputFilename=job.outputFilename.split("(")[0]+".jpg"
plt.savefig(os.path.join(job.outputDir,job.outputFilename),dpi=my_dpi)
plt.close("all")
else:
ax.get_yaxis().set_major_locator(MaxNLocator(nbins=10,min_n_ticks=8))
ax.get_xaxis().set_major_locator(MaxNLocator(nbins=15,min_n_ticks=10))
ax.yaxis.set_major_formatter(tick.FuncFormatter(self.y_fmt))
ax.yaxis.set_ticks_position("both")
ax.yaxis.set_tick_params(labelright=True)
plt.xlabel("Time(s)",fontsize=job.fontSize)
plt.ylabel(job.headers[0],fontsize=job.fontSize)
if job.titleOverride==None:
plt.title(job.headers[0]+"_vs_Time_Action_Event_Plot",fontsize=job.fontSize)
if job.log>0:
logging.info("Creating Graph:"+job.headers[0]+"_vs_Time_Action_Event_Plot")
elif job.titleOverride=="None":
if job.log>0:
logging.info("Creating Graph:"+job.outputFilename.split(".")[0])
else:
if job.log>0:
logging.info("Creating Graph:"+job.titleOverride)
plt.title(job.titleOverride,fontsize=job.fontSize)
plt.xlim(0,max(X))
plt.plot(X,Y)
if (plotExperimentalData):
plt.plot(Xexp, Yexp)
for i in range(0,min(len(self.timeData), len(col))):
plt.axvline(self.timeData[i],color=col[i])
if job.showGridLines:
plt.grid(b=True, which='major', color='r', linestyle='--')
if not job.hideAELegend and not job.removeAllLegends:
legendEntries = job.headers
if (plotExperimentalData):
legendEntries.append("Experimental Data")
plt.legend(legendEntries)
if "(" and ")" in job.outputFilename:
job.outputFilename=job.outputFilename.split("(")[0]+".jpg"
plt.savefig(os.path.join(job.outputDir,job.outputFilename),dpi=my_dpi)
plt.close("all")
|
1637609
|
from yowsup.layers.protocol_contacts.protocolentities import AddContactNotificationProtocolEntity
from yowsup.structs.protocolentity import ProtocolEntityTest
import time
import unittest
entity = AddContactNotificationProtocolEntity("1234", "<EMAIL>", int(time.time()), "notify", False,
"<EMAIL>")
class AddContactNotificationProtocolEntityTest(ProtocolEntityTest, unittest.TestCase):
def setUp(self):
super(AddContactNotificationProtocolEntityTest, self).setUp()
self.ProtocolEntity = AddContactNotificationProtocolEntity
self.node = entity.toProtocolTreeNode()
|
1637621
|
from .metropolis import Metropolis
from .hamiltonian import Hamiltonian
from .NUTS import NUTS
from .chain import Chain
from .slice import Slice
from .base import Sampler
|
1637634
|
import sys
sys.path.append('../')
import constants as cnst
import os
import torch
import tqdm
import numpy as np
import constants
SHAPE = [0, 1, 2]
EXP = [50, 51, 52]
POSE = [150, 151, 152, 153, 154, 155]
def centre_using_nearest(flame_seq, flame_dataset, one_translation_for_whole_seq=True):
shape_weight = 0
pose_weight = 0.7
if one_translation_for_whole_seq:
dist = np.linalg.norm(flame_dataset[:, 150:156] - flame_seq[0, 150:156], axis=-1)
min_arg = np.argmin(dist)
flame_seq[:, 156:] = flame_dataset[min_arg, 156:]
else:
for i in range(len(flame_seq)):
shape_dist = np.linalg.norm(flame_dataset[:, SHAPE] - flame_seq[i, SHAPE], axis=-1)
pose_dist = np.linalg.norm(flame_dataset[:, POSE] - flame_seq[i, POSE], axis=-1)
dist = shape_weight*shape_dist + pose_weight*pose_dist
min_arg = np.argmin(dist)
flame_seq[i, 156:] = flame_dataset[min_arg, 156:]
return flame_seq
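# A hedged example (random vectors standing in for real FLAME parameters;
# 159 columns = 100 shape + 50 expression + 6 pose + 3 translation):
#
#   rng = np.random.RandomState(0)
#   seq = rng.randn(10, 159).astype('float32')
#   dataset = rng.randn(1000, 159).astype('float32')
#   seq = centre_using_nearest(seq, dataset)  # borrows translation params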
def position_to_given_location(deca_flame_decoder, flame_batch):
# import ipdb;
# ipdb.set_trace()
shape, expression, pose = (flame_batch[:, 0:100], flame_batch[:, 100:150], flame_batch[:, 150:156])
verts, _, _ = deca_flame_decoder(shape_params=shape, expression_params=expression, pose_params=pose)
for i in range(verts.shape[0]):
e_1_3d = verts[i, 4051, :]
e_2_3d = verts[i, 4597, :]
eye_3d_mat = torch.zeros(size=(3, 4)).to(flame_batch.device)
eye_3d_mat[1, 0] = eye_3d_mat[1, 1] = eye_3d_mat[2, 2] = eye_3d_mat[2, 3] = 1
eye_3d_mat[0, 0] = e_1_3d[0]
eye_3d_mat[0, 1] = e_2_3d[0]
eye_3d_mat[0, 2] = e_1_3d[1]
eye_3d_mat[0, 3] = e_2_3d[1]
normalized_image_desired_positions_x1_x2_y1_y2 = \
torch.tensor([-0.2419, 0.2441, 0.0501-0.1, 0.0509-0.1]).to(flame_batch.device)
s, s_b_x, s_b_y = torch.matmul(normalized_image_desired_positions_x1_x2_y1_y2, torch.pinverse(eye_3d_mat))
b_x = s_b_x/s
b_y = s_b_y/s
s = -s
# import ipdb;
# ipdb.set_trace()
flame_batch[i, 156] = s
flame_batch[i, 157] = b_x
flame_batch[i, 158] = b_y
return flame_batch
def translate_to_center_eye(flame_decoder, flame_params, original_flame):
shape, expression, pose, translation = (flame_params[:, 0:100,], flame_params[:, 100:150], flame_params[:, 150:156],
flame_params[:, 156:159])
verts, _ = flame_decoder(shape_params=shape, expression_params=expression, pose_params=pose,
translation=translation*0)
if original_flame is not None:
shape_orig, expression_orig, pose_orig, translation_orig = (original_flame[:, 0:100,],
original_flame[:, 100:150],
original_flame[:, 150:156],
original_flame[:, 156:159])
verts_orig, _ = flame_decoder(shape_params=shape_orig, expression_params=expression_orig,
pose_params=pose_orig, translation=translation_orig)
desired_cntr_of_the_eyes = verts_orig[:, 3666, :]
else:
desired_cntr_of_the_eyes = torch.from_numpy(np.array([4.32830852e-02, -47.60086733e-03, 2.41298008e+00])
.astype('float32')).to(flame_params.device)
# desired_cntr_of_the_eyes = torch.from_numpy(np.array([2.2427477e-03, -1.8124590e-02, 2.5114515e+00])
# .astype('float32')).to(flame_params.device)
current_translation = verts[:, 3666, :]
required_translation = desired_cntr_of_the_eyes - current_translation
return torch.cat((shape, expression, pose, required_translation), dim=1)
class RegressorNNSkipPart(torch.nn.Module):
def __init__(self, neurons, regularization, num_layers_per_block, activation_type):
super().__init__()
layers = []
for layer_idx in range(num_layers_per_block):
layers.append(torch.nn.Linear(neurons, neurons, bias=True))
if regularization == 'dropout':
layers.append(torch.nn.Dropout(0.5))
elif regularization == 'batchnorm':
layers.append(torch.nn.BatchNorm1d(neurons))
elif regularization is None:
pass
if activation_type == 'relu':
layers.append(torch.nn.ReLU(True))
elif activation_type == 'lrelu':
layers.append(torch.nn.LeakyReLU(0.3))
self.forward_part = torch.nn.Sequential(*layers)
def forward(self, input):
return input + self.forward_part(input)
class EyeCenteringByRegression:
def __init__(self, eval_mode=False, make_cuda=False, num_skip_blks=2, intermediate_neurons=512,
regularization='batchnorm', num_layers_per_block=2, activation_type='relu'):
self.mean_input = torch.from_numpy(np.array([ 0.4671627 , -0.09504398, -0.12090819,
1.2735702 , 0.00253953, -0.02751609,
0.10822426, -0.01990774, 0.00626311,
0.08915882, 0.00973385, -0.00834262]).astype('float32'))
self.std_input = torch.from_numpy(np.array([0.53506327, 0.52815205, 0.52134556,
1.1373067 , 0.4865559 , 0.21345851,
0.11624492, 0.27343082, 0.02041259,
0.05613742, 0.01074448, 0.03475167]).astype('float32'))
self.mean_output= torch.from_numpy(np.array([8.0179777e+00, 3.4307071e-03, -1.3698899e-04]).astype('float32'))
self.std_output = torch.from_numpy(np.array([0.38766932, 0.03351782, 0.01525018]).astype('float32'))
self.random_model = True
self.model = torch.nn.Sequential(
torch.nn.Linear(len(SHAPE + EXP + POSE), intermediate_neurons, bias=True),
torch.nn.BatchNorm1d(intermediate_neurons),
torch.nn.ReLU(True),
*[RegressorNNSkipPart(intermediate_neurons, regularization=regularization,
num_layers_per_block=num_layers_per_block, activation_type=activation_type)
for skip_blk_id in range(num_skip_blks)],
torch.nn.Linear(intermediate_neurons, 3, bias=True),
)
if make_cuda:
self.device = 'cuda'
self.model = self.model.cuda()
else:
self.device = 'cpu'
self.eval_mode = eval_mode
if eval_mode:
self.model.eval()
self.mdl_optim = torch.optim.Adam(self.model.parameters(), lr=1e-4, betas=(0.0, 0.99))
self.lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
self.mdl_optim, 'min', factor=0.5, patience=5, verbose=True, threshold=0.0001, min_lr=1e-6)
def load_model(self, checkpoint_path):
self.random_model = False
self.model.load_state_dict(torch.load(checkpoint_path))
def save_model(self, checkpoint_path):
os.makedirs(os.path.dirname(checkpoint_path), exist_ok=True)
torch.save(self.model.state_dict(), checkpoint_path)
def get_camera(self, pose_shape_exp):
if self.random_model:
raise ValueError('Using model inference without training or loading it')
with torch.no_grad():
self.mean_input = self.mean_input.to(pose_shape_exp.device)
self.std_input = self.std_input.to(pose_shape_exp.device)
self.std_output = self.std_output.to(pose_shape_exp.device)
self.mean_output = self.mean_output.to(pose_shape_exp.device)
t = (self.model((pose_shape_exp - self.mean_input) / self.std_input) * self.std_output) + self.mean_output
return t
def substitute_flame_batch_with_regressed_camera(self, flame_batch):
t_cam = self.get_camera(flame_batch[:, SHAPE+EXP+POSE])
flame_batch[:, constants.get_idx_list('TRANS')] = t_cam
return flame_batch
def fit_to_data(self, trn_dataloader, epochs=20, verbose=True, training_criterion=torch.nn.MSELoss(),
validation_loader=None, save_best_mdl_path=None):
assert not self.eval_mode
validation_criterion = torch.nn.MSELoss()
self.random_model = False
trn_dataloader_itr = iter(trn_dataloader)
validation_loss = 0
best_validation_loss = np.inf
for epoch_id in range(epochs):
moving_avg_trn_loss = 0
self.model.train()
if verbose:
pbar = tqdm.tqdm(range(len(trn_dataloader)))
else:
pbar = range(len(trn_dataloader))
for batch_id in pbar:
try:
x_train, y_train = next(trn_dataloader_itr)
except (OSError, StopIteration):
trn_dataloader_itr = iter(trn_dataloader)
x_train, y_train = next(trn_dataloader_itr)
x_train = (x_train - self.mean_input)/self.std_input
y_train = (y_train - self.mean_output)/self.std_output
x_train = x_train.to(self.device)
y_train = y_train.to(self.device)
y_hat_train = self.model(x_train)
                train_loss = training_criterion(y_hat_train, y_train)
                self.mdl_optim.zero_grad()  # clear gradients left over from the previous step
                train_loss.backward()
                self.mdl_optim.step()
moving_avg_trn_loss += train_loss.item()
state_msg = f'[{epoch_id}/{epochs}] Train_loss: {moving_avg_trn_loss/(batch_id+1):.3f} ' \
f'Valid_loss: {validation_loss:0.3f}'
if verbose:
pbar.set_description(state_msg)
if validation_loader is not None:
validation_loss = 0
num_batches = 0
validation_loader_itr = iter(validation_loader)
self.model.eval()
with torch.no_grad():
for x_valid, y_valid in validation_loader_itr:
x_valid = (x_valid - self.mean_input) / self.std_input
y_valid = (y_valid - self.mean_output) / self.std_output
x_valid = x_valid.to(self.device)
y_valid = y_valid.to(self.device)
num_batches += 1
y_hat_valid = self.model(x_valid)
valid_loss = validation_criterion(y_hat_valid, y_valid)
                        validation_loss += valid_loss.item()
validation_loss /= num_batches
self.lr_scheduler.step(validation_loss)
if save_best_mdl_path is not None and validation_loader is not None:
if best_validation_loss > validation_loss:
best_validation_loss = validation_loss
self.save_model(save_best_mdl_path)
print(f'New best model saved to {save_best_mdl_path}')
def get_eye_center_camera(self, current_shape_exp_pose):
return self.model(current_shape_exp_pose)
if __name__ == '__main__':
''' Regressor training code'''
from torch.utils.data import Dataset, DataLoader
class FlmDatLoader(Dataset):
def __init__(self, keys, param_dict):
self.param_dict = param_dict
self.keys = keys
self.list_bad_images = np.load(cnst.list_deca_failed_iamges)['bad_images']
def __getitem__(self, index):
            current_file = str(index).zfill(5) + '.npy'
            while current_file in self.list_bad_images:
                index = np.random.randint(0, len(self.keys))
                current_file = str(index).zfill(5) + '.npy'
            shape_exp_pose = np.concatenate((self.param_dict[self.keys[index]]['shape'][:3],
                                             self.param_dict[self.keys[index]]['exp'][:3],
                                             self.param_dict[self.keys[index]]['pose']), axis=-1)
            t_cam = self.param_dict[self.keys[index]]['cam']
return shape_exp_pose, t_cam
def __len__(self):
return len(self.keys)
params_dict = np.load(cnst.all_flame_params_file, allow_pickle=True).item()
    keys = np.array(list(params_dict.keys()))
validation_fraction = 0.3
train_keys = keys[:int(len(keys) * (1 - validation_fraction))]
validation_keys = keys[int(len(keys) * (1 - validation_fraction)):]
train_set = FlmDatLoader(train_keys, params_dict)
train_loader = DataLoader(train_set, shuffle=True, batch_size=64, num_workers=0, drop_last=True,
pin_memory=True)
valid_set = FlmDatLoader(validation_keys, params_dict)
validation_loader = DataLoader(valid_set, shuffle=True, batch_size=128, num_workers=0, drop_last=True,
pin_memory=True)
# eye_cntr_reg = EyeCenteringByRegression(num_skip_blks=2, intermediate_neurons=512, regularization='batchnorm',
# num_layers_per_block=2, activation_type='relu')
eye_cntr_reg = EyeCenteringByRegression(make_cuda=True, num_skip_blks=2, intermediate_neurons=825,
regularization='batchnorm', num_layers_per_block=1,
activation_type='relu')
try:
eye_cntr_reg.fit_to_data(trn_dataloader=train_loader, validation_loader=validation_loader, epochs=200,
save_best_mdl_path='../checkpoint/eye_centering/cntr_flm_param_to_cam.mdl',
training_criterion=torch.nn.MSELoss())
finally:
eye_cntr_reg.save_model('../checkpoint/eye_centering/cntr_eye_flm_param_to_cam_last.mdl')
print('..................Model saved .................')
|
1637644
|
import numpy as np
from sklearn.metrics import mean_squared_error, accuracy_score
class BaseModel(object):
"""
Base model to run the test
"""
def __init__(self):
self.max_depth = 6
self.learning_rate = 1
self.min_split_loss = 1
self.min_weight = 1
self.L1_reg = 1
self.L2_reg = 1
self.num_rounds = 40
self.max_bin = 255
self.use_gpu = True
self.params = {}
self.model = None # self.model is different with different libraries
def _config_model(self, data):
"""
        Configure the model with the appropriate parameters
"""
pass
def _train_model(self, data):
"""
        Train the model
:param data:
:return:
"""
pass
def _predict(self, data):
pass
def eval(self, data, pred):
"""
        Evaluate the predictions with the metric specified by the dataset
:param data:
:param pred:
:return:
"""
if data.metric == "RMSE":
with open('pred', 'w') as f:
for x in pred:
f.write(str(x) + '\n')
return np.sqrt(mean_squared_error(data.y_test, pred))
elif data.metric == "Accuracy":
# Threshold prediction if binary classification
if data.task == "Classification":
pred = pred > 0.5
elif data.task == "Multiclass classification":
if pred.ndim > 1:
pred = np.argmax(pred, axis=1)
return accuracy_score(data.y_test, pred)
else:
raise ValueError("Unknown metric: " + data.metric)
def run_model(self, data):
"""
To run model
:param data:
:return:
"""
self._config_model(data)
elapsed = self._train_model(data)
metric = self._predict(data)
print("##### Elapsed time: %.5f #####" % (elapsed))
print("##### Predict %s: %.4f #####" % (data.metric, metric))
return elapsed, metric
def model_name(self):
pass
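# Hedged sketch: a minimal subclass showing how the template methods above are
# meant to be filled in. The sklearn estimator and the `data` attributes
# (X_train, y_train, X_test) are illustrative assumptions, not part of the
# benchmark suite; only the _config_model/_train_model/_predict contract comes
# from BaseModel itself.
class SklearnGBMExample(BaseModel):
    def _config_model(self, data):
        from sklearn.ensemble import GradientBoostingRegressor
        self.params = {'max_depth': self.max_depth,
                       'learning_rate': self.learning_rate,
                       'n_estimators': self.num_rounds}
        self.model = GradientBoostingRegressor(**self.params)

    def _train_model(self, data):
        import time
        start = time.time()
        self.model.fit(data.X_train, data.y_train)
        return time.time() - start  # run_model() reports this as the elapsed time

    def _predict(self, data):
        pred = self.model.predict(data.X_test)
        return self.eval(data, pred)

    def model_name(self):
        return 'sklearn-gbm-example'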
|
1637649
|
import asyncio
import gevent.selectors
__all__ = ["EventLoop"]
class EventLoop(asyncio.SelectorEventLoop):
"""
An asyncio event loop that uses gevent for scheduling and runs in a spawned
greenlet
"""
def __init__(self, selector=None):
super().__init__(selector or gevent.selectors.DefaultSelector())
def run_forever(self):
greenlet = gevent.spawn(super(EventLoop, self).run_forever)
greenlet.join()
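# Hedged usage sketch (illustrative only): install the gevent-backed loop and
# drive a coroutine from it. Because run_forever() joins a spawned greenlet,
# other greenlets keep running while the asyncio loop is active.
def _example():
    loop = EventLoop()
    asyncio.set_event_loop(loop)
    try:
        loop.run_until_complete(asyncio.sleep(0.1))
    finally:
        loop.close()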
|
1637675
|
import time
import analysis.event
import analysis.beamline
import analysis.background
import analysis.pixel_detector
import ipc
import random
import numpy
numpy.random.seed()
state = {
'Facility': 'dummy',
'squareImage' : True,
'Dummy': {
'Repetition Rate' : 10,
'Data Sources': {
'CCD': {
'data': lambda: numpy.random.rand(256,128),
'unit': 'ADU',
'type': 'photonPixelDetectors'
},
'CCD1': {
'data': lambda: numpy.random.rand(64,64),
'unit': 'ADU',
'type': 'photonPixelDetectors'
},
'tof': {
'data': lambda: numpy.random.rand(2,256),
'unit': 'mJ',
'type': 'ionTOFs'
},
'pulseEnergy1': {
'data': lambda: random.random(),
'unit': 'mJ',
'type': 'pulseEnergies'
}
}
}
}
def onEvent(evt):
analysis.event.printProcessingRate()
ipc.new_data("TOF", evt["ionTOFs"]["tof"].data)
if numpy.random.randint(100) == 0:
time.sleep(1)
|
1637719
|
from django.db import transaction
from denorm.db import base
import logging
logger = logging.getLogger('denorm-sqlite')
class RandomBigInt(base.RandomBigInt):
def sql(self):
return 'RANDOM()'
class TriggerNestedSelect(base.TriggerNestedSelect):
def sql(self):
columns = self.columns
table = self.table
where = ", ".join(["%s = %s" % (k, v) for k, v in self.kwargs.items()])
return 'SELECT DISTINCT %(columns)s FROM %(table)s WHERE %(where)s' % locals(), tuple()
class TriggerActionInsert(base.TriggerActionInsert):
def sql(self):
table = self.model._meta.db_table
columns = "(" + ", ".join(self.columns) + ")"
if isinstance(self.values, TriggerNestedSelect):
sql, params = self.values.sql()
values = "" + sql + ""
else:
values = "VALUES(" + ", ".join(self.values) + ")"
params = []
return 'INSERT OR REPLACE INTO %(table)s %(columns)s %(values)s' % locals(), tuple(params)
class TriggerActionUpdate(base.TriggerActionUpdate):
def sql(self):
table = self.model._meta.db_table
updates = ", ".join(["%s = %s" % (k, v) for k, v in zip(self.columns, self.values)])
if isinstance(self.where, tuple):
where, where_params = self.where
else:
where, where_params = self.where, []
return 'UPDATE %(table)s SET %(updates)s WHERE %(where)s' % locals(), where_params
class Trigger(base.Trigger):
def name(self):
name = base.Trigger.name(self)
if self.content_type_field:
name += "_%s" % self.content_type
return name
def sql(self):
qn = self.connection.ops.quote_name
name = self.name()
params = []
action_list = []
actions_added = set()
for a in self.actions:
sql, action_params = a.sql()
if sql:
if not sql.endswith(';'):
sql += ';'
action_params = tuple(action_params)
if (sql, action_params) not in actions_added:
actions_added.add((sql, action_params))
action_list.extend(sql.split('\n'))
params.extend(action_params)
actions = "\n ".join(action_list)
table = self.db_table
time = self.time.upper()
event = self.event.upper()
content_type = self.content_type
ct_field = self.content_type_field
when = []
if event == "UPDATE":
when.append("(" + "OR".join(["(OLD.%s IS NOT NEW.%s)" % (qn(f), qn(f)) for f, t in self.fields]) + ")")
if ct_field:
ct_field = qn(ct_field)
if event == "DELETE":
when.append("(OLD.%s == %s)" % (ct_field, content_type))
elif event == "INSERT":
when.append("(NEW.%s == %s)" % (ct_field, content_type))
elif event == "UPDATE":
when.append("((OLD.%(ctf)s == %(ct)s) OR (NEW.%(ctf)s == %(ct)s))" % {'ctf': ct_field, 'ct': content_type})
when = "AND".join(when)
if when:
when = "WHEN(%s)" % (when,)
return """
CREATE TRIGGER %(name)s
%(time)s %(event)s ON %(table)s
FOR EACH ROW %(when)s BEGIN
%(actions)s
END;
""" % locals(), tuple(params)
class TriggerSet(base.TriggerSet):
def drop_atomic(self):
qn = self.connection.ops.quote_name
cursor = self.cursor()
cursor.execute("SELECT name, tbl_name FROM sqlite_master WHERE type = 'trigger' AND name LIKE 'denorm_%%';")
for trigger_name, table_name in cursor.fetchall():
cursor.execute("DROP TRIGGER %s;" % (qn(trigger_name),))
def drop(self):
try:
with transaction.atomic():
self.drop_atomic()
except AttributeError:
self.drop_atomic()
transaction.commit_unless_managed(using=self.using)
def install_atomic(self):
cursor = self.cursor()
for name, trigger in self.triggers.items():
sql, args = trigger.sql()
cursor.execute(sql, args)
def install(self):
try:
with transaction.atomic():
self.install_atomic()
except AttributeError:
self.install_atomic()
transaction.commit_unless_managed(using=self.using)
|
1637721
|
import numpy as np
from sklearn.linear_model import LogisticRegression
from .base import TransformationBaseModel
class Kane(TransformationBaseModel):
"""The class which implements the Kane's approach.
+----------------+-----------------------------------------------------------------------------------+
| **Parameters** | | **model : object, optional (default=sklearn.linear_model.LogisticRegression)** |
| | | The classification model which will be used for predict uplift. |
| | | **use_weights : boolean, optional (default=False)** |
| | | Use or not weights? |
+----------------+-----------------------------------------------------------------------------------+
*******
Methods
*******
+-----------------------------------------------+----------------------------------------------------+
| :ref:`fit(self, X, y, t) <lai_fit>` | Build the model from the training set (X, y, t). |
+-----------------------------------------------+----------------------------------------------------+
| :ref:`predict(self, X, t=None) <lai_predict>` | Predict an uplift for X. |
+-----------------------------------------------+----------------------------------------------------+
"""
def __init__(self, model=LogisticRegression(n_jobs=-1), use_weights=False):
try:
model.__getattribute__('fit')
model.__getattribute__('predict')
except AttributeError:
            raise ValueError('Model should contain two methods: fit and predict.')
self.model = model
self.use_weights = use_weights
def fit(self, X, y, t):
"""Build the model from the training set (X, y, t).
+------------------+---------------------------------------------------------------------------------+
| **Parameters** | | **X: numpy ndarray with shape = [n_samples, n_features]** |
| | | Matrix of features. |
| | | **y: numpy array with shape = [n_samples,]** |
| | | Array of target of feature. |
| | | **t: numpy array with shape = [n_samples,]** |
| | | Array of treatments. |
+------------------+---------------------------------------------------------------------------------+
| **Returns** | **self : object** |
+------------------+---------------------------------------------------------------------------------+
"""
y_encoded = self.__encode_data(y, t)
self.model.fit(X, y_encoded)
if self.use_weights:
self.__init_weights(t)
return self
def predict(self, X, t=None):
"""Predict an uplift for X.
+------------------+---------------------------------------------------------------------------------+
| **Parameters** | | **X: numpy ndarray with shape = [n_samples, n_features]** |
| | | Matrix of features. |
| | | **t: numpy array with shape = [n_samples,] or None** |
| | | Array of treatments. |
+------------------+---------------------------------------------------------------------------------+
| **Returns** | | **self : object** |
| | | The predicted values. |
+------------------+---------------------------------------------------------------------------------+
"""
p_tr = self.model.predict_proba(X)[:, 0]
p_cn = self.model.predict_proba(X)[:, 1]
p_tn = self.model.predict_proba(X)[:, 2]
p_cr = self.model.predict_proba(X)[:, 3]
if self.use_weights:
return (p_tr / self.treatment_count + p_cn / self.control_count) - \
(p_tn / self.treatment_count + p_cr / self.control_count)
else:
return (p_tr + p_cn) - (p_tn + p_cr)
def __encode_data(self, y, t):
y_values = []
for i in range(y.shape[0]):
if self.is_tr(y[i], t[i]):
y_values.append(0)
elif self.is_cn(y[i], t[i]):
y_values.append(1)
elif self.is_tn(y[i], t[i]):
y_values.append(2)
elif self.is_cr(y[i], t[i]):
y_values.append(3)
return np.array(y_values)
def __init_weights(self, t):
control_count, treatment_count = 0, 0
for el in t:
if el == 0.0:
control_count += 1
else:
treatment_count += 1
self.control_count = control_count
self.treatment_count = treatment_count
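# Hedged usage sketch (synthetic data, illustrative only): fit the
# class-transformation model and score uplift. It assumes the is_tr/is_cn/
# is_tn/is_cr helpers inherited from TransformationBaseModel behave as their
# names suggest.
def _example():
    rng = np.random.RandomState(0)
    X = rng.normal(size=(200, 5))
    t = rng.randint(0, 2, size=200)   # treatment indicator
    y = rng.randint(0, 2, size=200)   # binary response
    model = Kane(use_weights=True).fit(X, y, t)
    uplift = model.predict(X)         # higher score => more responsive to treatment
    return uplift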
|
1637831
|
from pathlib import Path
from numpy import array
from manim import *
class DottedLine(Line):
"""A dotted :class:`Line`.
Parameters
----------
args : Any
Arguments to be passed to :class:`Line`
dot_spacing : Optional[:class:`float`]
Minimal spacing of the dots. The spacing is scaled up to fit the start and end of the line.
dot_kwargs : Any
        Arguments to be passed to :class:`Dot`
kwargs : Any
Additional arguments to be passed to :class:`Line`
Examples
--------
.. manim:: DottedLineExample
:save_last_frame:
class DottedLineExample(Scene):
def construct(self):
                # default dotted line
                dotted_1 = DottedLine(LEFT, RIGHT)
                # reduced spacing
                dotted_2 = DottedLine(LEFT, RIGHT, dot_spacing=.3).shift(.5*DOWN)
                # smaller and colored dots
                dotted_3 = DottedLine(LEFT, RIGHT, dot_kwargs=dict(radius=.04, color=YELLOW)).shift(DOWN)
self.add(dotted_1, dotted_2, dotted_3)
"""
def __init__(
self,
*args,
dot_spacing=0.1,
        dot_kwargs=None,
**kwargs
):
        Line.__init__(self, *args, **kwargs)
        dot_kwargs = dot_kwargs or {}  # avoid sharing a mutable default dict across instances
n_dots = int(self.get_length() / dot_spacing) + 1
dot_spacing = self.get_length() / (n_dots - 1)
unit_vector = self.get_unit_vector()
start = self.start
self.dot_points = [start + unit_vector * dot_spacing * x for x in range(n_dots)]
self.dots = [Dot(point, **dot_kwargs) for point in self.dot_points]
self.clear_points()
self.add(*self.dots)
self.get_start = lambda: self.dot_points[0]
self.get_end = lambda: self.dot_points[-1]
def get_first_handle(self):
return self.dot_points[-1]
def get_last_handle(self):
return self.dot_points[-2]
class VectorAddition(Scene):
def construct(self):
VECT1 = np.array([3, 2, 0])
VECT2 = np.array([2, -1, 0])
VECT1_COLOR = "#b9b28b"
VECT2_COLOR = "#b98b99"
VECT3_COLOR = "#8ba7b9"
vect1 = Line(start=ORIGIN, end=VECT1, stroke_color=VECT1_COLOR).add_tip()
vect1_name = MathTex("\\vec{a}").next_to(vect1.get_center(), DOWN + RIGHT * 2, buff=0.1).set_color(VECT1_COLOR)
vect2 = Line(start=VECT1, end=VECT1 + VECT2, stroke_color=VECT2_COLOR).add_tip()
vect2_name = MathTex("\\vec{b}").next_to(vect2.get_center(), UP * 2 + RIGHT, buff=0.1).set_color(VECT2_COLOR)
vect2_negative = DashedLine(start=VECT1, end=VECT1 - VECT2, stroke_color=VECT2_COLOR).add_tip()
vect2_negative_name = MathTex("-\\vec{b}").next_to(vect2_negative.get_center(), UP * 2 + RIGHT, buff=0.1).set_color(VECT2_COLOR)
vect3 = Line(start=ORIGIN, end=VECT1 - VECT2, stroke_color=VECT3_COLOR, stroke_width=8).add_tip()
vect3_name = MathTex("\\vec{a} - \\vec{b}").next_to(vect3.get_center(), LEFT * 2, buff=0.1).set_color(VECT3_COLOR)
self.camera.frame_center = np.array([2.5, 1.5, 0])
self.play(GrowFromPoint(vect1, point=vect1.get_start()), Write(vect1_name), run_time=2)
self.wait()
self.play(GrowFromPoint(vect2, point=vect2.get_start()), Write(vect2_name), run_time=2)
self.wait()
self.play(GrowFromPoint(vect2_negative, point=vect2_negative.get_start()), Write(vect2_negative_name), run_time=2)
self.wait()
self.play(LaggedStart(GrowFromPoint(vect3, point=vect3.get_start())), Write(vect3_name), run_time=3, lag_ratio=1)
self.wait(4)
if __name__ == '__main__':
# Generate animated gif.
config.background_color = WHITE
config.pixel_height = 300
config.pixel_width = 600
config.frame_width = 8
config.frame_height = 10
config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/vector-subtract-example')
config.format = 'gif'
scene = VectorAddition()
scene.render()
# Generate cover png.
config.save_last_frame = True
config.output_file = Path(__file__).resolve().parent.parent.parent / Path('notes/_media/vector-subtract-cover')
scene = VectorAddition()
scene.render()
|
1637832
|
from tensor2struct.models import decoder, batched_decoder
from tensor2struct.utils import registry, vocab
class CogsPreproc(decoder.DecoderPreproc):
def add_item(self, item, section, validation_info):
actions = item.code.split()
if section == "train":
for action in actions:
self.vocab_builder.add_word(action)
self.items[section].append({"actions": [vocab.BOS] + actions + [vocab.EOS]})
@registry.register("decoder", "cogs_lstm_dec")
class CogsDecoder(batched_decoder.Decoder):
batched = True
Preproc = CogsPreproc
@registry.register("decoder", "cogs_transformer_dec")
class CogsTransformerDecoder(batched_decoder.TransformerDecoder):
batched = True
Preproc = CogsPreproc
|
1637852
|
import torch
import torch.nn as nn
from torch.nn import init
import functools
from torch.optim import lr_scheduler
from collections import OrderedDict
import torch.nn.functional as F
def get_scheduler(optimizer, opt):
if opt.lr_policy == 'lambda':
def lambda_rule(epoch):
lr_l = 1.0 - max(0, epoch + 1 + opt.epoch_count - opt.niter) / float(opt.niter_decay + 1)
return lr_l
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda_rule)
elif opt.lr_policy == 'step':
scheduler = lr_scheduler.StepLR(optimizer, step_size=opt.lr_decay_iters, gamma=0.1)
elif opt.lr_policy == 'plateau':
scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.2, threshold=0.01, patience=5)
else:
        raise NotImplementedError('learning rate policy [%s] is not implemented' % opt.lr_policy)
return scheduler
def init_weights(net, init_type='normal', gain=0.02):
def init_func(m):
classname = m.__class__.__name__
if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
if init_type == 'normal':
init.normal_(m.weight.data, 0.0, gain)
elif init_type == 'xavier':
init.xavier_normal_(m.weight.data, gain=gain)
elif init_type == 'kaiming':
init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
elif init_type == 'orthogonal':
init.orthogonal_(m.weight.data, gain=gain)
else:
raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
if hasattr(m, 'bias') and m.bias is not None:
init.constant_(m.bias.data, 0.0)
elif classname.find('BatchNorm2d') != -1:
init.normal_(m.weight.data, 1.0, gain)
init.constant_(m.bias.data, 0.0)
print('initialize network with %s' % init_type)
net.apply(init_func)
def init_net(net, init_type='normal', init_gain=0.02, gpu_ids=[]):
if len(gpu_ids) > 0:
# print("gpu_ids,", gpu_ids)
assert(torch.cuda.is_available())
net.to(gpu_ids[0])
net = torch.nn.DataParallel(net, gpu_ids)
init_weights(net, init_type, gain=init_gain)
return net
##############################################################################
# Classes
##############################################################################
class GANLoss(nn.Module):
def __init__(self, gan_type='wgan-gp', target_real_label=1.0, target_fake_label=0.0):
super(GANLoss, self).__init__()
self.register_buffer('real_label', torch.tensor(target_real_label))
self.register_buffer('fake_label', torch.tensor(target_fake_label))
self.gan_type = gan_type
if self.gan_type == 'wgan-gp':
self.loss = lambda x, y: -torch.mean(x) if y else torch.mean(x)
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'gan':
self.loss = nn.BCELoss()
else:
raise NotImplementedError('GAN loss type [%s] is not found' % gan_type)
def get_target_tensor(self, input, target_is_real):
if target_is_real:
target_tensor = self.real_label
else:
target_tensor = self.fake_label
return target_tensor.expand_as(input)
def __call__(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
target_tensor = target_is_real
else:
target_tensor = self.get_target_tensor(input, target_is_real)
return self.loss(input, target_tensor)
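# Hedged usage sketch: the criterion is called the same way for every GAN
# flavour; only the target handling differs (wgan-gp interprets the boolean
# directly, the other losses expand it into a label tensor).
def _gan_loss_example():
    criterion = GANLoss(gan_type='lsgan')
    d_out = torch.randn(8, 1)            # stand-in discriminator outputs
    loss_real = criterion(d_out, True)   # push outputs toward the real label (1)
    loss_fake = criterion(d_out, False)  # push outputs toward the fake label (0)
    return loss_real + loss_fake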
# Define a resnet block
class ResnetBlock(nn.Module):
def __init__(self, dim):
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim)
def build_conv_block(self, dim):
conv_block = []
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(dim, affine=True, track_running_stats=False),
nn.ReLU(True)]
#if use_dropout:
# conv_block += [nn.Dropout(0.5)]
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False),
nn.InstanceNorm2d(dim, affine=True, track_running_stats=False)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
class ResnetBlock2(nn.Module):
def __init__(self, dim):
super(ResnetBlock2, self).__init__()
self.conv_block = self.build_conv_block(dim)
def build_conv_block(self, dim):
conv_block = []
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False),
nn.ELU(True)]
#if use_dropout:
# conv_block += [nn.Dropout(0.5)]
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=1, bias=False)]
return nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
##############################################################################
# Basic network model
##############################################################################
def define_splitG(img_nc, aus_nc, ngf, signal_type='class', init_type='normal', init_gain=0.02, gpu_ids=[]):
net_img_au = Generator(img_nc, aus_nc, ngf, signal_type, repeat_num=6)
return init_net(net_img_au, init_type, init_gain, gpu_ids)
def define_splitD(input_nc, aus_nc, image_size, ndf, signal_type='class', init_type='normal', init_gain=0.02, gpu_ids=[], GAN_head=True):
net_dis_aus = Discriminator(input_nc, aus_nc, image_size, ndf, n_layers=6, GAN_head=GAN_head, control_signal_type = signal_type)
return init_net(net_dis_aus, init_type, init_gain, gpu_ids)
class Generator(nn.Module):
def __init__(self, img_nc, c_dim, conv_dim=64, signal_type=None, repeat_num=6, repeat_num2 = 4):
assert(repeat_num >= 0)
super(Generator, self).__init__()
self.aus_nc = c_dim
### 128 scale
## Spontaneous motion module
# encoder
layers = []
if signal_type in {'labelmap', 'edgemap'}:
layers.append(nn.Conv2d(3 + 2 * c_dim, conv_dim, kernel_size=7, stride=2, padding=3, bias=False))
else:
layers.append(nn.Conv2d(3 + c_dim, conv_dim, kernel_size=7, stride=2, padding=3, bias=False))
layers.append(nn.InstanceNorm2d(conv_dim, affine=True, track_running_stats=False))
layers.append(nn.ReLU(inplace=True))
curr_dim = conv_dim
layers.append(nn.Conv2d(curr_dim, curr_dim * 2, kernel_size=4, stride=2, padding=1, bias=False))
layers.append(nn.InstanceNorm2d(curr_dim * 2, affine=True, track_running_stats=False))
layers.append(nn.ReLU(inplace=True))
curr_dim = curr_dim * 2
for i in range(repeat_num):
layers.append(ResnetBlock(curr_dim))
self.encoding = nn.Sequential(*layers)
encoding_dim = curr_dim
# decoder for motion prediction
layers2 = []
curr_dim = encoding_dim
layers2.append(nn.ConvTranspose2d(curr_dim, curr_dim // 2, kernel_size=4, stride=2, padding=1, bias=False))
layers2.append(nn.InstanceNorm2d(curr_dim // 2, affine=True, track_running_stats=False))
layers2.append(nn.ReLU(inplace=True))
curr_dim = curr_dim // 2
layers2.append(nn.ConvTranspose2d(curr_dim, 2, kernel_size=6, stride=2, padding=2, bias=False))
layers2.append(nn.Tanh())
self.flow_pred = nn.Sequential(*layers2)
## Refinement module
layers4 = []
layers4.append(nn.Conv2d(3, conv_dim, kernel_size=7, stride=2, padding=3, bias=False))
layers4.append(nn.ELU(inplace=True))
curr_dim = conv_dim
for i in range(repeat_num2):
layers4.append(ResnetBlock2(curr_dim))
layers4.append(nn.ConvTranspose2d(curr_dim, 3, kernel_size=6, stride=2, padding=2, bias=False))
self.refine = nn.Sequential(*layers4)
self.signal_type = signal_type
if self.signal_type == 'class' or self.signal_type == 'labelmap':
self.CONST_LOGITS = torch.arange(c_dim).unsqueeze(0)
if self.signal_type == 'labelmap':
self.CONST_LOGITS = self.CONST_LOGITS.unsqueeze(2).unsqueeze(3)
    def warp(self, x, flow, mode='bilinear', padding_mode='zeros', coff=0.1):
        # Warp x with the predicted flow field via a normalized sampling grid.
        n, c, h, w = x.size()
        yv, xv = torch.meshgrid([torch.arange(h), torch.arange(w)])
        # Normalize pixel coordinates to [-1, 1], the range grid_sample expects.
        xv = xv.float() / (w - 1) * 2.0 - 1
        yv = yv.float() / (h - 1) * 2.0 - 1
        grid = torch.cat((xv.unsqueeze(-1), yv.unsqueeze(-1)), -1).unsqueeze(0).to(x.device)
        # Displace the identity grid by the (scaled) flow and resample the image.
        grid_x = grid + 2 * flow * coff
        warp_x = F.grid_sample(x, grid_x, mode=mode, padding_mode=padding_mode)
        return warp_x
def forward(self, img, c, interp_coef=1., coef=0.1):
if self.signal_type == 'class':
c = c.unsqueeze(1)
c = (c == self.CONST_LOGITS.expand(c.size(0), self.CONST_LOGITS.size(1)).cuda()).float()
elif self.signal_type == 'labelmap':
            assert isinstance(c, list), 'c must be a list of two items'
cc = c[0].unsqueeze(1)
#print(c.size(), self.CONST_LOGITS.size())
logits = self.CONST_LOGITS.expand(cc.size(0), self.CONST_LOGITS.size(1), cc.size(2), cc.size(3)).cuda()
c_src = (c[0].unsqueeze(1) == logits).float()
c_tar = (c[1].unsqueeze(1) == logits).float()
c = torch.cat([c_src, c_tar], dim=1)
elif self.signal_type == 'edgemap':
            assert isinstance(c, list), 'c must be a list of two items'
c = torch.cat([cc.unsqueeze(1) for cc in c], dim=1)
if self.signal_type in {'class', 'au'}:
c = c.unsqueeze(2).unsqueeze(3)
c = c.expand(c.size(0), c.size(1), img.size(2), img.size(3))
x_cond = torch.cat([img, c], dim=1)
feat = self.encoding(x_cond)
flow = self.flow_pred(feat) * interp_coef
flow = flow.permute(0, 2, 3, 1) # [n, 2, h, w] ==> [n, h, w, 2]
warp_x = self.warp(img, flow, coff=coef)
refine_x = self.refine(warp_x)
refine_warp_x = torch.clamp(refine_x, min=-1.0, max=1.0)
return refine_warp_x, warp_x, flow, x_cond
"""
class UnetGenerator(nn.Module):
def __init__(self, input_nc, output_nc, num_downs, ngf=64,
use_dropout=False):
super(UnetGenerator, self).__init__()
# construct unet structure
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=None, innermost=True)
for i in range(num_downs - 5):
unet_block = UnetSkipConnectionBlock(ngf * 8, ngf * 8, input_nc=None, submodule=unet_block, use_dropout=use_dropout)
unet_block = UnetSkipConnectionBlock(ngf * 4, ngf * 8, input_nc=None, submodule=unet_block)
unet_block = UnetSkipConnectionBlock(ngf * 2, ngf * 4, input_nc=None, submodule=unet_block)
unet_block = UnetSkipConnectionBlock(ngf, ngf * 2, input_nc=None, submodule=unet_block)
unet_block = UnetSkipConnectionBlock(output_nc, ngf, input_nc=input_nc, submodule=unet_block, outermost=True)
self.model = unet_block
def forward(self, input):
return self.model(input)
"""
class UnetSkipConnectionBlock(nn.Module):
def __init__(self, outer_nc, inner_nc, input_nc=None,
submodule=None, outermost=False, innermost=False):
super(UnetSkipConnectionBlock, self).__init__()
self.outermost = outermost
if input_nc is None:
input_nc = outer_nc
downconv = nn.Conv2d(input_nc, inner_nc, kernel_size=4,
stride=2, padding=1, bias=False)
downrelu = nn.LeakyReLU(0.2, True)
downnorm = nn.InstanceNorm2d(inner_nc, affine=True,)
uprelu = nn.ReLU(True)
upnorm = nn.InstanceNorm2d(outer_nc, affine=True,)
if outermost:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1)
down = [downconv]
up = [uprelu, upconv, nn.Tanh()]
model = down + [submodule] + up
elif innermost:
upconv = nn.ConvTranspose2d(inner_nc, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=False)
down = [downrelu, downconv]
up = [uprelu, upconv, upnorm]
model = down + up
else:
upconv = nn.ConvTranspose2d(inner_nc * 2, outer_nc,
kernel_size=4, stride=2,
padding=1, bias=False)
down = [downrelu, downconv, downnorm]
up = [uprelu, upconv, upnorm]
model = down + [submodule] + up
self.model = nn.Sequential(*model)
def forward(self, x):
if self.outermost:
return self.model(x)
else:
return torch.cat([x, self.model(x)], 1)
class Discriminator(nn.Module):
def __init__(self, input_nc, aus_nc, image_size=128, ndf=64, n_layers=6, GAN_head=True, control_signal_type='class'):
super(Discriminator, self).__init__()
kw = 4
padw = 1
self.GAN_head = GAN_head
if self.GAN_head:
self.dis_top = nn.Conv2d(ndf*4, 1, kernel_size=kw-1, stride=1, padding=padw, bias=False)
self.control_signal_type = control_signal_type
if control_signal_type in {'labelmap', 'edgemap'}:
self.downlayer0 = nn.Sequential(*[
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.01, True)
])
self.downlayer1 = nn.Sequential(*[
nn.Conv2d(ndf, ndf*2, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.LeakyReLU(0.01, True)
])
self.downlayer2 = nn.Sequential(*[
nn.Conv2d(ndf*2, ndf*4, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.LeakyReLU(0.01, True)
])
self.downlayer3 = nn.Sequential(*[
nn.Conv2d(ndf*4, ndf*4, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.LeakyReLU(0.01, True)
])
self.downlayer4 = nn.Sequential(*[
nn.Conv2d(ndf*4, ndf*4, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.LeakyReLU(0.01, True)
])
self.downlayer5 = nn.Sequential(*[
nn.Conv2d(ndf*4, ndf*4, kernel_size=kw, stride=2, padding=padw, bias=True),
nn.LeakyReLU(0.01, True)
])
self.uplayer0 = nn.Sequential(nn.ConvTranspose2d(ndf*4,\
ndf*4,kernel_size=4,stride=2,padding=1,bias=False),
nn.LeakyReLU(0.01, True))
self.uplayer1 = nn.Sequential(nn.ConvTranspose2d(ndf*8,\
ndf*4,kernel_size=4,stride=2,padding=1,bias=False),
nn.LeakyReLU(0.01, True))
self.uplayer2 = nn.Sequential(nn.ConvTranspose2d(ndf*8,\
ndf*4,kernel_size=4,stride=2,padding=1,bias=False),
nn.LeakyReLU(0.01, True))
self.uplayer3 = nn.Sequential(nn.ConvTranspose2d(ndf*8,\
ndf*4,kernel_size=4,stride=2,padding=1,bias=False),
nn.LeakyReLU(0.01, True))
self.uplayer4 = nn.Sequential(nn.ConvTranspose2d(ndf*6,\
ndf*3,kernel_size=4,stride=2,padding=1,bias=False),
nn.LeakyReLU(0.01, True))
self.aus_top = nn.ConvTranspose2d(ndf*4, aus_nc, kernel_size=4, stride=2,padding=1, bias=False)
else:
use_bias = True
sequence = [
nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.01, True)
]
cur_dim = ndf
for n in range(1, n_layers):
sequence += [nn.Conv2d(cur_dim, min(2 * cur_dim, 256),
kernel_size=kw, stride=2, padding=padw, bias=use_bias),
nn.LeakyReLU(0.01, True)
]
cur_dim = min(2 * cur_dim, 256)
self.model = nn.Sequential(*sequence)
k_size = int(image_size / (2 ** n_layers))
self.aus_top = nn.Conv2d(ndf*4, aus_nc, kernel_size=k_size, stride=1, bias=False)
def forward(self, img):
if self.control_signal_type in {'au', 'class'}:
feat5 = self.model(img)
pred_aus = self.aus_top(feat5)
else:
feat0 = self.downlayer0(img)
feat1 = self.downlayer1(feat0)
feat2 = self.downlayer2(feat1)
feat3 = self.downlayer3(feat2)
feat4 = self.downlayer4(feat3)
feat5 = self.downlayer5(feat4)
temp = self.uplayer0(feat5)
temp = torch.cat([temp, feat4], dim=1)
temp = self.uplayer1(temp)
temp = torch.cat([temp, feat3], dim=1)
temp = self.uplayer2(temp)
temp = torch.cat([temp, feat2], dim=1)
temp = self.uplayer3(temp)
temp = torch.cat([temp, feat1], dim=1)
temp = self.uplayer4(temp)
temp = torch.cat([temp, feat0], dim=1)
pred_aus = self.aus_top(temp)
if self.GAN_head:
pred_map = self.dis_top(feat5)
return pred_map.squeeze(), pred_aus.squeeze()
else:
return None, pred_aus.squeeze()
|
1637878
|
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
import sys
import tensorflow as tf
import jpegio as jio
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
### Tensorflow
def get_lr_schedule(id_num):
if id_num == 0:
# From scratch
return {'boundaries': [20000, 200000],
'values': [1e-4, 1e-3, 1e-4],
'max_iter': 300000}
elif id_num == 1:
# From QF75
return {'boundaries': [10000, 120000, 140000],
'values': [1e-4, (1e-4)/2, (1e-4)/4, (1e-4)/8],
'max_iter': 160000}
elif id_num == 2:
# noPC
return {'boundaries': [20000, 80000, 120000, 160000],
'values': [1e-4, 1e-3, (1e-3)/10, (1e-3)/10/5, (1e-3)/10/10],
'max_iter': 200000}
class AdamaxOptimizer(optimizer.Optimizer):
"""Optimizer that implements the Adamax algorithm.
See [Kingma et. al., 2014](http://arxiv.org/abs/1412.6980)
([pdf](http://arxiv.org/pdf/1412.6980.pdf)).
@@__init__
"""
def __init__(self, learning_rate=0.001, beta1=0.9, beta2=0.999, use_locking=False, name="Adamax"):
super(AdamaxOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._beta1 = beta1
self._beta2 = beta2
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._beta1_t = None
self._beta2_t = None
def _prepare(self):
self._lr_t = ops.convert_to_tensor(self._lr, name="learning_rate")
self._beta1_t = ops.convert_to_tensor(self._beta1, name="beta1")
self._beta2_t = ops.convert_to_tensor(self._beta2, name="beta2")
def _create_slots(self, var_list):
# Create slots for the first and second moments.
for v in var_list:
self._zeros_slot(v, "m", self._name)
self._zeros_slot(v, "v", self._name)
def _apply_dense(self, grad, var):
lr_t = math_ops.cast(self._lr_t, var.dtype.base_dtype)
beta1_t = math_ops.cast(self._beta1_t, var.dtype.base_dtype)
beta2_t = math_ops.cast(self._beta2_t, var.dtype.base_dtype)
if var.dtype.base_dtype == tf.float16:
eps = 1e-7 # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
else:
eps = 1e-8
v = self.get_slot(var, "v")
v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
m = self.get_slot(var, "m")
m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
g_t = v_t / m_t
var_update = state_ops.assign_sub(var, lr_t * g_t)
return control_flow_ops.group(*[var_update, m_t, v_t])
def _apply_sparse(self, grad, var):
raise NotImplementedError("Sparse gradient updates are not supported.")
### Pytorch
def get_optimizer(optimizer_name):
import torch
if optimizer_name.lower() == 'sgd':
return torch.optim.SGD
elif optimizer_name.lower() == 'adamw':
return torch.optim.AdamW
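# Hedged usage sketch (TF1 graph mode): wire one of the schedules above into
# the custom optimizer. `loss` and `global_step` stand for whatever the
# training script defines.
def _optimizer_example(loss, global_step):
    sched = get_lr_schedule(0)
    lr = tf.train.piecewise_constant(global_step, sched['boundaries'], sched['values'])
    opt = AdamaxOptimizer(learning_rate=lr)
    return opt.minimize(loss, global_step=global_step)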
|
1637923
|
from collections import defaultdict
from stoichiograph import speller
from stoichiograph.speller import Node
ELEMENTS = {
'H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne', 'Na', 'Mg', 'Al',
'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca', 'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe',
'Co', 'Ni', 'Cu', 'Zn', 'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y',
'Zr', 'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn', 'Sb',
'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb', 'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir',
'Pt', 'Au', 'Hg', 'Tl', 'Pb', 'Bi', 'Po', 'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu', 'Am', 'Cm', 'Bk', 'Cf', 'Es', 'Fm', 'Md', 'No',
'Lr', 'Rf', 'Db', 'Sg', 'Bh', 'Hs', 'Mt', 'Ds', 'Rg', 'Cn', 'Nh', 'Fl',
'Mc', 'Lv', 'Ts', 'Og'
}
def test_verify_data():
"""Assert that the set of elements in `speller.py` matches this
canonical set.
"""
assert speller.ELEMENTS == ELEMENTS
def test_elemental_spelling():
"""Assert that we get the expected results when spelling various
inputs.
"""
assert speller.spell('amputation') == [
('Am', 'Pu', 'Ta', 'Ti', 'O', 'N'),
('Am', 'P', 'U', 'Ta', 'Ti', 'O', 'N')
]
assert speller.spell('') == []
assert speller.spell('o') == [('O',)]
def test_find_all_paths():
"""Make simple graph with some branches, and assert that we find all
the paths from the first node to the last.
"""
parents_to_children = {
'a': {'b'},
'b': {'c'},
'c': {'d'},
'd': {'e', 'y', 'z'},
'e': {'f', 'x'},
'f': {'g', 'x'},
'g': {'h'},
'h': {'i'},
'x': {'y'},
'y': {'z'},
}
assert set(speller.find_all_paths(parents_to_children, 'a', 'z')) == set([
('a', 'b', 'c', 'd', 'z'),
('a', 'b', 'c', 'd', 'y', 'z'),
('a', 'b', 'c', 'd', 'e', 'x', 'y', 'z'),
('a', 'b', 'c', 'd', 'e', 'f', 'x', 'y', 'z'),
])
def test_build_spelling_graph():
"""Make a `speller.Graph` object, then build it with a word and
assert that it contains the proper node relationships.
"""
g = speller.Graph()
speller.build_spelling_graph('because', g)
assert g._parents_of == defaultdict(
set,
{
Node(value='c', position=2): {Node(value='be', position=0)},
Node(value='au', position=3): {Node(value='c', position=2)},
Node(value='s', position=5): {
Node(value='au', position=3),
Node(value='u', position=4)
},
Node(value='se', position=5): {
Node(value='au', position=3),
Node(value='u', position=4)
},
None: {Node(value='se', position=5)},
Node(value='ca', position=2): {Node(value='be', position=0)},
Node(value='u', position=4): {Node(value='ca', position=2)}
}
)
assert g._children_of == defaultdict(
set,
{
None: {Node(value='be', position=0), Node(value='b', position=0)},
Node(value='be', position=0): {
Node(value='ca', position=2),
Node(value='c', position=2)
},
Node(value='c', position=2): {Node(value='au', position=3)},
Node(value='au', position=3): {
Node(value='se', position=5),
Node(value='s', position=5)
},
Node(value='ca', position=2): {Node(value='u', position=4)},
Node(value='u', position=4): {
Node(value='se', position=5),
Node(value='s', position=5)
}
}
)
class TestGraph:
"""Tests for the methods of the `speller.Graph` class."""
def test_firsts(self, test_graph):
"""Assert that the graph properly identifies its first nodes."""
assert test_graph.firsts() == {Node('be', 0), Node('b', 0)}
def test_lasts(self, test_graph):
"""Assert that the graph properly identifies its last nodes."""
assert test_graph.lasts() == {Node('se', 5)}
def test_add_edge(self, test_graph):
"""Add an edge to the graph."""
parent = Node('te', 0)
child = Node('st', 2)
test_graph.add_edge(parent, child)
assert test_graph._children_of[parent] == {child}
assert test_graph._parents_of[child] == {parent}
def test_add_edge_with_no_parent(self, test_graph):
"""Add an edge with no parent to the graph. Assert that 'None'
isn't added to `_parents_of[child]`.
"""
parent = None
child = Node('a', 0)
test_graph.add_edge(parent, child)
assert child in test_graph._children_of[parent]
assert None not in test_graph._parents_of[child]
def test_add_edge_with_no_child(self, test_graph):
"""Add an edge with no child to the graph. Assert that `None`
isn't added to `_children_of[parent]`.
"""
parent = Node('z', 25)
child = None
test_graph.add_edge(parent, child)
assert None not in test_graph._children_of[parent]
assert parent in test_graph._parents_of[child]
def test_nodes(self, test_graph):
"""Assert that the graph properly lists its nodes."""
assert set(test_graph.nodes(connected_only=True)) == set([
Node(value='be', position=0),
Node(value='c', position=2),
Node(value='ca', position=2),
Node(value='au', position=3),
Node(value='u', position=4),
Node(value='s', position=5),
Node(value='se', position=5),
])
assert set(test_graph.nodes(connected_only=False)) == set([
Node(value='b', position=0),
Node(value='be', position=0),
Node(value='c', position=2),
Node(value='ca', position=2),
Node(value='au', position=3),
Node(value='u', position=4),
Node(value='s', position=5),
Node(value='se', position=5),
])
def test_edges(self, test_graph):
"""Assert that the graph properly lists its edges."""
assert set(test_graph.edges()) == set([
(None, Node(value='b', position=0)),
(None, Node(value='be', position=0)),
(Node(value='be', position=0), Node(value='c', position=2)),
(Node(value='be', position=0), Node(value='ca', position=2)),
(Node(value='c', position=2), Node(value='au', position=3)),
(Node(value='au', position=3), Node(value='s', position=5)),
(Node(value='au', position=3), Node(value='se', position=5)),
(Node(value='ca', position=2), Node(value='u', position=4)),
(Node(value='u', position=4), Node(value='s', position=5)),
(Node(value='u', position=4), Node(value='se', position=5))
])
def test_export(self, test_graph):
"""Assert that the graph exports the proper dot code."""
assert test_graph.export() == (
"""digraph G {
graph [rankdir=LR];
node [width=0.75 shape=circle];
"Node(value='au', position=3)" -> "Node(value='s', position=5)";
"Node(value='au', position=3)" -> "Node(value='se', position=5)";
"Node(value='be', position=0)" -> "Node(value='c', position=2)";
"Node(value='be', position=0)" -> "Node(value='ca', position=2)";
"Node(value='c', position=2)" -> "Node(value='au', position=3)";
"Node(value='ca', position=2)" -> "Node(value='u', position=4)";
"Node(value='u', position=4)" -> "Node(value='s', position=5)";
"Node(value='u', position=4)" -> "Node(value='se', position=5)";
"Node(value='au', position=3)" [label="Au"];
"Node(value='be', position=0)" [label="Be"];
"Node(value='c', position=2)" [label="C"];
"Node(value='ca', position=2)" [label="Ca"];
"Node(value='s', position=5)" [label="S"];
"Node(value='se', position=5)" [label="Se"];
"Node(value='u', position=4)" [label="U"];
}"""
)
|
1637936
|
import os, sys
# sys.path.append('/home/shaunxliu/projects/nnsp')
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import torch
from torch.utils.data import DataLoader
import numpy as np
from src.solver import BaseSolver
from src.data_load import OneshotVcDataset, MultiSpkVcCollate
# from src.rnn_ppg2mel import BiRnnPpg2MelModel
# from src.mel_decoder_mol_encAddlf0 import MelDecoderMOL
from src.loss import MaskedMSELoss
from src.optim import Optimizer
from src.util import human_format, feat_to_fig
from src import build_model
class Solver(BaseSolver):
"""Customized Solver."""
def __init__(self, config, paras, mode):
super().__init__(config, paras, mode)
self.num_att_plots = 5
self.att_ws_dir = f"{self.logdir}/att_ws"
os.makedirs(self.att_ws_dir, exist_ok=True)
self.best_loss = np.inf
def fetch_data(self, data):
"""Move data to device"""
data = [i.to(self.device) for i in data]
return data
def load_data(self):
""" Load data for training/validation/plotting."""
train_dataset = OneshotVcDataset(
meta_file=self.config.data.train_fid_list,
vctk_ppg_dir=self.config.data.vctk_ppg_dir,
libri_ppg_dir=self.config.data.libri_ppg_dir,
vctk_f0_dir=self.config.data.vctk_f0_dir,
libri_f0_dir=self.config.data.libri_f0_dir,
vctk_wav_dir=self.config.data.vctk_wav_dir,
libri_wav_dir=self.config.data.libri_wav_dir,
vctk_spk_dvec_dir=self.config.data.vctk_spk_dvec_dir,
libri_spk_dvec_dir=self.config.data.libri_spk_dvec_dir,
ppg_file_ext=self.config.data.ppg_file_ext,
min_max_norm_mel=self.config.data.min_max_norm_mel,
mel_min=self.config.data.mel_min,
mel_max=self.config.data.mel_max,
)
dev_dataset = OneshotVcDataset(
meta_file=self.config.data.dev_fid_list,
vctk_ppg_dir=self.config.data.vctk_ppg_dir,
libri_ppg_dir=self.config.data.libri_ppg_dir,
vctk_f0_dir=self.config.data.vctk_f0_dir,
libri_f0_dir=self.config.data.libri_f0_dir,
vctk_wav_dir=self.config.data.vctk_wav_dir,
libri_wav_dir=self.config.data.libri_wav_dir,
vctk_spk_dvec_dir=self.config.data.vctk_spk_dvec_dir,
libri_spk_dvec_dir=self.config.data.libri_spk_dvec_dir,
ppg_file_ext=self.config.data.ppg_file_ext,
min_max_norm_mel=self.config.data.min_max_norm_mel,
mel_min=self.config.data.mel_min,
mel_max=self.config.data.mel_max,
)
self.train_dataloader = DataLoader(
train_dataset,
num_workers=self.paras.njobs,
shuffle=True,
batch_size=self.config.hparas.batch_size,
pin_memory=False,
drop_last=True,
collate_fn=MultiSpkVcCollate(self.config.model.frames_per_step,
use_spk_dvec=True),
)
self.dev_dataloader = DataLoader(
dev_dataset,
num_workers=self.paras.njobs,
shuffle=False,
batch_size=self.config.hparas.batch_size,
pin_memory=False,
drop_last=False,
collate_fn=MultiSpkVcCollate(self.config.model.frames_per_step,
use_spk_dvec=True),
)
self.plot_dataloader = DataLoader(
dev_dataset,
num_workers=self.paras.njobs,
shuffle=False,
batch_size=1,
pin_memory=False,
drop_last=False,
collate_fn=MultiSpkVcCollate(self.config.model.frames_per_step,
use_spk_dvec=True,
give_uttids=True),
)
msg = "Have prepared training set and dev set."
self.verbose(msg)
def load_pretrained_params(self):
print("Load pretrained model from: ", self.config.data.pretrain_model_file)
ignore_layer_prefixes = ["speaker_embedding_table"]
pretrain_model_file = self.config.data.pretrain_model_file
pretrain_ckpt = torch.load(
pretrain_model_file, map_location=self.device
)["model"]
model_dict = self.model.state_dict()
print(self.model)
        # 1. filter out unnecessary keys
for prefix in ignore_layer_prefixes:
pretrain_ckpt = {k : v
for k, v in pretrain_ckpt.items() if not k.startswith(prefix)
}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrain_ckpt)
# 3. load the new state dict
self.model.load_state_dict(model_dict)
def set_model(self):
"""Setup model and optimizer"""
# Model
print("[INFO] Model name: ", self.config["model_name"])
model_class = build_model(self.config["model_name"])
self.model = model_class(
**self.config["model"]
).to(self.device)
# self.load_pretrained_params()
# model_params = [{'params': self.model.spk_embedding.weight}]
model_params = [{'params': self.model.parameters()}]
# Loss criterion
self.loss_criterion = MaskedMSELoss(self.config.model.frames_per_step)
# Optimizer
self.optimizer = Optimizer(model_params, **self.config["hparas"])
self.verbose(self.optimizer.create_msg())
# Automatically load pre-trained model if self.paras.load is given
self.load_ckpt()
def exec(self):
self.verbose("Total training steps {}.".format(
human_format(self.max_step)))
mel_loss = None
n_epochs = 0
# Set as current time
self.timer.set()
while self.step < self.max_step:
for data in self.train_dataloader:
                # Pre-step: update lr_rate and zero the gradients
lr_rate = self.optimizer.pre_step(self.step)
total_loss = 0
# data to device
ppgs, lf0_uvs, mels, in_lengths, \
out_lengths, spk_ids, stop_tokens = self.fetch_data(data)
self.timer.cnt("rd")
mel_outputs, mel_outputs_postnet, predicted_stop = self.model(
ppgs,
in_lengths,
mels,
out_lengths,
lf0_uvs,
spk_ids
)
mel_loss, stop_loss = self.loss_criterion(
mel_outputs,
mel_outputs_postnet,
mels,
out_lengths,
stop_tokens,
predicted_stop
)
loss = mel_loss + stop_loss
self.timer.cnt("fw")
# Back-prop
grad_norm = self.backward(loss)
self.step += 1
# Logger
if (self.step == 1) or (self.step % self.PROGRESS_STEP == 0):
self.progress("Tr|loss:{:.4f},mel-loss:{:.4f},stop-loss:{:.4f}|Grad.Norm-{:.2f}|{}"
.format(loss.cpu().item(), mel_loss.cpu().item(),
stop_loss.cpu().item(), grad_norm, self.timer.show()))
self.write_log('loss', {'tr/loss': loss,
'tr/mel-loss': mel_loss,
'tr/stop-loss': stop_loss})
# Validation
if (self.step == 1) or (self.step % self.valid_step == 0):
self.validate()
# End of step
# https://github.com/pytorch/pytorch/issues/13246#issuecomment-529185354
torch.cuda.empty_cache()
self.timer.set()
if self.step > self.max_step:
break
n_epochs += 1
self.log.close()
def validate(self):
self.model.eval()
dev_loss, dev_mel_loss, dev_stop_loss = 0.0, 0.0, 0.0
for i, data in enumerate(self.dev_dataloader):
self.progress('Valid step - {}/{}'.format(i+1, len(self.dev_dataloader)))
# Fetch data
ppgs, lf0_uvs, mels, in_lengths, \
out_lengths, spk_ids, stop_tokens = self.fetch_data(data)
with torch.no_grad():
mel_outputs, mel_outputs_postnet, predicted_stop = self.model(
ppgs,
in_lengths,
mels,
out_lengths,
lf0_uvs,
spk_ids
)
mel_loss, stop_loss = self.loss_criterion(
mel_outputs,
mel_outputs_postnet,
mels,
out_lengths,
stop_tokens,
predicted_stop
)
loss = mel_loss + stop_loss
dev_loss += loss.cpu().item()
dev_mel_loss += mel_loss.cpu().item()
dev_stop_loss += stop_loss.cpu().item()
dev_loss = dev_loss / (i + 1)
dev_mel_loss = dev_mel_loss / (i + 1)
dev_stop_loss = dev_stop_loss / (i + 1)
self.save_checkpoint(f'step_{self.step}.pth', 'loss', dev_loss, show_msg=False)
if dev_loss < self.best_loss:
self.best_loss = dev_loss
self.save_checkpoint(f'best_loss_step_{self.step}.pth', 'loss', dev_loss)
self.write_log('loss', {'dv/loss': dev_loss,
'dv/mel-loss': dev_mel_loss,
'dv/stop-loss': dev_stop_loss})
# plot attention
for i, data in enumerate(self.plot_dataloader):
if i == self.num_att_plots:
break
# Fetch data
ppgs, lf0_uvs, mels, in_lengths, \
out_lengths, spk_ids, stop_tokens = self.fetch_data(data[:-1])
fid = data[-1][0]
with torch.no_grad():
_, _, _, att_ws = self.model(
ppgs,
in_lengths,
mels,
out_lengths,
lf0_uvs,
spk_ids,
output_att_ws=True
)
att_ws = att_ws.squeeze(0).cpu().numpy()
att_ws = att_ws[None]
w, h = plt.figaspect(1.0 / len(att_ws))
fig = plt.Figure(figsize=(w * 1.3, h * 1.3))
axes = fig.subplots(1, len(att_ws))
if len(att_ws) == 1:
axes = [axes]
for ax, aw in zip(axes, att_ws):
ax.imshow(aw.astype(np.float32), aspect="auto")
ax.set_title(f"{fid}")
ax.set_xlabel("Input")
ax.set_ylabel("Output")
ax.xaxis.set_major_locator(MaxNLocator(integer=True))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
fig_name = f"{self.att_ws_dir}/{fid}_step{self.step}.png"
fig.savefig(fig_name)
# Resume training
self.model.train()
|
1637937
|
from .token import Token
from typing import List
from typing import Dict
from typing import Set
from typing import Union
from typing import Generator
from .span import Span
from .utils import normalize_slice
class TextDoc:
def __init__(self):
# This list is populated in the __call__ method of the Tokenizer object.
# Its members are objects of the TokenMeta class
self.token_metas = list()
# A dictionary to hold custom attributes
self.attributes: Dict[str, List[str]] = dict()
def __getitem__(self, key: Union[int, slice]) -> Union[Token, Span, int]:
"""Returns a Token object at position `key` or Span object using slice.
Args:
key (int or slice): The index of the token within the Doc,
or the slice of the Doc to return as a Span object.
Returns:
Token or Span or id of the Span object.
"""
if isinstance(key, int):
idx = 0
if key < 0:
idx = len(self) + key
else:
idx = key
# Get the corresponding TokenMeta object
token_meta = self.token_metas[idx]
# Create a Token object
            token = Token(doc=self, token_meta=token_meta, position=idx)
return token
if isinstance(key, slice):
# Normalize slice to handle negative slicing
start, end = normalize_slice(len(self), key.start, key.stop, key.step)
# Create a new span object
span = Span(self, start, end)
return span
def __len__(self):
"""Return the number of tokens in the Doc."""
return len(self.token_metas)
def __iter__(self):
"""Allows to loop over tokens in `self.token_metas`"""
for i in range(len(self.token_metas)):
# Yield a Token object
yield self[i]
@property
def text(self):
"""Returns the text present in the doc with whitespaces"""
return "".join(token.text_with_ws for token in self)
|
1637969
|
import enum
from uuid import UUID
from pydantic import BaseModel
class ProfileShort(BaseModel):
id: UUID
username: str
class FriendshipRequest(BaseModel):
profile_id: UUID
target_profile_id: UUID
class Relationship(str, enum.Enum):
FRIEND = "FRIEND"
OUTGOING_FRIEND_REQUEST = "OUTGOING_FRIEND_REQUEST"
INCOMING_FRIEND_REQUEST = "INCOMING_FRIEND_REQUEST"
SELF = "SELF"
NONE = "NONE"
|
1637977
|
from openie import StanfordOpenIE
import spacy
import neuralcoref
from difflib import SequenceMatcher
import nltk
from nltk.corpus import stopwords
import argparse
import random
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str)
parser.add_argument('--outfile', type=str)
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
file = open(args.file)
text = file.read()
nlp = spacy.load('en')
neuralcoref.add_to_pipe(nlp)
num_examples = 1
pronouns = ["all", "another", "any", "anybody", "anyone", "anything", "both", "each", "each", "other", "either", "everybody", "everyone", "everything", "few", "he", "her", "hers", "herself", "him", "himself", "his", "through", "it", "its",
"itself", "little", "many", "me", "mine", "more", "most", "much", "my", "myself", "neither", "no", "one", "nobody", "none", "nothing", "one", "one", "another", "other", "others", "our", "ours", "ourselves", "through", "several",
"she", "some", "somebody", "someone", "something", "that", "their", "theirs", "them", "themselves", "these", "they", "this", "those", "us", "we", "what", "whatever", "which", "whichever", "who", "whoever", "whom", "whomever",
"whose", "you", "your", "yours", "yourself", "yourselves", ]
objective_pronouns = ["all", "another", "any", "anybody", "anyone", "anything", "both", "each", "each", "other", "either", "few", "her", "hers", "herself", "him", "himself", "his", "through", "its",
"itself", "little", "many", "me", "mine", "more", "most", "much", "my", "myself", "neither", "no", "nobody", "none", "nothing", "another", "other", "others", "our", "ours", "ourselves", "through", "several",
"some", "somebody", "someone", "something", "that", "their", "theirs", "them", "themselves", "these", "this", "those", "us", "what", "whatever", "which", "whichever", "who", "whoever", "whom", "whomever",
"whose", "your", "yours", "yourself", "yourselves", ]
def capitalizeFirst(phrase):
words = phrase.split()
words[0] = words[0].capitalize()
return ' '.join(words)
def lowerFirst(phrase):
words = phrase.split()
words[0] = words[0].lower()
return ' '.join(words)
def collect_mentions(doc):
mentions = []
for cluster in doc._.coref_clusters:
mentions.append(cluster.mentions)
return mentions
def intersect(b1, b2):
return b1[1] > b2[0] and b1[0] < b2[1]
# extract coreference mentions and open ie triples
doc = nlp(text)
mentions = collect_mentions(doc)
with StanfordOpenIE() as client:
core_nlp_output = client.annotate(text, simple_format=False)
triples = []
offset = 0
for sentence in core_nlp_output['sentences']:
for triple in sentence['openie']:
# use character offset because openie and spacy disagree on word level offset
for part in ['subject', 'relation', 'object']:
span = part + 'Span'
start = sentence['tokens'][triple[span][0]]['characterOffsetBegin']
end = sentence['tokens'][triple[span][1]]['characterOffsetEnd']
triple[span][0] = start
triple[span][1] = end
triples.append(triple)
characters = []
candidates = []
# find longest common in mentions to get list of characters
modifiers = ['the', 'her', 'its', 'his', 'their', 'a', 'this', 'that', 'those', 'these']
for m in mentions:
best = ' '.join([w for w in m[0].text.split() if w not in modifiers])
for c in m:
if len(best) == 0:
best = ' '.join([w for w in c.text.split() if w not in modifiers])
if c.text.lower() in pronouns:
continue
str1 = best
str2 = ' '.join([w for w in c.text.split() if w not in modifiers])
match = SequenceMatcher(None, str1.split(), str2.split()).find_longest_match(0, len(str1.split()), 0, len(str2.split()))
if match.size > 0:
best = " ".join(str1.split()[match.a: match.a + match.size])
if best in pronouns:
continue
        # sometimes extracts something with a lot of rephrasing, so take the first segment
best = best.split(',')[0]
characters.append(best)
candidates.append(m)
good = []
for char, cand in zip(characters, candidates):
if args.verbose:
print('=======================')
print(char + '\n')
used = []
relationships = set()
seq = [char]
# map mentions to triples
for mention in cand:
# use char level
m_range = [mention.start_char, mention.end_char]
for t in triples:
candidate = " ".join([capitalizeFirst(t['subject']), lowerFirst(t['relation'].replace('_', ' ')), t['object']])
# if relation already used, then ignore
bad = False
for u in used:
if intersect(t['relationSpan'], u) or t['relation'] in relationships:
bad = True
if bad:
continue
if intersect(t['subjectSpan'], m_range): # or intersect(t['objectSpan'], m_range):
seq.append(candidate)
used.append(t['relationSpan'])
relationships.add(t['relation'])
break
if args.verbose:
print(candidate)
if len(seq) >= 5:
good.append(seq)
random.seed(1)
f = open(args.outfile, 'a')
random.shuffle(good)
for i in range(num_examples):
seq = good[i]
    for _ in range(3):
f.write('\t'.join([s for s in seq]))
f.write('\n')
print(seq[0], len(seq))
|
1637987
|
import os
file_chars_reference = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def create_letters_text_files():
    try:
        # Get the absolute path of the directory this script lives in
        script_directory = os.path.dirname(__file__)
        for letter in file_chars_reference:
            file_path = f"{script_directory}/{letter}.txt"
            # "x" raises FileExistsError instead of silently truncating an
            # existing file, and the context manager closes the handle
            with open(file_path, "x"):
                pass
    except FileExistsError:
        print("File already exists")
return []
except Exception as except_message:
print(except_message)
return []
create_letters_text_files()
|
1637989
|
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
def test_module(client, url):
result = client._http_request('GET', full_url=url)
if isinstance(result, list):
return 'ok'
else:
return 'Test failed: ' + str(result)
def find_type_and_value(indicatordata):
    # default to an empty sequence so a missing key does not raise TypeError on len()
    if len(indicatordata.get('sha256') or []) > 0:
        return 'File', indicatordata.get('sha256')
    elif len(indicatordata.get('md5') or []) > 0:
        return 'File', indicatordata.get('md5')
    elif len(indicatordata.get('sha1') or []) > 0:
        return 'File', indicatordata.get('sha1')
    elif len(indicatordata.get('mail') or []) > 0:
        return 'Email', indicatordata.get('mail')
    elif len(indicatordata.get('ip') or []) > 0:
        return 'IP', indicatordata.get('ip')
    elif len(indicatordata.get('domain') or []) > 0:
        return 'Domain', indicatordata.get('domain')
    elif len(indicatordata.get('url') or []) > 0:
        return 'URL', indicatordata.get('url')
else:
return 'Error', ''
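# Illustrative doctest for the priority order above (hypothetical indicator dict,
# not taken from the live feed):
#
# >>> find_type_and_value({'sha256': ['deadbeef'], 'md5': [], 'sha1': [],
# ...                      'mail': [], 'ip': [], 'domain': [], 'url': []})
# ('File', ['deadbeef'])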
def get_indicators_command(client, url, feed_tags=None, tlp_color=None):
listofindicators = []
result = client._http_request('GET', full_url=url)
for item in result:
typeofindicator, valueofindicator = find_type_and_value(item)
for newitem in valueofindicator:
data = {'type': typeofindicator,
'value': newitem,
'service': 'Twitter IOC Hunter',
'fields': {
'firstseenbysource': item.get('tweet').get('timestamp'),
'tags': feed_tags,
'reportedby': item.get('tweet').get('user')},
'rawJSON': item,
'score': 3
}
if tlp_color:
data['fields']['trafficlightprotocol'] = tlp_color
listofindicators.append(data)
return listofindicators
def main():
type_of_feed = demisto.params().get('typeoffeed')
base_url = 'http://www.tweettioc.com/v1/tweets/daily/full'
user_url = 'http://www.tweettioc.com/v1/tweets/daily/full/user/'
tags_url = 'http://www.tweettioc.com/v1/tweets/daily/ioc/hashtags/'
feed_tags = demisto.params().get('feedTags')
tlp_color = demisto.params().get('tlp_color')
filter_to_use = demisto.params().get('filtertouse')
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
demisto.info(f'Command being called is {demisto.command()}')
if type_of_feed == 'Username':
url = f'{user_url}{filter_to_use}'
elif type_of_feed == 'Hashtag':
url = f'{tags_url}{filter_to_use}'
else:
url = base_url
try:
client = BaseClient(
base_url=url,
verify=verify_certificate,
proxy=proxy)
if demisto.command() == 'test-module':
# This is the call made when pressing the integration Test button.
result = test_module(client, url)
demisto.results(result)
elif demisto.command() == 'fetch-indicators':
indicators = get_indicators_command(client, url, feed_tags, tlp_color)
for b in batch(indicators, batch_size=2000):
demisto.createIndicators(b)
elif demisto.command() == 'twitteriochunter-get-indicators':
return_results({'Indicators': get_indicators_command(client, url, feed_tags, tlp_color)})
except Exception as e:
raise Exception(f'Error in Integration [{e}]')
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
1638020
|
import numpy as np
import torch
from sklearn.preprocessing import normalize
from torch_geometric.datasets import Planetoid
def get_dataset(dataset):
datasets = Planetoid('./dataset', dataset)
return datasets
def data_preprocessing(dataset):
dataset.adj = torch.sparse_coo_tensor(
dataset.edge_index, torch.ones(dataset.edge_index.shape[1]), torch.Size([dataset.x.shape[0], dataset.x.shape[0]])
).to_dense()
dataset.adj_label = dataset.adj
dataset.adj += torch.eye(dataset.x.shape[0])
dataset.adj = normalize(dataset.adj, norm="l1")
dataset.adj = torch.from_numpy(dataset.adj).to(dtype=torch.float)
return dataset
def get_M(adj):
adj_numpy = adj.cpu().numpy()
    # t-order proximity: average the 1..t step transition-probability matrices
    t = 2
tran_prob = normalize(adj_numpy, norm="l1", axis=0)
M_numpy = sum([np.linalg.matrix_power(tran_prob, i) for i in range(1, t + 1)]) / t
return torch.Tensor(M_numpy)
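if __name__ == "__main__":
    # Minimal sketch (illustrative, not part of the original pipeline): get_M on a
    # toy 3-node chain graph. normalize(..., axis=0) makes each column a transition
    # distribution, and M averages the 1- and 2-step transition probabilities.
    toy_adj = torch.tensor([[0., 1., 0.],
                            [1., 0., 1.],
                            [0., 1., 0.]])
    print(get_M(toy_adj))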
|
1638079
|
def main():
import os
import sys
import sysconfig
import site
try:
import vapoursynth
except ImportError as e:
print("It seems you have not installed VapourSynth yet.")
exit(e)
from .install import install
def print_help():
print(
"""
Usage: python3 -m vsstubs [operation] [mode]
operation can be "help", "install" or "update".
If not specified, "update" is selected.
- "help" shows this help message.
- "install" has three modes: "default", "byside" and "here".
  The "mode" option may be omitted, in which case "default"
  is selected.
  - "default" installs the stub file as a package called
    "vapoursynth-stub", which can be used by many
    language servers.
  - "byside" installs the stub file next to your installed
    vapoursynth module.
  - "here" generates the stub file in the directory where you
    run the command, which should only be used for testing.
- "update" finds your installed stub file and brings it up to date.
"""
)
argc = len(sys.argv)
if argc == 1:
mode = "update"
elif argc >= 2:
if sys.argv[1] == "install":
if argc >= 3:
mode = sys.argv[2]
else:
mode = "default"
elif sys.argv[1] == "update":
mode = "update"
elif sys.argv[1] == "help":
mode = "help"
else:
print(f'Unknown operation "{sys.argv[1]}"')
mode = "help"
else:
mode = "help"
if site.ENABLE_USER_SITE:
pkgdir = site.USER_SITE
else:
pkgdir = sysconfig.get_path("purelib")
stubsdir = os.path.join(pkgdir, "vapoursynth-stubs")
vsdir = os.path.dirname(os.path.realpath(vapoursynth.__file__))
workdir = os.getcwd()
filedir = os.path.dirname(os.path.realpath(__file__))
filedir = os.path.abspath(os.path.join(filedir, os.pardir))
if mode == "update":
if os.path.exists(os.path.join(stubsdir, "__init__.pyi")):
mode = "default"
elif os.path.exists(os.path.join(vsdir, "vapoursynth.pyi")):
mode = "byside"
elif os.path.exists(os.path.join(workdir, "vapoursynth.pyi")):
mode = "test"
else:
print("It seems you have not installed the stub file yet.")
mode = "help"
if mode == "default":
if not os.path.exists(stubsdir):
os.makedirs(stubsdir)
install(stubsdir, "__init__.pyi")
elif mode == "byside":
install(vsdir, "vapoursynth.pyi")
elif mode == "test":
install(workdir, "vapoursynth.pyi")
elif mode == "help":
print_help()
else:
print(f'Unknown mode "{mode}".')
print_help()
|
1638131
|
from collections import deque
class Node:
"""A Node which maps a node proto. It has pointers to its parents and
children.
"""
def __init__(self, onnx_node):
"""Initialize a node. This initialization only set up the mapping to
node proto. The pointers should be set up by outside.
"""
self.name = None
self.parents = []
self.children = []
self.proto = None
self.output_value = None
if onnx_node is not None:
self.name = onnx_node.name
self.proto = onnx_node
class Graph:
"""A graph which is constructed from the onnx proto.
"""
def __init__(self, onnx_graph):
"""Construct the graph from onnx.
"""
self.input_nodes = []
self.output_nodes = []
self.name2node = {}
self.output2node = {}
self.proto = onnx_graph
# Add input nodes
for value in onnx_graph.input:
input_node = Node(None)
input_node.name = "Input_" + value.name
input_node.output_value = value
self.name2node[input_node.name] = input_node
self.output2node[value.name] = input_node
self.input_nodes.append(input_node)
output_value_names = [value.name for value in onnx_graph.output]
# Add regular nodes
for onnx_node in onnx_graph.node:
node = Node(onnx_node)
self.name2node[node.name] = node
self.output2node[onnx_node.output[0]] = node
for value_name in onnx_node.input:
node.parents.append(self.output2node[value_name])
self.output2node[value_name].children.append(node)
if onnx_node.output[0] in output_value_names:
self.output_nodes.append(node)
# Add value infos
for value in onnx_graph.value_info:
node = self.output2node[value.name]
node.output_value = value
def get_sorted_node_list(self):
"""Return a node list in topological order.
"""
visited = set()
todo = deque()
result = []
for node in self.input_nodes:
todo.append(node)
visited.add(node)
for onnx_node in self.proto.node:
if onnx_node.op_type == "Constant":
node = self.name2node[onnx_node.name]
todo.append(node)
visited.add(node)
while todo:
node = todo.popleft()
result.append(node)
for child in node.children:
if child in visited:
continue
ready = True
for child_parent in child.parents:
if child_parent in visited:
continue
ready = False
break
if ready:
todo.append(child)
visited.add(child)
return result
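if __name__ == "__main__":
    # Minimal sketch (assumes the `onnx` package is installed; not part of the
    # original module): build a tiny Input -> Relu -> Exp graph and print the
    # topological order.
    from onnx import TensorProto, helper

    x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1])
    y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1])
    relu = helper.make_node("Relu", ["x"], ["r"], name="relu0")
    exp = helper.make_node("Exp", ["r"], ["y"], name="exp0")
    graph = Graph(helper.make_graph([relu, exp], "toy", [x], [y]))
    print([node.name for node in graph.get_sorted_node_list()])
    # expected: ['Input_x', 'relu0', 'exp0']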
|
1638133
|
from pathlib import Path
import ujson
class BotGeneratorPreset:
def __init__(self, database_dir: Path, bot_role: str):
bots_dir = database_dir.joinpath("bots", bot_role)
self.generation: dict = ujson.load(
bots_dir.joinpath("generation.json").open(encoding="utf8")
)
self.inventory: dict = ujson.load(
bots_dir.joinpath("inventory.json").open(encoding="utf8")
)
self.chances: dict = ujson.load(
bots_dir.joinpath("chances.json").open(encoding="utf8")
)
self.health: dict = ujson.load(
bots_dir.joinpath("health.json").open(encoding="utf8")
)
self.appearance: dict = ujson.load(
bots_dir.joinpath("appearance.json").open(encoding="utf8")
)
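# Hedged usage sketch (hypothetical paths and role, inferred from the loader above):
#   preset = BotGeneratorPreset(Path("database"), "assault")
# expects database/bots/assault/ to contain generation.json, inventory.json,
# chances.json, health.json and appearance.json.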
|
1638158
|
import sys
import json
import subprocess
def runner(language, commands, is_test):
print("\nRunning {language} formatter{mode}...\n".format(
language = language,
mode = " in test mode" if is_test else ""
))
process = subprocess.run(commands)
if process.returncode != 0:
        sys.exit(process.returncode)
args = json.loads(sys.argv[1])
languages = args["language"]["value"]
format_all_languages = "all" in languages
is_test_mode = args["test"]["value"]
if format_all_languages or "bazel" in languages:
target = "buildifier-test" if is_test_mode else "buildifier"
runner("Bazel", ["bazel", "run", f"//tools/format:{target}"], is_test_mode)
if format_all_languages or "cpp" in languages or "c++" in languages:
config = "tidy-test" if is_test_mode else "tidy"
runner("C++", ["bazel", "build", "//...", "--config", config], is_test_mode)
|
1638164
|
from typing import Tuple, Dict, List
import numpy as np
from graph_nets.graphs import GraphsTuple
from .tf_tools import graphs_tuple_to_data_dicts, data_dicts_to_graphs_tuple
MIN_STD = 1E-6
class Standardizer:
@staticmethod
def compute_mean_std(a: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
return (
np.mean(a, axis=0),
np.maximum(np.std(a, axis=0), MIN_STD),
)
@staticmethod
def _standardize(a: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
return (a - mean) / std
@staticmethod
def _destandardize(a: np.ndarray, mean: np.ndarray, std: np.ndarray) -> np.ndarray:
return (a * std) + mean
class ArrayStandardizer(Standardizer):
def __init__(
self,
mean: np.ndarray = np.array([0]),
std: np.ndarray = np.array([1]),
):
self.mean, self.std = mean, std
def standardize(self, array) -> np.ndarray:
return self._standardize(a=array, mean=self.mean, std=self.std)
def destandardize(self, array) -> np.ndarray:
return self._destandardize(a=array, mean=self.mean, std=self.std)
@classmethod
def from_array(cls, array: np.ndarray):
mean, std = cls.compute_mean_std(a=array)
return cls(mean=mean, std=std)
class GraphStandardizer(Standardizer):
def __init__(
self,
global_mean: np.ndarray = np.array([0]),
global_std: np.ndarray = np.array([1]),
nodes_mean: np.ndarray = np.array([0]),
nodes_std: np.ndarray = np.array([1]),
edges_mean: np.ndarray = np.array([0]),
edges_std: np.ndarray = np.array([1]),
):
self.global_mean, self.global_std = global_mean, global_std
self.nodes_mean, self.nodes_std = nodes_mean, nodes_std
self.edges_mean, self.edges_std = edges_mean, edges_std
def standardize_graphs_tuple(self, graphs: GraphsTuple) -> GraphsTuple:
standard = graphs.replace(globals=self._standardize(graphs.globals, mean=self.global_mean, std=self.global_std))
standard = standard.replace(nodes=self._standardize(graphs.nodes, mean=self.nodes_mean, std=self.nodes_std))
standard = standard.replace(edges=self._standardize(graphs.edges, mean=self.edges_mean, std=self.edges_std))
return standard
def standardize_data_dict(self, d: Dict) -> Dict:
return graphs_tuple_to_data_dicts(self.standardize_graphs_tuple(data_dicts_to_graphs_tuple([d])))[0]
def destandardize_graphs_tuple(self, graphs: GraphsTuple) -> GraphsTuple:
standard_graphs = graphs.replace(
globals=self._destandardize(graphs.globals, mean=self.global_mean, std=self.global_std))
standard_graphs = standard_graphs.replace(
nodes=self._destandardize(graphs.nodes, mean=self.nodes_mean, std=self.nodes_std))
standard_graphs = standard_graphs.replace(
edges=self._destandardize(graphs.edges, mean=self.edges_mean, std=self.edges_std))
return standard_graphs
def destandardize_data_dicts(self, d: Dict) -> Dict:
return graphs_tuple_to_data_dicts(self.destandardize_graphs_tuple(data_dicts_to_graphs_tuple([d])))[0]
@classmethod
def from_graphs_tuple(cls, graphs_tuple: GraphsTuple):
global_mean, global_std = cls.compute_mean_std(graphs_tuple.globals)
nodes_mean, nodes_std = cls.compute_mean_std(graphs_tuple.nodes)
edges_mean, edges_std = cls.compute_mean_std(graphs_tuple.edges)
return cls(
global_mean=global_mean,
global_std=global_std,
nodes_mean=nodes_mean,
nodes_std=nodes_std,
edges_mean=edges_mean,
edges_std=edges_std,
)
@classmethod
def from_data_dicts(cls, dicts: List[Dict]):
return cls.from_graphs_tuple(data_dicts_to_graphs_tuple(dicts))
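if __name__ == "__main__":
    # Minimal round-trip sketch for ArrayStandardizer (illustrative random data;
    # run via `python -m <package>.<module>` since this file uses relative imports).
    rng = np.random.default_rng(0)
    a = rng.normal(loc=3.0, scale=2.0, size=(1000, 4))
    standardizer = ArrayStandardizer.from_array(a)
    z = standardizer.standardize(a)  # ~zero mean, ~unit std per column
    assert np.allclose(standardizer.destandardize(z), a)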
|
1638205
|
from pkgconf import Conf
class ControlCenter(Conf):
DASHBOARDS = []
CHARTIST_COLORS = 'default'
SHARP = '#'
|
1638211
|
import os
import sys
import glob
import gzip
import bs4, lxml
import concurrent.futures
import re
from pathlib import Path
import json
import random
import CONFIG
def pmap(arg):
key, names = arg
random.shuffle(names)
for name in names:
try:
sha256 = name.split('/')[-1]
            if not Path(name).exists():
                continue
            if Path(f'jsons_content/{sha256}').exists():
                Path(name).write_bytes(gzip.compress(b'finished'))
                #print('already processed', name)
                continue
            html = gzip.decompress(Path(name).read_bytes()).decode()
soup = bs4.BeautifulSoup(html, 'lxml')
for script in soup(["script", "style"]):
script.extract() # rip it out
article = soup.find('article')
if article is None:
Path(name).unlink()
continue
titles = [article.h1, article.h2]
if titles == [None, None]:
continue
title = [t for t in [t.text for t in titles if t is not None] if t not in ['SNSアカウント']]
title = ' '.join(title)
canonical = soup.find('link', {'rel':'canonical'})
if canonical is None:
Path(name).unlink()
continue
if 'archive-' in canonical.get('href') or \
'theme-' in canonical.get('href'):
Path(name).unlink()
continue
print(canonical.get('href'))
time = soup.time.get('datetime')
body = soup.find('div', {'id':'entryBody'})
if body is None:
Path(name).unlink()
continue
body = body.text.replace('\n', ' ')
            body = re.sub(r'\s+', ' ', body)
record = {'title':title, 'canonical':canonical.get('href'), 'time':time, 'body':body, 'sha256':sha256}
with open(f'jsons_content/{sha256}', 'w') as fp:
fp.write(json.dumps(record, indent=2, ensure_ascii=False))
if random.random() <= 0.05:
print(record)
            Path(name).write_bytes(gzip.compress(b'finished'))
except Exception as ex:
#Path(name).unlink()
print(ex)
def main():
args = {}
for index,name in enumerate(glob.glob( CONFIG.HTML_PATH +'/*')):
key = index % 32
if args.get(key) is None:
args[key] = []
args[key].append(name)
args = [(key,names) for key, names in args.items()]
with concurrent.futures.ProcessPoolExecutor(max_workers=32) as exe:
exe.map(pmap, args)
if __name__ == '__main__':
if '--loop' in sys.argv:
while True:
main()
else:
main()
|
1638226
|
from django.shortcuts import render, get_object_or_404, redirect, reverse
from django.http import HttpResponse, HttpResponseBadRequest, StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.debug import sensitive_post_parameters
from django.contrib.auth.decorators import login_required
from django.utils import timezone
from django.views import View
from django.contrib.auth.models import User
from django.conf import settings
from . import models
from . import util
from . import forms
from . import signals
from . import fixed_data
from . import form_handlers
from .form_handlers import _format_tuple
import json
import uuid
import datetime
from tatl.models import TatlVerb
MINIMUM_CLIENT_VERSION = "0.37.0"
@csrf_exempt
def wrap_api_v2(request, f, permission=None, oauth_permission=None, partial=False, stream=False, get=False, oauth_only=False):
from tatl.models import TatlRequest, TatlPermFlex
start_ts = timezone.now()
api_o = {
"errors": 0,
"warnings": 0,
"messages": [],
"tasks": [],
"new": [],
"updated": [],
"ignored": [],
}
api_o["request"] = str(request.treq.response_uuid)
if not get:
        # Bounce non-POST requests on POST (not GET) interfaces
if request.method != "POST":
return HttpResponseBadRequest()
try:
json_data = json.loads(request.body)
except:
return HttpResponseBadRequest()
# Bounce badly formatted requests
if not json_data.get('token', None) or not json_data.get('username', None):
return HttpResponseBadRequest()
else:
# Bounce non-GET if get flag is set
if request.method != "GET":
return HttpResponseBadRequest()
# Set json_data to GET params, skipping check for token/username
json_data = request.GET
profile = None
oauth = False
if hasattr(request, "tatl_oauth") and request.tatl_oauth:
oauth = True
# borrowed from the oauth2_provider backend
from oauth2_provider.oauth2_backends import get_oauthlib_core
OAuthLibCore = get_oauthlib_core()
# now check the request for the right scopes
valid, r = OAuthLibCore.verify_request(request, scopes=oauth_permission.split(" ") if oauth_permission else [])
if valid:
profile = request.user.profile
user = request.user
else:
#TODO This should return 401 probably
api_o["messages"].append("Your token is valid but does not have all of the scopes to perform this action. Required scopes: %s" % oauth_permission)
api_o["errors"] += 1
bad = True
return HttpResponse(json.dumps(api_o), content_type="application/json")
elif oauth_only:
return HttpResponseBadRequest()
else:
if get:
# GET API endpoints are OAuth only
return HttpResponseBadRequest()
try:
# Check new key validity
key = models.ProfileAPIKey.objects.get(key=json_data["token"], profile__user__username=json_data["username"], was_revoked=False, validity_start__lt=timezone.now(), validity_end__gt=timezone.now())
profile = key.profile
except:
return HttpResponseBadRequest()
#api_o["messages"].append("That key does not exist, has expired or was revoked")
#api_o["errors"] += 1
#bad = True
user = profile.user
if permission:
# Check permission has been granted to user
if not user.has_perm(permission):
return HttpResponseBadRequest()
# Check permission has been granted to key
if not key.key_definition.permission:
return HttpResponseBadRequest()
if key.key_definition.permission.codename != permission.split('.')[1]:
return HttpResponseBadRequest()
# If in doubt
if not profile or not user.is_active or user.profile.is_revoked:
return HttpResponseBadRequest()
request.treq.is_api = True
request.treq.user = user
request.treq.save()
if permission and not oauth:
tflex = TatlPermFlex(
user = user,
substitute_user = None,
used_permission = permission,
timestamp = timezone.now(),
request=request.treq,
content_object = request.treq, #TODO just use the request for now
)
tflex.save()
# Bounce non-admin escalations to other users
if json_data.get("sudo_as"):
if user.is_staff:
try:
user = models.Profile.objects.get(user__username=json_data["sudo_as"]).user
request.treq.substitute_user = user
request.treq.save()
if permission and not oauth:
tflex.substitute_user = user
tflex.save()
except:
return HttpResponseBadRequest()
else:
return HttpResponseBadRequest()
bad = False
# Bounce out of date clients
if json_data.get("client_name") == "ocarina":
try:
server_version = tuple(map(int, (MINIMUM_CLIENT_VERSION.split("."))))
client_version = tuple(map(int, (json_data["client_version"].split("."))))
if client_version < server_version:
api_o["messages"].append("Update your 'ocarina' client to version v%s" % MINIMUM_CLIENT_VERSION)
api_o["errors"] += 1
bad = True
except:
api_o["messages"].append("It appears you are using 'ocarina', but your version number doesn't make sense... This shouldn't happen...")
api_o["errors"] += 1
bad = True
# Call the wrapped function
if not bad and profile:
possible_fstream = f(request, api_o, json_data, user=user, partial=partial)
api_o["success"] = api_o["errors"] == 0
end_ts = timezone.now()
request.treq.save()
if stream and possible_fstream:
return possible_fstream
else:
return HttpResponse(json.dumps(api_o), content_type="application/json")
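# Hedged sketch of the endpoint pattern wrap_api_v2 enables (hypothetical endpoint
# and permission names; the real endpoints below show genuine usage):
#   def my_endpoint(request):
#       def f(request, api_o, json_data, user=None, partial=False):
#           ...  # mutate api_o["errors"]/["messages"]/["new"]/["updated"]
#       return wrap_api_v2(request, f, oauth_permission="majora2.some_permission")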
def handle_metadata(metadata, tag_type, tag_to, user, api_o):
changed_fields = []
#nulled_fields = []
ts = timezone.now()
for tag_key in metadata:
for key in metadata[tag_key]:
t_data = {
tag_type: tag_to,
"tag": tag_key,
"timestamp": ts,
}
t_data["name"] = key
t_data["value"] = metadata[tag_key][key]
form = forms.TestMetadataForm(t_data)
if form.is_valid():
majora_meta, created, updated = form_handlers.handle_testmetadata(form, user=user, api_o=api_o)
if not created:
#TODO catch
pass
if not majora_meta:
api_o["warnings"] += 1
api_o["ignored"].append("metadata__%s__%s" % (t_data.get("tag"), t_data.get("name")))
if updated:
changed_fields.append("metadata:%s.%s" % (t_data.get("tag"), t_data.get("name")))
#if t_data.get("value") is None:
# # Nuke the record if it has been None'd
# if majora_meta.delete()[0] == 1:
# api_o["messages"].append("Deleted: metadata__%s__%s" % (t_data.get("tag"), t_data.get("name")))
else:
api_o["errors"] += 1
api_o["ignored"].append("metadata__%s__%s" % (t_data.get("tag"), t_data.get("name")))
api_o["messages"].append(form.errors.get_json_data())
return changed_fields
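# Shape expected by handle_metadata (illustrative tag/name/values only):
#   metadata = {"epi": {"cluster": "A", "priority": "high"}}
# yields one TestMetadataForm per (tag, name) pair, i.e. records for
# metadata__epi__cluster and metadata__epi__priority sharing one timestamp.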
#TODO Abstract this away into form handlers per-metric, use modelforms properly
def handle_metrics(metrics, tag_type, tag_to, user, api_o):
updated_metrics = []
ts = timezone.now()
for metric in metrics:
metrics[metric]["artifact"] = tag_to.id
metrics[metric]["namespace"] = metric
is_model = True
if metric == "sequence":
m = models.TemporaryMajoraArtifactMetric_Sequence.objects.filter(artifact=tag_to).first()
form = forms.M2Metric_SequenceForm(metrics[metric], instance=m)
elif metric == "mapping":
m = models.TemporaryMajoraArtifactMetric_Mapping.objects.filter(artifact=tag_to).first()
form = forms.M2Metric_MappingForm(metrics[metric], instance=m)
elif metric == "tile-mapping":
m = models.TemporaryMajoraArtifactMetric_Mapping_Tiles.objects.filter(artifact=tag_to).first()
form = forms.M2Metric_MappingTileForm(metrics[metric], instance=m)
elif metric == "ct":
m = models.TemporaryMajoraArtifactMetric_ThresholdCycle.objects.filter(artifact=tag_to).first()
metrics[metric]["num_tests"] = 0
metrics[metric]["min_ct"] = 0
metrics[metric]["max_ct"] = 0
form = forms.M2Metric_ThresholdCycleForm(metrics[metric], instance=m)
# Catch null values gently on uploader
any_ct = False
for metric_rec_name in metrics[metric].get("records", {}):
metric_rec = metrics[metric]["records"][metric_rec_name]
if metric_rec.get("ct_value"):
any_ct = True
if not any_ct:
api_o["ignored"].append("%s" % metric)
api_o["messages"].append("'%s' records look empty" % metric)
api_o["warnings"] += 1
continue
else:
api_o["ignored"].append(metric)
api_o["messages"].append("'%s' does not describe a valid metric" % metric)
api_o["warnings"] += 1
continue
if form.is_valid():
try:
metric_ob = form.save()
if metric_ob:
api_o["updated"].append(_format_tuple(tag_to))
# Handle optional records
first_valid = True
for metric_rec_name in metrics[metric].get("records", {}):
metric_rec = metrics[metric]["records"][metric_rec_name]
if metric == "ct":
metric_rec["artifact_metric"] = metric_ob
form = forms.M2MetricRecord_ThresholdCycleForm(metric_rec)
# Catch null values gently on uploader
if not metric_rec.get("ct_value"):
api_o["ignored"].append("%s:%s" % (metric, metric_rec_name))
api_o["warnings"] += 1
continue
if form.is_valid():
if first_valid:
# Destroy existing records
dc = metric_ob.metric_records.all().delete()[0] # bye
if dc > 0:
api_o["messages"].append("%d existing Ct value records deleted and replaced with new values" % int(dc/2))
updated_metrics = [metric]
first_valid = False
try:
artifact_metric = form.cleaned_data["artifact_metric"]
rec_obj, rec_obj_created = models.TemporaryMajoraArtifactMetricRecord_ThresholdCycle.objects.get_or_create(
artifact_metric = artifact_metric,
test_platform = form.cleaned_data.get("test_platform"),
test_target = form.cleaned_data.get("test_target"),
test_kit = form.cleaned_data.get("test_kit"),
)
if rec_obj:
rec_obj.ct_value = form.cleaned_data["ct_value"]
rec_obj.save()
artifact_metric.num_tests = len(artifact_metric.metric_records.all())
ct_min = None
ct_max = None
for record in artifact_metric.metric_records.all():
ct = record.ct_value
if not ct_min:
ct_min = ct
elif ct < ct_min:
ct_min = ct
if not ct_max:
ct_max = ct
elif ct > ct_max:
ct_max = ct
artifact_metric.min_ct = ct_min
artifact_metric.max_ct = ct_max
artifact_metric.save()
if not rec_obj:
api_o["ignored"].append("%s:%s" % (metric, metric_rec_name))
api_o["errors"] += 1
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
else:
api_o["errors"] += 1
api_o["ignored"].append("%s:%s" % (metric, metric_rec_name))
api_o["messages"].append(form.errors.get_json_data())
# End Metric Records
else:
api_o["ignored"].append(metric)
api_o["errors"] += 1
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
else:
api_o["errors"] += 1
api_o["ignored"].append(metric)
api_o["messages"].append(form.errors.get_json_data())
return updated_metrics
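# Shape expected by handle_metrics for the "ct" namespace (illustrative values only):
#   metrics = {"ct": {"records": {"r1": {"ct_value": 21.5,
#                                        "test_platform": "...",
#                                        "test_target": "...",
#                                        "test_kit": "..."}}}}
# num_tests/min_ct/max_ct are zeroed on the form and recomputed from saved records.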
def biosample_query_validity(request):
def f(request, api_o, json_data, user=None, partial=False):
biosamples = json_data.get("biosamples", {})
if not biosamples:
api_o["messages"].append("'biosamples' key missing or empty")
api_o["errors"] += 1
api_o["result"] = {}
for biosample in biosamples:
exists = False
has_metadata = False
has_sender = False
bs = models.BiosampleArtifact.objects.filter(central_sample_id=biosample).first()
if bs:
exists = True
if bs.sender_sample_id and len(bs.sender_sample_id) > 0:
has_sender = True
if bs.created:
if bs.created.collection_location_country and len(bs.created.collection_location_country) > 0:
has_metadata = True
api_o["result"][biosample] = {
"central_sample_id": bs.central_sample_id if bs else None,
"exists": exists,
"has_sender_id": has_sender,
"has_metadata": has_metadata
}
return wrap_api_v2(request, f)
def get_biosample(request):
def f(request, api_o, json_data, user=None, partial=False):
sample_id = json_data.get("central_sample_id")
if not sample_id:
api_o["messages"].append("'central_sample_id' key missing or empty")
api_o["errors"] += 1
return
try:
artifact = models.MajoraArtifact.objects.filter(dice_name=sample_id).first()
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append("No such artifact.")
return
try:
api_o["get"] = {
sample_id: artifact.as_struct()
}
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
return wrap_api_v2(request, f)
def get_sequencing(request):
def f(request, api_o, json_data, user=None, partial=False):
run_names = json_data.get("run_name")
if not run_names:
api_o["messages"].append("'run_name' key missing or empty")
api_o["errors"] += 1
return
if len(run_names) == 1 and run_names[0] == "*":
if user.is_staff:
from . import tasks
celery_task = tasks.task_get_sequencing.delay(None, api_o, json_data, user=user.pk if user else None, response_uuid=api_o["request"])
if celery_task:
api_o["tasks"].append(celery_task.id)
api_o["messages"].append("Call api.majora.task.get with the appropriate task ID later...")
else:
api_o["errors"] += 1
api_o["messages"].append("Could not add requested task to Celery...")
else:
return HttpResponseBadRequest()
runs = {}
for run_name in run_names:
try:
process = models.DNASequencingProcess.objects.get(run_name=run_name)
except Exception as e:
api_o["warnings"] += 1
api_o["ignored"].append(run_name)
continue
try:
runs[process.run_name] = process.as_struct()
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
continue
api_o["get"] = runs
return wrap_api_v2(request, f)
def get_sequencing2(request):
def f(request, api_o, json_data, user=None, partial=False):
run_names = json_data.get("run_name")
if not run_names:
api_o["messages"].append("'run_name' key missing or empty")
api_o["errors"] += 1
return
if len(run_names) == 1 and run_names[0] == "*":
if user.is_staff:
from . import tasks
celery_task = tasks.task_get_sequencing_faster.delay(None, api_o, json_data, user=user.pk if user else None, response_uuid=api_o["request"])
if celery_task:
api_o["tasks"].append(celery_task.id)
api_o["messages"].append("Call api.majora.task.get with the appropriate task ID later...")
else:
api_o["errors"] += 1
api_o["messages"].append("Could not add requested task to Celery...")
else:
return HttpResponseBadRequest()
runs = {}
for run_name in run_names:
try:
process = models.DNASequencingProcess.objects.get(run_name=run_name)
except Exception as e:
api_o["warnings"] += 1
api_o["ignored"].append(run_name)
continue
try:
runs[process.run_name] = process.as_struct()
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
continue
api_o["get"] = runs
return wrap_api_v2(request, f)
def add_qc(request):
def f(request, api_o, json_data, user=None, partial=False):
pag_name = json_data.get("publish_group")
test_name = json_data.get("test_name")
test_version = json_data.get("test_version")
if (not pag_name) or (not test_name) or (not test_version):
api_o["messages"].append("'pag_name', 'test' or 'version' key missing or empty")
api_o["errors"] += 1
return
if len(pag_name)==0 or len(test_name)==0 or len(str(test_version))==0:
api_o["messages"].append("'pag_name', 'test' or 'version' key missing or empty")
api_o["errors"] += 1
return
pag = models.PublishedArtifactGroup.objects.filter(is_latest=True, published_name=pag_name).first() # There can be only one
if not pag:
api_o["messages"].append("Invalid 'pag_name'")
api_o["ignored"].append(pag_name)
api_o["errors"] += 1
return
t_group = models.PAGQualityTestEquivalenceGroup.objects.filter(slug=test_name).first()
if not t_group:
api_o["messages"].append("Invalid 'test_name'")
api_o["ignored"].append(pag_name)
api_o["errors"] += 1
return
# Gather metrics from PAG
metrics = {}
for artifact in pag.tagged_artifacts.all():
# For this project we don't need to worry about duplicates
# but this is an outstanding problem... TODO
for metric in artifact.metrics.all():
if metric.namespace:
if metric.namespace not in metrics:
metrics[metric.namespace] = metric.as_struct()
else:
api_o["messages"].append("Cannot automatically QC a PAG with multiple objects containing the same metric type...")
api_o["errors"] += 1
return
# Gather metadata from PAG
metadata = {}
for artifact in pag.tagged_artifacts.all():
curr_meta = artifact.get_metadata_as_struct()
for namespace in curr_meta:
if namespace not in metadata:
metadata[namespace] = {}
for meta_name in curr_meta[namespace]:
if meta_name not in metadata[namespace]:
metadata[namespace][meta_name] = curr_meta[namespace][meta_name]
elif metadata[namespace][meta_name] != curr_meta[namespace][meta_name]:
api_o["messages"].append("Cannot automatically QC a PAG with multiple objects containing the same metadata fields with different values...")
api_o["errors"] += 1
return
else:
pass
n_fails = 0
test_data = {}
        all_skipped = True # flag to verify that at least one QC test was actually run
for test in t_group.tests.all():
# Get the latest test version
tv = test.versions.order_by('-version_number').first()
test_data[tv] = {
"results": {},
"decisions": {},
"is_pass": None,
"is_skip": None,
}
is_match = False
is_skip = False
# Determine if we need to skip this test
for tfilter in test.filters.all():
meta = metadata.get(tfilter.metadata_namespace, {}).get(tfilter.metadata_name, None)
if meta:
meta = str(meta).upper()
if tfilter.op == "EQ":
is_match = meta == tfilter.filter_on_str
elif tfilter.op == "NEQ":
is_match = meta != tfilter.filter_on_str
else:
pass
is_skip = not is_match
else:
if tfilter.force_field:
api_o["messages"].append("Cannot automatically QC a PAG that is missing required metadata (%s.%s)" % (tfilter.metadata_namespace, tfilter.metadata_name))
api_o["errors"] += 1
return
if is_skip:
test_data[tv]["is_skip"] = True
test_data[tv]["is_pass"] = False
continue # to next test
else:
all_skipped = False
test_data[tv]["is_skip"] = False
for rule in tv.rules.all():
curr_res = {
"rule": rule,
"test_metric_str": None,
"is_pass": None,
"is_warn": None,
"is_fail": None,
}
# Determine if the test can be performed
if rule.metric_namespace not in metrics:
api_o["messages"].append("Namespace %s not found in metrics" % rule.metric_namespace)
api_o["ignored"].append(rule.metric_namespace)
api_o["errors"] += 1
continue
if rule.metric_name not in metrics[rule.metric_namespace] or metrics[rule.metric_namespace][rule.metric_name] is None:
api_o["messages"].append("Metric %s.%s not found in metrics" % (rule.metric_namespace, rule.metric_name))
api_o["ignored"].append(rule.metric_name)
api_o["errors"] += 1
continue
curr_metric = metrics[rule.metric_namespace][rule.metric_name]
curr_res["test_metric_str"] = str(curr_metric)
if not rule.warn_min and not rule.warn_max:
curr_res["is_warn"] = False
else:
# Check warnings
if rule.warn_min:
if curr_metric < rule.warn_min:
curr_res["is_warn"] = True
else:
curr_res["is_warn"] = False
if not curr_res["is_warn"] and rule.warn_max:
if curr_metric >= rule.warn_max:
curr_res["is_warn"] = True
else:
curr_res["is_warn"] = False
if not rule.fail_min and not rule.fail_max:
curr_res["is_fail"] = False
else:
# Check failures
if rule.fail_min:
if curr_metric < rule.fail_min:
curr_res["is_fail"] = True
else:
curr_res["is_fail"] = False
if not curr_res["is_fail"] and rule.fail_max:
if curr_metric >= rule.fail_max:
curr_res["is_fail"] = True
else:
curr_res["is_fail"] = False
curr_res["is_pass"] = not curr_res["is_fail"]
test_data[tv]["results"][rule] = curr_res
#TODO What if the same rule is checked many times? (It should not be anyway but...)
if len(test_data[tv]["results"]) != len(tv.rules.all()):
api_o["messages"].append("Refusing to create QC report as not all target metrics could be assessed...")
api_o["metrics"] = metrics
api_o["errors"] += 1
return
curr_test_fails = 0
for decision in tv.decisions.all():
curr_dec = {
"decision": decision,
"a": None,
"b": None,
"is_pass": None,
"is_warn": None,
"is_fail": None,
}
results = test_data[tv]["results"]
if decision.a not in results:
api_o["messages"].append("Could not make a decision for rule as metric appears to have not been selecting for testing")
api_o["errors"] += 1
return
curr_dec["a"] = decision.a
if not decision.b:
curr_dec["is_warn"] = results[decision.a]["is_warn"]
curr_dec["is_fail"] = results[decision.a]["is_fail"]
else:
curr_dec["b"] = decision.b
if decision.b not in results:
api_o["messages"].append("Could not make a decision for rule as metric appears to have not been selecting for testing")
api_o["errors"] += 1
return
if decision.op == "AND":
curr_dec["is_warn"] = results[decision.a]["is_warn"] or results[decision.a]["is_warn"] # Warnings always roll up
curr_dec["is_fail"] = results[decision.a]["is_fail"] and results[decision.b]["is_fail"]
elif decision.op == "OR":
curr_dec["is_warn"] = results[decision.a]["is_warn"] or results[decision.a]["is_warn"]
curr_dec["is_fail"] = results[decision.a]["is_fail"] or results[decision.b]["is_fail"]
else:
api_o["messages"].append("Unknown decision operator encountered")
api_o["errors"] += 1
return
curr_dec["is_pass"] = not curr_dec["is_fail"]
if not curr_dec["is_pass"]:
n_fails += 1
curr_test_fails += 1
test_data[tv]["decisions"][decision] = curr_dec
test_data[tv]["is_pass"] = curr_test_fails == 0
if len(test_data[tv]["decisions"]) != len(tv.decisions.all()):
api_o["messages"].append("Refusing to create QC report as not all target metrics could be assessed...")
api_o["errors"] += 1
return
if all_skipped:
# See https://github.com/COG-UK/dipi-group/issues/55 for why we default to using at least one QC test
api_o["messages"].append("Cowardly refusing to create QC report as no tests were performed...")
api_o["errors"] += 1
return
# Looks good?
tz_now_dt = timezone.now()
is_pass = n_fails == 0
ereport_g, created = models.PAGQualityReportEquivalenceGroup.objects.get_or_create(
pag = pag,
test_group = t_group,
)
ereport_g.last_updated = tz_now_dt
ereport_g.is_pass = is_pass
ereport_g.save()
for tv in test_data:
report_g, created = models.PAGQualityReportGroup.objects.get_or_create(
pag = pag,
group = ereport_g,
test_set = tv.test,
)
report_g.is_pass = test_data[tv]["is_pass"]
report_g.is_skip = test_data[tv]["is_skip"]
report_g.save()
report, created = models.PAGQualityReport.objects.get_or_create(
report_group = report_g,
test_set_version = tv,
is_pass = test_data[tv]["is_pass"],
is_skip = test_data[tv]["is_skip"],
timestamp = tz_now_dt,
)
report.save()
if test_data[tv]["is_skip"]:
continue
saved_rules = {}
for rule, rule_result in test_data[tv]["results"].items():
rule_result["report"] = report
rule_rec, created = models.PAGQualityReportRuleRecord.objects.get_or_create(
**rule_result
)
rule_rec.save()
saved_rules[rule] = rule_rec
for decision, decision_result in test_data[tv]["decisions"].items():
decision_result["report"] = report
decision_result["a"] = saved_rules[decision_result["a"]]
if decision_result["b"]:
decision_result["b"] = saved_rules[decision_result["b"]]
dec_rec, created = models.PAGQualityReportDecisionRecord.objects.get_or_create(
**decision_result
)
dec_rec.save()
api_o["test_results"] = str(test_data)
return wrap_api_v2(request, f, oauth_permission="majora2.add_pagqualityreport majora2.change_pagqualityreport")
def add_metrics(request):
def f(request, api_o, json_data, user=None, partial=False):
artifact = json_data.get("artifact", "")
artifact_path = json_data.get("artifact_path", "")
if (not artifact or len(artifact) == 0) and (not artifact_path or len(artifact_path) == 0):
api_o["messages"].append("'artifact' or 'artifact_path' key missing or empty")
api_o["errors"] += 1
return
metrics = json_data.get("metrics", {})
a = None
if artifact:
try:
a = models.MajoraArtifact.objects.get(dice_name=artifact)
except:
pass
elif artifact_path:
#TODO Need a much better way to keep track of paths
a = models.DigitalResourceArtifact.objects.filter(current_path=artifact_path).first()
if not a:
api_o["ignored"].append((artifact, artifact_path))
api_o["errors"] += 1
return
for metric in metrics:
metrics[metric]["artifact"] = a.id
metrics[metric]["namespace"] = metric
if metric == "sequence":
m = models.TemporaryMajoraArtifactMetric_Sequence.objects.filter(artifact=a).first()
form = forms.M2Metric_SequenceForm(metrics[metric], instance=m)
elif metric == "mapping":
m = models.TemporaryMajoraArtifactMetric_Mapping.objects.filter(artifact=a).first()
form = forms.M2Metric_MappingForm(metrics[metric], instance=m)
elif metric == "tile-mapping":
m = models.TemporaryMajoraArtifactMetric_Mapping_Tiles.objects.filter(artifact=a).first()
form = forms.M2Metric_MappingTileForm(metrics[metric], instance=m)
else:
api_o["ignored"].append(metric)
api_o["messages"].append("'%s' does not describe a valid metric" % metric)
api_o["warnings"] += 1
continue
if form.is_valid():
try:
metric = form.save()
if metric:
api_o["updated"].append(_format_tuple(a))
else:
api_o["ignored"].append(metric)
api_o["errors"] += 1
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
else:
api_o["errors"] += 1
api_o["ignored"].append(metric)
api_o["messages"].append(form.errors.get_json_data())
return wrap_api_v2(request, f)
# NOTE samstudio8 2021-05-25
# This endpoint was initially a workaround to support biosamples without metadata
# and allow downstream processes depending on them to be submitted without error,
# with the metadata to be filled in later. Today we extend this idea to optionally
# allow for a sender_sample_id to be pushed in to provide linkage to the four nations.
# This is a bit of a hack and with a little more time and energy I might've come up
# with a solution wherein the MajoraArtifact model itself has the ability to flag
# itself as a complete record or not. Nevertheless, we are not in the business to
# sit around and come up with elegant things and it just has to work instead.
# See https://github.com/COG-UK/dipi-group/issues/78
def addempty_biosample(request):
def f(request, api_o, json_data, user=None, partial=False):
biosamples = json_data.get("biosamples", [])
if not biosamples:
api_o["messages"].append("'biosamples' key missing or empty")
api_o["errors"] += 1
return
if not isinstance(biosamples, list):
api_o["errors"] += 1
api_o["messages"].append("'biosamples' appears malformed")
return
for sample_id in biosamples:
try:
sender_sample_id = None
if isinstance(sample_id, dict):
central_sample_id = sample_id["central_sample_id"]
sender_sample_id = sample_id.get("sender_sample_id")
elif isinstance(sample_id, str):
central_sample_id = sample_id
else:
raise Exception()
except:
api_o["warnings"] += 1
api_o["messages"].append("'biosamples' appears malformed")
continue
# Make dummy sample
biosample, created = models.BiosampleArtifact.objects.get_or_create(
central_sample_id=central_sample_id,
dice_name=central_sample_id,
)
if created:
TatlVerb(request=request.treq, verb="CREATE", content_object=biosample).save()
api_o["new"].append(_format_tuple(biosample))
else:
# TODO Ignored doesn't really mean ignored if you go and update sender_sample_id
# but at least users will know their force push did not create a new sample, or whatever
api_o["ignored"].append(_format_tuple(biosample))
api_o["warnings"] += 1
if not biosample.created:
sample_p = models.BiosourceSamplingProcess()
sample_p.save()
sampling_rec = models.BiosourceSamplingProcessRecord(
process=sample_p,
out_artifact=biosample,
)
sampling_rec.save()
biosample.created = sample_p # Set the sample collection process
biosample.save()
# Add the optional sender_sample_id regardless of whether this sample was just created,
# this is a sneaky --partial to help WSI out of a tight spot, that ONLY works on blanked samples (adm1 null)
# https://github.com/COG-UK/dipi-group/issues/78#issuecomment-856743169
if sender_sample_id and not biosample.created.collection_location_adm1:
if biosample.sender_sample_id != sender_sample_id:
biosample.sender_sample_id = sender_sample_id
biosample.save()
if not created:
changed_data_d = {
"changed_fields": ["sender_sample_id"],
"nulled_fields": [],
}
api_o["updated"].append(_format_tuple(biosample))
TatlVerb(request=request.treq, verb="UPDATE", content_object=biosample, extra_context=json.dumps(changed_data_d)).save()
return wrap_api_v2(request, f, permission="majora2.force_add_biosampleartifact", oauth_permission="majora2.force_add_biosampleartifact majora2.add_biosampleartifact majora2.change_biosampleartifact majora2.add_biosourcesamplingprocess majora2.change_biosourcesamplingprocess", oauth_only=True)
class MajoraEndpointView(View):
#TODO Abstract basic empty key checking to MEV
#TODO Abstract wrap_api_v2 here
#TODO Abstract tatl messages out of f to class
def update(self, request, *args, **kwargs):
#TODO Get objects to update to pass to f
#TODO Set self.partial
pass
def create(self, request, *args, **kwargs):
pass
def retrieve(self, request, *args, **kwargs):
pass
def post(self, request, *args, **kwargs):
api_tail = request.resolver_match.view_name.split('.')[-1]
if api_tail == "add":
return self.create(request, *args, **kwargs)
elif api_tail == "update":
return self.update(request, *args, **kwargs)
class BiosampleArtifactEndpointView(MajoraEndpointView):
def create(self, request, *args, **kwargs):
return wrap_api_v2(request, self.f, oauth_permission="majora2.add_biosampleartifact majora2.change_biosampleartifact majora2.add_biosamplesource majora2.change_biosamplesource majora2.add_biosourcesamplingprocess majora2.change_biosourcesamplingprocess")
def update(self, request, *args, **kwargs):
return wrap_api_v2(request, self.f, oauth_permission="majora2.change_biosampleartifact majora2.add_biosamplesource majora2.change_biosamplesource majora2.change_biosourcesamplingprocess", partial=True)
def f(self, request, api_o, json_data, user=None, partial=False):
biosamples = json_data.get("biosamples", {})
if not biosamples:
api_o["messages"].append("'biosamples' key missing or empty")
api_o["errors"] += 1
for biosample in biosamples:
try:
sample_id = biosample.get("central_sample_id")
initial = fixed_data.fill_fixed_data("api.artifact.biosample.add", user)
# Fetch objects for update (if applicable)
supp = None
sample_p = None
source = None
bs = models.BiosampleArtifact.objects.filter(central_sample_id=sample_id).first()
if bs:
if hasattr(bs, "created"):
sample_p = bs.created
if hasattr(bs.created, "coguk_supp"):
supp = bs.created.coguk_supp
if hasattr(bs, "primary_group"):
source = bs.primary_group
if partial:
if not bs:
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
api_o["messages"].append("Cannot use `partial` on new BiosampleArtifact %s" % sample_id)
continue
if not sample_p or not sample_p.submission_user:
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
api_o["messages"].append("Cannot use `partial` on empty BiosampleArtifact %s" % sample_id)
continue
# Pre screen the cog uk supplementary form
coguk_supp_form = forms.COGUK_BiosourceSamplingProcessSupplement_ModelForm(biosample, initial=initial, instance=supp, partial=partial)
if not coguk_supp_form.is_valid():
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
api_o["messages"].append(coguk_supp_form.errors.get_json_data())
continue
# Pre screen the sample collection process form
sample_process_form = forms.BiosourceSamplingProcessModelForm(biosample, initial=initial, instance=sample_p, partial=partial)
if not sample_process_form.is_valid():
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
api_o["messages"].append(sample_process_form.errors.get_json_data())
continue
# Handle new sample
sample_form = forms.BiosampleArtifactModelForm(biosample, initial=initial, instance=bs, partial=partial)
if not sample_form.is_valid():
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
api_o["messages"].append(sample_form.errors.get_json_data())
continue
# Hit it
sample = sample_form.save(commit=False)
if not sample:
api_o["errors"] += 1
api_o["ignored"].append(sample_id)
continue
# Create (or fetch) the biosample source (host)
#TODO There is a form for this but it seems overkill for one field
source_created = None
biosample_source_id = biosample.get("biosample_source_id")
if biosample_source_id:
source, source_created = models.BiosampleSource.objects.get_or_create(
dice_name=biosample_source_id,
secondary_id=biosample_source_id,
source_type = initial.get("source_type"), # previously fetched from form
physical=True,
)
source.save()
# Create and save the sample collection process
sample_p = sample_process_form.save(commit=False)
if not sample_p.who:
submission_org = user.profile.institute if hasattr(user, "profile") and not user.profile.institute.code.startswith("?") else None
if submission_org:
sample_p.submitted_by = submission_org.name
sample_p.submission_org = submission_org
sample_p.who = user
sample_p.when = sample_p.collection_date if sample_p.collection_date else sample_p.received_date
sample_p.submission_user = user
sample_p.save()
# Update remaining sample fields
sample.dice_name = sample.central_sample_id
sample.primary_group = source
sample.save()
# Bind sample and sample collection process if sample_p is new
if not sample.created:
sample.created = sample_p
if sample_p.records.count() == 0:
sampling_rec = models.BiosourceSamplingProcessRecord(
process=sample_p,
in_group=sample.primary_group,
out_artifact=sample,
)
sampling_rec.save()
sample.save()
# Create and link the supplementary model data
coguk_supp = coguk_supp_form.save(commit=False)
coguk_supp.sampling = sample.created
coguk_supp.save()
# Hack to fix source if it has been changed at some point
#TODO This only works in the cog context where we can assume 1:1 between sample and collection
if source and sample.created:
for record in sample.created.records.all():
if record.in_group != source and record.out_artifact == sample:
record.in_group = source
record.save()
updated_metadata_l = handle_metadata(biosample.get("metadata", {}), 'artifact', sample.dice_name, user, api_o)
updated_metrics_l = handle_metrics(biosample.get("metrics", {}), 'artifact', sample, user, api_o) #TODO clean this as it duplicates the add_metric view
if not bs and sample:
# Created
if api_o:
api_o["new"].append(_format_tuple(sample))
TatlVerb(request=request.treq, verb="CREATE", content_object=sample).save()
else:
changed_data_d = forms.MajoraPossiblePartialModelForm.merge_changed_data(
coguk_supp_form, sample_process_form, sample_form
)
changed_data_d["changed_metadata"] = updated_metadata_l
changed_data_d["flashed_metrics"] = updated_metrics_l
if api_o:
api_o["updated"].append(_format_tuple(sample))
TatlVerb(request=request.treq, verb="UPDATE", content_object=sample, extra_context=json.dumps(changed_data_d)).save()
if source_created:
if api_o:
api_o["new"].append(_format_tuple(source))
TatlVerb(request=request.treq, verb="CREATE", content_object=source).save()
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
def add_library(request):
def f(request, api_o, json_data, user=None, partial=False):
library_name = json_data.get("library_name")
if not library_name:
api_o["messages"].append("'library_name' key missing or empty")
api_o["errors"] += 1
return
biosamples = json_data.get("biosamples", [])
if not biosamples:
api_o["messages"].append("'biosamples' key missing or empty")
api_o["errors"] += 1
return
if not isinstance(biosamples, list):
api_o["errors"] += 1
api_o["messages"].append("'biosamples' appears malformed")
return
library = None
try:
initial = fixed_data.fill_fixed_data("api.artifact.library.add", user)
form = forms.TestLibraryForm(json_data, initial=initial)
if form.is_valid():
form.cleaned_data.update(initial)
library, library_created = form_handlers.handle_testlibrary(form, user=user, api_o=api_o, request=request)
if not library:
api_o["ignored"].append(library_name)
api_o["errors"] += 1
else:
api_o["errors"] += 1
api_o["ignored"].append(library_name)
api_o["messages"].append(form.errors.get_json_data())
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
if not library:
return
try:
handle_metadata(json_data.get("metadata", {}), 'artifact', library_name, user, api_o)
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
# Check samples exist, and create them if the right flag has been set
sample_missing = False
sample_forced = False
for biosample in biosamples:
sample_id = biosample.get("central_sample_id")
if json_data.get("force_biosamples"):
# Make dummy sample
biosample, created = models.BiosampleArtifact.objects.get_or_create(
central_sample_id=sample_id,
dice_name=sample_id
)
if created:
TatlVerb(request=request.treq, verb="CREATE", content_object=biosample).save()
api_o["new"].append(_format_tuple(biosample))
api_o["warnings"] += 1
sample_forced = True
if not biosample.created:
sample_p = models.BiosourceSamplingProcess()
sample_p.save()
sampling_rec = models.BiosourceSamplingProcessRecord(
process=sample_p,
out_artifact=biosample,
)
sampling_rec.save()
biosample.created = sample_p # Set the sample collection process
biosample.save()
else:
if models.BiosampleArtifact.objects.filter(central_sample_id=sample_id).count() != 1:
api_o["ignored"].append(sample_id)
api_o["errors"] += 1
sample_missing = True
if sample_missing:
api_o["messages"].append("At least one Biosample in your Library was not registered. No samples have been added to this Library. Register the missing samples, or remove them from your request and try again.")
return
if sample_forced:
api_o["messages"].append("You forced the creation of at least one Biosample. This sample will be ignored by CLIMB pipelines and reports until its metadata has been registered.")
# Add samples to library
for biosample in biosamples:
try:
sample_id = biosample.get("central_sample_id")
initial = fixed_data.fill_fixed_data("api.processrecord.library.add", user)
biosample["library_name"] = library_name
form = forms.TestLibraryBiosampleForm(biosample, initial=initial)
if form.is_valid():
form.cleaned_data.update(initial)
record, record_created = form_handlers.handle_testlibraryrecord(form, user=user, api_o=api_o, request=request)
if not record:
api_o["ignored"].append(sample_id)
api_o["errors"] += 1
else:
api_o["errors"] += 1
api_o["ignored"].append(initial)
api_o["messages"].append(form.errors.get_json_data())
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
return wrap_api_v2(request, f, oauth_permission="majora2.add_biosampleartifact majora2.change_biosampleartifact majora2.add_libraryartifact majora2.change_libraryartifact majora2.add_librarypoolingprocess majora2.change_librarypoolingprocess")
def add_sequencing(request):
def f(request, api_o, json_data, user=None, partial=False):
library_name = json_data.get("library_name")
if not library_name:
api_o["messages"].append("'library_name' key missing or empty")
api_o["errors"] += 1
return
runs = json_data.get("runs", {})
if not runs:
api_o["messages"].append("'runs' key missing or empty")
api_o["errors"] += 1
return
# Try and get the library_name before the form does to provide a better
# error for users submitting data from the online workflow
try:
models.LibraryArtifact.objects.get(dice_name=library_name)
except:
api_o["messages"].append({"library_name": [{"message": "Could not add sequencing to Library %s as it does not exist. Check and fix errors in your library fields and resubmit." % library_name, "code": ""}]})
api_o["errors"] += 1
return
# Add sequencing runs to library
for run in runs:
try:
run = forms.TestSequencingForm.modify_preform(run)
initial = fixed_data.fill_fixed_data("api.process.sequencing.add", user)
run["library_name"] = library_name
run["run_group"] = json_data.get("run_group")
form = forms.TestSequencingForm(run, initial=initial)
if form.is_valid():
form.cleaned_data.update(initial)
sequencing, sequencing_created = form_handlers.handle_testsequencing(form, user=user, api_o=api_o, request=request)
else:
api_o["errors"] += 1
api_o["messages"].append(form.errors.get_json_data())
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
return wrap_api_v2(request, f, oauth_permission="majora2.change_libraryartifact majora2.add_dnasequencingprocess majora2.change_dnasequencingprocess")
def add_digitalresource(request):
def f(request, api_o, json_data, user=None, partial=False):
node_name = json_data.get("node_name")
if not node_name and user and hasattr(user, "profile"):
node_name = user.profile.institute.code
# Just add the node if it does not exist?
node, created = models.DigitalResourceNode.objects.get_or_create(
unique_name = node_name,
dice_name = node_name,
meta_name = node_name,
node_name = node_name,
)
json_data["node_name"] = node.dice_name
# Try to add file
try:
initial = fixed_data.fill_fixed_data("api.artifact.digitalresource.add", user)
form = forms.TestFileForm(json_data, initial=initial)
if form.is_valid():
form.cleaned_data.update(initial)
mfile, created = form_handlers.handle_testdigitalresource(form, user=user, api_o=api_o, request=request)
if not mfile:
api_o["ignored"].append(json_data.get("path"))
api_o["errors"] += 1
elif mfile:
handle_metadata(json_data.get("metadata", {}), 'artifact', mfile.id, user, api_o)
else:
api_o["errors"] += 1
api_o["messages"].append(form.errors.get_json_data())
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
return wrap_api_v2(request, f, oauth_permission="majora2.add_digitalresourceartifact majora2.change_digitalresourceartifact")
def add_tag(request):
def f(request, api_o, json_data, user=None, partial=False):
if json_data.get("artifact"):
handle_metadata(json_data.get("metadata", {}), 'artifact', json_data.get("artifact"), user, api_o)
elif json_data.get("group"):
handle_metadata(json_data.get("metadata", {}), 'group', json_data.get("group"), user, api_o)
elif json_data.get("process"):
handle_metadata(json_data.get("metadata", {}), 'process', json_data.get("process"), user, api_o)
return wrap_api_v2(request, f)
def add_pag_accession(request):
def f(request, api_o, json_data, user=None, partial=False):
pag_name = json_data.get("publish_group")
pag_contains = json_data.get("contains")
if not pag_name:
api_o["messages"].append("'publish_group' key missing or empty")
api_o["errors"] += 1
return
if pag_contains:
qs = models.PublishedArtifactGroup.objects.filter(published_name__contains=pag_name, is_latest=True, is_suppressed=False)
else:
qs = models.PublishedArtifactGroup.objects.filter(published_name=pag_name, is_latest=True, is_suppressed=False)
if qs.count() > 1:
api_o["messages"].append("%s does not uniquely identify a PAG in Majora" % pag_name)
api_o["errors"] += 1
return
pag = qs.first()
if not pag:
api_o["messages"].append("PAG %s not known to Majora" % pag_name)
api_o["errors"] += 1
return
if not json_data.get("service"):
api_o["messages"].append("'service' key missing or empty")
api_o["errors"] += 1
return
if not json_data.get("accession") and json_data.get("public"):
api_o["messages"].append("You are trying to mark this PAG as public, but the 'accession' key is missing or empty.")
api_o["errors"] += 1
return
accession, created = models.TemporaryAccessionRecord.objects.get_or_create(
pag = pag,
service = json_data.get("service"),
)
if accession:
accession.primary_accession = json_data.get("accession")
accession.secondary_accession = json_data.get("accession2")
accession.tertiary_accession = json_data.get("accession3")
accession.save()
if api_o:
api_o["updated"].append(_format_tuple(pag))
if not accession.requested_timestamp and json_data.get("submitted"):
accession.requested_timestamp = timezone.now()
accession.requested_by = user
accession.save()
if json_data.get("public") and not accession.is_public:
accession.is_public = True
accession.public_timestamp = timezone.now()
accession.save()
if json_data.get("public") and json_data.get("public_date"):
try:
accession.public_timestamp = datetime.datetime.strptime(json_data.get("public_date"), '%Y-%m-%d')
accession.save()
            except (ValueError, TypeError):
api_o["warnings"] += 1
api_o["messages"].append("Failed to coerce --public-date %s to a date." % json_data.get("public_date"))
if json_data.get("public") and not pag.is_public:
pag.is_public = True
pag.public_timestamp = timezone.now()
pag.save()
api_o["messages"].append("PAG marked as public")
return wrap_api_v2(request, f, oauth_permission="majora2.add_temporaryaccessionrecord majora2.change_temporaryaccessionrecord")
def get_outbound_summary(request):
def f(request, api_o, json_data, user=None, partial=False):
from django.db.models import Count, F, Q
from dateutil.rrule import rrule, DAILY, WEEKLY, MO
service = json_data.get("service")
if not service or len(service) == 0:
api_o["errors"] += 1
api_o["messages"].append("'service' key missing or empty")
return
#status = json_data.get("status")
#if not status or len(status) == 0:
# api_o["errors"] += 1
# api_o["messages"].append("'status' key missing or empty")
# return
#
#statuses = ["public", "submitted", "rejected"]
#if status.lower() not in statuses:
# api_o["errors"] += 1
# api_o["messages"].append("'status' must be one of: %s" % str(statuses))
# return
gte_date = None
if json_data.get("gte_date"):
try:
gte_date = datetime.datetime.strptime(json_data.get("gte_date", ""), "%Y-%m-%d")
            except (ValueError, TypeError):
api_o["errors"] += 1
api_o["messages"].append("Could not convert %s to date." % json_data.get("gte_date"))
return
accessions = models.TemporaryAccessionRecord.objects.filter(service=service)
api_o["get"] = {}
api_o["get"]["intervals"] = []
api_o["get"]["accessions"] = accessions.count()
if json_data.get("user"):
try:
p = models.Profile.objects.get(user__username=json_data.get("user"))
accessions = accessions.filter(requested_by=p.user)
            except models.Profile.DoesNotExist:
api_o["errors"] += 1
api_o["messages"].append("Could not find named user.")
return
#interval_ends = list(rrule(WEEKLY, wkst=MO, dtstart=gte_date, until=timezone.now().date(), byweekday=MO))
interval_ends = list(rrule(DAILY, wkst=MO, dtstart=gte_date, until=timezone.now().date()))
for i in range(len(interval_ends)):
submitted_accessions = accessions
rejected_accessions = accessions.filter(is_rejected=True)
published_accessions = accessions.filter(is_public=True)
dt = interval_ends[i].date()
last_dt = None
if i == 0:
# Everything before the date
submitted_accessions = submitted_accessions.filter(requested_timestamp__date__lte=dt)
rejected_accessions = rejected_accessions.filter(rejected_timestamp__date__lte=dt)
published_accessions = published_accessions.filter(public_timestamp__date__lte=dt)
else:
# Everything between the last date and current date
last_dt = interval_ends[i-1].date()
submitted_accessions = submitted_accessions.filter(requested_timestamp__date__lte=dt, requested_timestamp__date__gt=last_dt)
rejected_accessions = rejected_accessions.filter(rejected_timestamp__date__lte=dt, rejected_timestamp__date__gt=last_dt)
published_accessions = published_accessions.filter(public_timestamp__date__lte=dt, public_timestamp__date__gt=last_dt)
api_o["get"]["intervals"].append({
"whole": True,
"dt": dt.strftime("%Y-%m-%d"),
"last_dt": last_dt.strftime("%Y-%m-%d") if last_dt else '',
"submitted": submitted_accessions.count(),
"rejected": rejected_accessions.count(),
"released": published_accessions.count(),
})
# Tack on a final timestamp if the last time interval is not today
if interval_ends[-1].date() != timezone.now().date():
last_dt = interval_ends[-1].date() + datetime.timedelta(days=1)
submitted_accessions = accessions
rejected_accessions = accessions.filter(is_rejected=True)
published_accessions = accessions.filter(is_public=True)
submitted_accessions = submitted_accessions.filter(requested_timestamp__date__gt=last_dt)
rejected_accessions = rejected_accessions.filter(rejected_timestamp__date__gt=last_dt)
published_accessions = published_accessions.filter(public_timestamp__date__gt=last_dt)
api_o["get"]["intervals"].append({
"whole": False,
"dt": timezone.now().date().strftime("%Y-%m-%d"),
"last_dt": last_dt.strftime("%Y-%m-%d") if last_dt else '',
"submitted": submitted_accessions.count(),
"rejected": rejected_accessions.count(),
"released": published_accessions.count(),
})
return wrap_api_v2(request, f)
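# Illustrative payload for get_outbound_summary -- a hedged sketch; the service
# name and username are hypothetical.
# {"service": "EXAMPLE-SERVICE", "gte_date": "2021-01-01", "user": "some_username"}
# The response buckets accessions into daily intervals, counting those
# submitted, rejected and released within each interval.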
def get_dashboard_metrics(request):
def f(request, api_o, json_data, user=None, partial=False):
from django.db.models import Count, F, Q, ExpressionWrapper, BooleanField, Subquery
gte_date=None
the_pags = models.PAGQualityReportEquivalenceGroup.objects.filter(test_group__slug="cog-uk-elan-minimal-qc", pag__is_latest=True, pag__is_suppressed=False)
try:
gte_date = datetime.datetime.strptime(json_data.get("gte_date", ""), "%Y-%m-%d")
the_pags = the_pags.filter(last_updated__gt=gte_date)
        except (ValueError, TypeError):
            pass  # no valid gte_date supplied; report metrics over all time
all_pags = {}
for pag in the_pags.values(
site=F('pag__owner__profile__institute__code'),
sourcesite=F('pag__tagged_artifacts__biosampleartifact__created__who__profile__institute__code'),
is_surveillance=ExpressionWrapper(F('pag__tagged_artifacts__biosampleartifact__created__biosourcesamplingprocess__coguk_supp__is_surveillance'), output_field=BooleanField()),
) \
.exclude(sourcesite__isnull=True) \
.annotate(
count=Count('pk'),
failc=Count('pk', filter=Q(is_pass=False)),
passc=Count('pk', filter=Q(is_pass=True)),
surveillance_num=Count('pk', filter=Q(is_surveillance=True)),
surveillance_dom=Count('pk', filter=Q(is_surveillance__isnull=False)),
):
if (pag["sourcesite"], pag["site"]) not in all_pags:
all_pags[(pag["sourcesite"], pag["site"])] = {
'site': pag['site'],
'sourcesite': pag['sourcesite'],
'count': 0,
'surveillance_num': 0,
'surveillance_dom': 0,
'pass_count': 0,
'fail_count': 0,
}
all_pags[(pag["sourcesite"], pag["site"])]["count"] += pag["count"]
all_pags[(pag["sourcesite"], pag["site"])]["pass_count"] += pag["passc"]
all_pags[(pag["sourcesite"], pag["site"])]["fail_count"] += pag["failc"]
all_pags[(pag["sourcesite"], pag["site"])]["surveillance_num"] += pag["surveillance_num"]
all_pags[(pag["sourcesite"], pag["site"])]["surveillance_dom"] += pag["surveillance_dom"]
api_o["get"] = {
"total_sequences": models.PublishedArtifactGroup.objects.filter(is_latest=True, is_suppressed=False).count(),
"site_qc": sorted(all_pags.values(), key=lambda x: x.get('count'), reverse=True),
}
return wrap_api_v2(request, f)
def get_pag_by_qc_celery(request):
def f(request, api_o, json_data, user=None, partial=False):
test_name = json_data.get("test_name")
dra_current_kind = json_data.get("dra_current_kind")
if not test_name or len(test_name) == 0:
api_o["messages"].append("'test_name', key missing or empty")
api_o["errors"] += 1
return
t_group = models.PAGQualityTestEquivalenceGroup.objects.filter(slug=test_name).first()
if not t_group:
api_o["messages"].append("Invalid 'test_name'")
api_o["ignored"].append(test_name)
api_o["errors"] += 1
return
from . import tasks
basic_task = True
get_mode = json_data.get("mode")
if get_mode and len(get_mode) > 0:
if get_mode.lower() == "ena-assembly":
pass
elif get_mode.lower() == "pagfiles":
celery_task = tasks.task_get_pagfiles.delay(None, api_o, json_data, user=user.pk, response_uuid=api_o["request"])
basic_task = False
if basic_task:
celery_task = tasks.task_get_pag_v2.delay(None, api_o, json_data, user=user.pk, response_uuid=api_o["request"])
if celery_task:
api_o["tasks"].append(celery_task.id)
api_o["messages"].append("Call api.majora.task.get with the appropriate task ID later...")
else:
api_o["errors"] += 1
api_o["messages"].append("Could not add requested task to Celery...")
return wrap_api_v2(request, f, permission="majora2.temp_can_read_pags_via_api")
def stream_task_result(request):
def f(request, api_o, json_data, user=None, partial=False):
import requests, boto3
from botocore.exceptions import ClientError
task_id = json_data.get("task_id")
if not task_id:
api_o["messages"].append("'task_id' key missing or empty")
api_o["errors"] += 1
return
from mylims.celery import app
res = app.AsyncResult(task_id)
if res.state == "SUCCESS":
try:
s3 = boto3.client('s3',
aws_access_key_id=settings.CELERY_S3_ACCESS_KEY_ID,
aws_secret_access_key=settings.CELERY_S3_SECRET_ACCESS_KEY,
endpoint_url=settings.CELERY_S3_ENDPOINT_URL,
region_name=None,
)
purl = s3.generate_presigned_url('get_object',
Params={
'Bucket': settings.CELERY_S3_BUCKET,
'Key': app.backend.get_key_for_task(res.id).decode("utf-8"),
},
ExpiresIn=10,
)
r = requests.get(url=purl, stream=True)
return StreamingHttpResponse(r.raw, content_type="application/json")
except ClientError as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
else:
api_o["warnings"] += 1
api_o["messages"].append("Task is not (yet) SUCCESS...")
api_o["task"] = {
"id": task_id,
"state": res.state,
}
return wrap_api_v2(request, f, stream=True)
def get_task_result(request):
def f(request, api_o, json_data, user=None, partial=False):
task_id = json_data.get("task_id")
if not task_id:
api_o["messages"].append("'task_id' key missing or empty")
api_o["errors"] += 1
return
from mylims.celery import app
res = app.AsyncResult(task_id)
state = res.state
cleaned = False
if state == "SUCCESS":
try:
api_o.update(res.get())
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
try:
# Delete task result from backend
res.forget()
cleaned = True
except Exception as e:
api_o["warnings"] += 1
api_o["messages"].append(str(e))
else:
api_o["warnings"] += 1
api_o["messages"].append("Task is not (yet) SUCCESS...")
api_o["task"] = {
"id": task_id,
"state": state,
"cleaned": cleaned,
}
return wrap_api_v2(request, f)
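# Hedged sketch of the intended client flow for the Celery-backed endpoints:
# 1. POST to a task-spawning endpoint (e.g. get_pag_by_qc_celery); the task ID
#    is appended to api_o["tasks"] in the response.
# 2. Poll this endpoint with {"task_id": "<celery-task-uuid>"} until
#    api_o["task"]["state"] is SUCCESS; the task result is merged into the
#    response and then forgotten from the result backend.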
def del_task_result(request):
def f(request, api_o, json_data, user=None, partial=False):
task_id = json_data.get("task_id")
if not task_id:
api_o["messages"].append("'task_id' key missing or empty")
api_o["errors"] += 1
return
from mylims.celery import app
res = app.AsyncResult(task_id)
was_deleted = False
if res.state == "SUCCESS":
try:
                # The current celery S3 result backend hands back the task key as
                # bytes, so decode it to str before asking the backend to delete it
k = app.backend.get_key_for_task(res.id).decode("utf-8")
app.backend.delete(k)
api_o["deleted"] = k
was_deleted = True
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
else:
api_o["warnings"] += 1
api_o["messages"].append("Task is not (yet) SUCCESS...")
api_o["task"] = {
"id": task_id,
"state": res.state,
"deleted": was_deleted,
}
return wrap_api_v2(request, f)
def get_mag(request):
def f(request, api_o, json_data, user=None, partial=False):
path = json_data.get("path")
sep = json_data.get("sep")
if not path or len(path) == 0 or "://" not in path:
api_o["messages"].append("'path' key missing, empty or malformed")
api_o["errors"] += 1
return
node_name, path = path.split("://")
mag = util.get_mag(node_name, path, by_hard_path=True)
if not mag:
api_o["messages"].append("Invalid path.")
api_o["errors"] += 1
return
if (mag.children.count() > 100 or mag.groups.count() > 100 or mag.out_glinks.count() > 100) and not json_data.get("force"):
api_o["messages"].append("This MAG contains more than 100 groups or artifacts.")
api_o["error_code"] = "BIGMAG:%d" % max(mag.children.count(), mag.groups.count(), mag.out_glinks.count())
api_o["errors"] += 1
return
from .serializers import MAGSerializer
api_o["mag"] = MAGSerializer(mag).data
return wrap_api_v2(request, f)
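# Illustrative payload for get_mag -- a hedged sketch; node and path are
# hypothetical. The part before "://" names the DigitalResourceNode, the rest
# is resolved as a hard path.
# {"path": "example-node://hard/path/to/dir", "force": false}
# Set "force" to bypass the guard against serialising MAGs with >100 members.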
def suppress_pag(request):
def f(request, api_o, json_data, user=None, partial=False):
pag_names = json_data.get("publish_group")
reason = json_data.get("reason")
        if not pag_names or not reason:
            api_o["messages"].append("'publish_group' or 'reason' key missing or empty")
            api_o["errors"] += 1
            return
valid_reasons = ["WRONG_BARCODE", "WRONG_METADATA", "WRONG_SEQUENCE", "CONTROL_FAIL"]
if reason.upper() not in valid_reasons:
api_o["messages"].append("Reason must be one of: %s" % str(valid_reasons))
api_o["errors"] += 1
return
        if isinstance(pag_names, str):
pag_names = [pag_names]
for pag_name in pag_names:
pag = models.PublishedArtifactGroup.objects.filter(is_latest=True, published_name=pag_name).first() # There can be only one
if not pag:
api_o["ignored"].append(pag_name)
api_o["warnings"] += 1
api_o["messages"].append("%s not found" % pag_name)
continue
if pag.is_suppressed:
api_o["ignored"].append(pag_name)
api_o["warnings"] += 1
api_o["messages"].append("%s already suppressed" % pag_name)
continue
if pag.owner.profile.institute != user.profile.institute:
api_o["ignored"].append(pag_name)
api_o["errors"] += 1
api_o["messages"].append("Your organisation (%s) does not own %s (%s)" % (user.profile.institute.code, pag_name, pag.owner.profile.institute.code))
continue
pag.is_suppressed = True
pag.suppressed_date = timezone.now()
pag.suppressed_reason = reason.upper()
pag.save()
api_o["updated"].append(_format_tuple(pag))
TatlVerb(request=request.treq, verb="SUPPRESS", content_object=pag).save()
return wrap_api_v2(request, f, permission="majora2.can_suppress_pags_via_api", oauth_permission="majora2.can_suppress_pags_via_api")
def v0_get_artifact_info(request):
def f(request, api_o, json_data, user=None, partial=False):
query = None
artifact = None
query = request.GET.get("q", '')
if not query or len(query) == 0:
api_o["messages"].append("'q' GET param missing or empty")
api_o["errors"] += 1
return
api_o["info"] = {}
try:
artifact = models.MajoraArtifact.objects.get(id=query)
except Exception:
pass
if not artifact:
try:
artifact = models.MajoraArtifact.objects.get(dice_name=query)
except Exception:
pass
# TODO Unify Artifact interface
if not artifact:
try:
node_name, path = query.split("://")
path = "/%s" % path # ???
mag = util.get_mag(node_name, path, artifact=True, by_hard_path=False, prefetch=False) # not using hard path yet
if mag:
artifact = models.DigitalResourceArtifact.objects.get(primary_group=mag, current_path=path)
except Exception:
pass
        if not artifact:
            try:
                artifact_fuzz = models.MajoraArtifact.objects.filter(dice_name__contains=query)
                if artifact_fuzz.count() == 1:
                    artifact = artifact_fuzz[0]
            except Exception:
                pass
if not artifact:
api_o["errors"] += 1
api_o["messages"].append("No artifact for query.")
return
try:
api_o["info"] = artifact.info
except Exception as e:
api_o["errors"] += 1
api_o["messages"].append(str(e))
return wrap_api_v2(request, f, oauth_permission="majora2.view_majoraartifact_info", get=True)
#TODO False permission to disable v2
|
1638257
|
import numpy as np
from .base import Prior, PriorException
from .interpolated import Interped
from .analytical import DeltaFunction, PowerLaw, Uniform, LogUniform, \
SymmetricLogUniform, Cosine, Sine, Gaussian, TruncatedGaussian, HalfGaussian, \
LogNormal, Exponential, StudentT, Beta, Logistic, Cauchy, Gamma, ChiSquared, FermiDirac
from ..utils import infer_args_from_method, infer_parameters_from_function
def conditional_prior_factory(prior_class):
class ConditionalPrior(prior_class):
def __init__(self, condition_func, name=None, latex_label=None, unit=None,
boundary=None, **reference_params):
"""
Parameters
==========
condition_func: func
Functional form of the condition for this prior. The first function argument
                has to be a dictionary for the `reference_params` (see below). The remaining
                arguments are the variables that must be provided before this prior can be
                drawn.
It needs to return a dictionary with the modified values for the
`reference_params` that are being used in the next draw.
                For example, if we have a Uniform prior for `x` that depends on a different
                variable `y`, i.e. `p(x|y)`, with boundaries that depend linearly on `y`,
                then `condition_func` could have the following form:
.. code-block:: python
def condition_func(reference_params, y):
return dict(
minimum=reference_params['minimum'] + y,
maximum=reference_params['maximum'] + y
)
name: str, optional
See superclass
latex_label: str, optional
See superclass
unit: str, optional
See superclass
boundary: str, optional
See superclass
reference_params:
Initial values for attributes such as `minimum`, `maximum`.
                This depends on the `prior_class`; for example, for the Gaussian
prior this is `mu` and `sigma`.
"""
if 'boundary' in infer_args_from_method(super(ConditionalPrior, self).__init__):
super(ConditionalPrior, self).__init__(name=name, latex_label=latex_label,
unit=unit, boundary=boundary, **reference_params)
else:
super(ConditionalPrior, self).__init__(name=name, latex_label=latex_label,
unit=unit, **reference_params)
self._required_variables = None
self.condition_func = condition_func
self._reference_params = reference_params
self.__class__.__name__ = 'Conditional{}'.format(prior_class.__name__)
self.__class__.__qualname__ = 'Conditional{}'.format(prior_class.__qualname__)
def sample(self, size=None, **required_variables):
"""Draw a sample from the prior
Parameters
==========
size: int or tuple of ints, optional
See superclass
required_variables:
Any required variables that this prior depends on
Returns
=======
float: See superclass
"""
self.least_recently_sampled = self.rescale(np.random.uniform(0, 1, size), **required_variables)
return self.least_recently_sampled
def rescale(self, val, **required_variables):
"""
'Rescale' a sample from the unit line element to the prior.
Parameters
==========
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).rescale(val)
def prob(self, val, **required_variables):
"""Return the prior probability of val.
Parameters
==========
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
Returns
=======
float: Prior probability of val
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).prob(val)
def ln_prob(self, val, **required_variables):
"""Return the natural log prior probability of val.
Parameters
==========
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
Returns
=======
float: Natural log prior probability of val
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).ln_prob(val)
def cdf(self, val, **required_variables):
"""Return the cdf of val.
Parameters
==========
val: Union[float, int, array_like]
See superclass
required_variables:
Any required variables that this prior depends on
Returns
=======
float: CDF of val
"""
self.update_conditions(**required_variables)
return super(ConditionalPrior, self).cdf(val)
def update_conditions(self, **required_variables):
"""
This method updates the conditional parameters (depending on the parent class
this could be e.g. `minimum`, `maximum`, `mu`, `sigma`, etc.) of this prior
class depending on the required variables it depends on.
If no variables are given, the most recently used conditional parameters are kept
Parameters
==========
required_variables:
Any required variables that this prior depends on. If none are given,
self.reference_params will be used.
"""
if sorted(list(required_variables)) == sorted(self.required_variables):
parameters = self.condition_func(self.reference_params.copy(), **required_variables)
for key, value in parameters.items():
setattr(self, key, value)
elif len(required_variables) == 0:
return
else:
raise IllegalRequiredVariablesException("Expected kwargs for {}. Got kwargs for {} instead."
.format(self.required_variables,
list(required_variables.keys())))
@property
def reference_params(self):
"""
Initial values for attributes such as `minimum`, `maximum`.
This depends on the `prior_class`, for example for the Gaussian
prior this is `mu` and `sigma`. This is read-only.
"""
return self._reference_params
@property
def condition_func(self):
return self._condition_func
@condition_func.setter
def condition_func(self, condition_func):
if condition_func is None:
self._condition_func = lambda reference_params: reference_params
else:
self._condition_func = condition_func
self._required_variables = infer_parameters_from_function(self.condition_func)
@property
def required_variables(self):
""" The required variables to pass into the condition function. """
return self._required_variables
def get_instantiation_dict(self):
instantiation_dict = super(ConditionalPrior, self).get_instantiation_dict()
for key, value in self.reference_params.items():
instantiation_dict[key] = value
return instantiation_dict
def reset_to_reference_parameters(self):
"""
Reset the object attributes to match the original reference parameters
"""
for key, value in self.reference_params.items():
setattr(self, key, value)
def __repr__(self):
"""Overrides the special method __repr__.
Returns a representation of this instance that resembles how it is instantiated.
Works correctly for all child classes
Returns
=======
str: A string representation of this instance
"""
prior_name = self.__class__.__name__
instantiation_dict = self.get_instantiation_dict()
instantiation_dict["condition_func"] = ".".join([
instantiation_dict["condition_func"].__module__,
instantiation_dict["condition_func"].__name__
])
args = ', '.join(['{}={}'.format(key, repr(instantiation_dict[key]))
for key in instantiation_dict])
return "{}({})".format(prior_name, args)
return ConditionalPrior
class ConditionalBasePrior(conditional_prior_factory(Prior)):
pass
class ConditionalUniform(conditional_prior_factory(Uniform)):
pass
class ConditionalDeltaFunction(conditional_prior_factory(DeltaFunction)):
pass
class ConditionalPowerLaw(conditional_prior_factory(PowerLaw)):
pass
class ConditionalGaussian(conditional_prior_factory(Gaussian)):
pass
class ConditionalLogUniform(conditional_prior_factory(LogUniform)):
pass
class ConditionalSymmetricLogUniform(conditional_prior_factory(SymmetricLogUniform)):
pass
class ConditionalCosine(conditional_prior_factory(Cosine)):
pass
class ConditionalSine(conditional_prior_factory(Sine)):
pass
class ConditionalTruncatedGaussian(conditional_prior_factory(TruncatedGaussian)):
pass
class ConditionalHalfGaussian(conditional_prior_factory(HalfGaussian)):
pass
class ConditionalLogNormal(conditional_prior_factory(LogNormal)):
pass
class ConditionalExponential(conditional_prior_factory(Exponential)):
pass
class ConditionalStudentT(conditional_prior_factory(StudentT)):
pass
class ConditionalBeta(conditional_prior_factory(Beta)):
pass
class ConditionalLogistic(conditional_prior_factory(Logistic)):
pass
class ConditionalCauchy(conditional_prior_factory(Cauchy)):
pass
class ConditionalGamma(conditional_prior_factory(Gamma)):
pass
class ConditionalChiSquared(conditional_prior_factory(ChiSquared)):
pass
class ConditionalFermiDirac(conditional_prior_factory(FermiDirac)):
pass
class ConditionalInterped(conditional_prior_factory(Interped)):
pass
class DirichletElement(ConditionalBeta):
r"""
Single element in a dirichlet distribution
The probability scales as
.. math::
p(x_n) \propto (x_\max - x_n)^{(N - n - 2)}
for :math:`x_n < x_\max`, where :math:`x_\max` is the sum of :math:`x_i`
for :math:`i < n`
Examples
========
n_dimensions = 1:
.. math::
p(x_0) \propto 1 ; 0 < x_0 < 1
n_dimensions = 2:
.. math::
p(x_0) &\propto (1 - x_0) ; 0 < x_0 < 1
p(x_1) &\propto 1 ; 0 < x_1 < 1
Parameters
==========
order: int
Order of this element of the dirichlet distribution.
n_dimensions: int
Total number of elements of the dirichlet distribution
label: str
Label for the dirichlet distribution.
This should be the same for all elements.
"""
def __init__(self, order, n_dimensions, label):
""" """
super(DirichletElement, self).__init__(
minimum=0, maximum=1, alpha=1, beta=n_dimensions - order - 1,
name=label + str(order),
condition_func=self.dirichlet_condition
)
self.label = label
self.n_dimensions = n_dimensions
self.order = order
self._required_variables = [
label + str(ii) for ii in range(order)
]
self.__class__.__name__ = 'Dirichlet'
    def dirichlet_condition(self, reference_params, **kwargs):
        remaining = 1 - sum(
            [kwargs[self.label + str(ii)] for ii in range(self.order)]
        )
        return dict(minimum=reference_params["minimum"], maximum=remaining)
def __repr__(self):
return Prior.__repr__(self)
def get_instantiation_dict(self):
return Prior.get_instantiation_dict(self)
class ConditionalPriorException(PriorException):
""" General base class for all conditional prior exceptions """
class IllegalRequiredVariablesException(ConditionalPriorException):
""" Exception class for exceptions relating to handling the required variables. """
|
1638274
|
import numpy as np
import torch as th
from torchvision import transforms
from .data_utils import is_tuple_or_list
class BaseDataset:
"""An abstract class representing a Dataset.
All other datasets should subclass it. All subclasses should override
``__len__``, that provides the size of the dataset, and ``__getitem__``,
supporting integer indexing in range from 0 to len(self) exclusive.
"""
def __len__(self):
return len(self.inputs) if not isinstance(self.inputs, (tuple,list)) else len(self.inputs[0])
def add_input_transform(self, transform, add_to_front=True, idx=None):
if idx is None:
            idx = np.arange(self.num_inputs)
elif not is_tuple_or_list(idx):
idx = [idx]
if add_to_front:
for i in idx:
self.input_transform[i] = transforms.Compose([transform, self.input_transform[i]])
else:
for i in idx:
self.input_transform[i] = transforms.Compose([self.input_transform[i], transform])
def add_target_transform(self, transform, add_to_front=True, idx=None):
if idx is None:
            idx = np.arange(self.num_targets)
elif not is_tuple_or_list(idx):
idx = [idx]
if add_to_front:
for i in idx:
self.target_transform[i] = transforms.Compose([transform, self.target_transform[i]])
else:
for i in idx:
self.target_transform[i] = transforms.Compose([self.target_transform[i], transform])
def add_co_transform(self, transform, add_to_front=True, idx=None):
if idx is None:
            idx = np.arange(self.min_inputs_or_targets)
elif not is_tuple_or_list(idx):
idx = [idx]
if add_to_front:
for i in idx:
self.co_transform[i] = transforms.Compose([transform, self.co_transform[i]])
else:
for i in idx:
self.co_transform[i] = transforms.Compose([self.co_transform[i], transform])
def load(self, num_samples=None, load_range=None):
"""
Load all data or a subset of the data into actual memory.
For instance, if the inputs are paths to image files, then this
function will actually load those images.
:param num_samples: (int (optional)):
number of samples to load. if None, will load all
:param load_range: (numpy array of integers (optional)):
the index range of images to load
e.g. np.arange(4) loads the first 4 inputs+targets
"""
def _parse_shape(x):
if isinstance(x, (list,tuple)):
return (len(x),)
elif isinstance(x, th.Tensor):
return x.size()
else:
return (1,)
if num_samples is None and load_range is None:
num_samples = len(self)
load_range = np.arange(num_samples)
elif num_samples is None and load_range is not None:
num_samples = len(load_range)
elif num_samples is not None and load_range is None:
load_range = np.arange(num_samples)
if self.has_target:
for enum_idx, sample_idx in enumerate(load_range):
input_sample, target_sample = self.__getitem__(sample_idx)
if enum_idx == 0:
if self.num_inputs == 1:
_shape = [len(load_range)] + list(_parse_shape(input_sample))
inputs = np.empty(_shape)
else:
inputs = []
for i in range(self.num_inputs):
_shape = [len(load_range)] + list(_parse_shape(input_sample[i]))
inputs.append(np.empty(_shape))
#inputs = [np.empty((len(load_range), *_parse_shape(input_sample[i]))) for i in range(self.num_inputs)]
if self.num_targets == 1:
_shape = [len(load_range)] + list(_parse_shape(target_sample))
targets = np.empty(_shape)
#targets = np.empty((len(load_range), *_parse_shape(target_sample)))
else:
targets = []
for i in range(self.num_targets):
_shape = [len(load_range)] + list(_parse_shape(target_sample[i]))
targets.append(np.empty(_shape))
#targets = [np.empty((len(load_range), *_parse_shape(target_sample[i]))) for i in range(self.num_targets)]
if self.num_inputs == 1:
inputs[enum_idx] = input_sample
else:
for i in range(self.num_inputs):
inputs[i][enum_idx] = input_sample[i]
if self.num_targets == 1:
targets[enum_idx] = target_sample
else:
for i in range(self.num_targets):
targets[i][enum_idx] = target_sample[i]
return inputs, targets
else:
for enum_idx, sample_idx in enumerate(load_range):
input_sample = self.__getitem__(sample_idx)
if enum_idx == 0:
if self.num_inputs == 1:
_shape = [len(load_range)] + list(_parse_shape(input_sample))
inputs = np.empty(_shape)
#inputs = np.empty((len(load_range), *_parse_shape(input_sample)))
else:
inputs = []
for i in range(self.num_inputs):
_shape = [len(load_range)] + list(_parse_shape(input_sample[i]))
inputs.append(np.empty(_shape))
#inputs = [np.empty((len(load_range), *_parse_shape(input_sample[i]))) for i in range(self.num_inputs)]
if self.num_inputs == 1:
inputs[enum_idx] = input_sample
else:
for i in range(self.num_inputs):
inputs[i][enum_idx] = input_sample[i]
return inputs
def fit_transforms(self):
"""
Make a single pass through the entire dataset in order to fit
any parameters of the transforms which require the entire dataset.
e.g. StandardScaler() requires mean and std for the entire dataset.
        If you don't call this fit function, then transforms which require properties
of the entire dataset will just work at the batch level.
e.g. StandardScaler() will normalize each batch by the specific batch mean/std
"""
it_fit = hasattr(self.input_transform, 'update_fit')
tt_fit = hasattr(self.target_transform, 'update_fit')
ct_fit = hasattr(self.co_transform, 'update_fit')
if it_fit or tt_fit or ct_fit:
for sample_idx in range(len(self)):
if hasattr(self, 'input_loader'):
x = self.input_loader(self.inputs[sample_idx])
else:
x = self.inputs[sample_idx]
if it_fit:
self.input_transform.update_fit(x)
if self.has_target:
if hasattr(self, 'target_loader'):
y = self.target_loader(self.targets[sample_idx])
else:
y = self.targets[sample_idx]
if tt_fit:
self.target_transform.update_fit(y)
if ct_fit:
self.co_transform.update_fit(x,y)
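# A minimal concrete subclass -- a hedged sketch, assuming (from the methods
# above) that subclasses provide `inputs`, `targets`, `num_inputs`,
# `num_targets`, `min_inputs_or_targets`, `has_target`, the per-index
# transform lists and `__getitem__`. The class name is illustrative.
class TensorDataset(BaseDataset):
    def __init__(self, inputs, targets=None):
        self.inputs = inputs                    # th.Tensor of shape (N, ...)
        self.targets = targets                  # th.Tensor of shape (N, ...) or None
        self.num_inputs = 1
        self.num_targets = 1
        self.min_inputs_or_targets = 1
        self.has_target = targets is not None
        # Empty Compose objects act as identity transforms that
        # add_input_transform / add_target_transform can compose onto
        self.input_transform = [transforms.Compose([])]
        self.target_transform = [transforms.Compose([])]
        self.co_transform = [lambda x, y: (x, y)]  # identity co-transform

    def __getitem__(self, index):
        x = self.input_transform[0](self.inputs[index])
        if self.has_target:
            y = self.target_transform[0](self.targets[index])
            x, y = self.co_transform[0](x, y)
            return x, y
        return x

# Example usage of the sketch above:
# ds = TensorDataset(th.randn(10, 3), th.arange(10))
# x, y = ds[0]
# inputs, targets = ds.load(num_samples=4)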