code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from setuptools import setup, find_packages
import os
import deloqv
setup(name = 'deloqv',
version = '0.1.1',
url='https://github.com/ELKHMISSI/Project.git',
author = '<NAME>, NIASSE, FONTANA',
author_email = '<EMAIL>',
maintainer = '<NAME>, NIASSE, FONTANA',
maintainer_email = '<EMAIL>',
keywords = 'Prices distances south France highways',
packages = ['deloqv'],
description = 'voir presentation',
license = 'MIT',
platforms = 'ALL',
)
|
[
"setuptools.setup"
] |
[((69, 439), 'setuptools.setup', 'setup', ([], {'name': '"""deloqv"""', 'version': '"""0.1.1"""', 'url': '"""https://github.com/ELKHMISSI/Project.git"""', 'author': '"""<NAME>, NIASSE, FONTANA"""', 'author_email': '"""<EMAIL>"""', 'maintainer': '"""<NAME>, NIASSE, FONTANA"""', 'maintainer_email': '"""<EMAIL>"""', 'keywords': '"""Prices distances south France highways"""', 'packages': "['deloqv']", 'description': '"""voir presentation"""', 'license': '"""MIT"""', 'platforms': '"""ALL"""'}), "(name='deloqv', version='0.1.1', url=\n 'https://github.com/ELKHMISSI/Project.git', author=\n '<NAME>, NIASSE, FONTANA', author_email='<EMAIL>', maintainer=\n '<NAME>, NIASSE, FONTANA', maintainer_email='<EMAIL>', keywords=\n 'Prices distances south France highways', packages=['deloqv'],\n description='voir presentation', license='MIT', platforms='ALL')\n", (74, 439), False, 'from setuptools import setup, find_packages\n')]
|
# SPDX-FileCopyrightText: 2020 The Magma Authors.
# SPDX-FileCopyrightText: 2022 Open Networking Foundation <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
import json
from unittest import TestCase
from jsonschema import ValidationError
from magma.eventd.event_validator import EventValidator
class EventValidationTests(TestCase):
def setUp(self):
# A test event registry that specifies the test events
test_events_location = {
'module': 'orc8r',
'filename': 'test_event_definitions.yml',
}
config = {
'fluent_bit_port': '',
'tcp_timeout': '',
'event_registry': {
'simple_event': test_events_location,
'array_and_object_event': test_events_location,
'null_event': test_events_location,
},
}
self.validator = EventValidator(config)
def test_event_registration(self):
data = json.dumps({
'foo': 'magma', # required
'bar': 123,
})
# Errors when event is not registered
with self.assertRaises(Exception):
self.validator.validate_event(data, 'non_existent_event')
# Does not error when event is registered
self.validator.validate_event(data, 'simple_event')
def test_field_consistency(self):
# Errors when there are missing fields (required fields)
with self.assertRaises(ValidationError):
# foo is missing
data = json.dumps({
'bar': 123,
})
self.validator.validate_event(data, 'simple_event')
# Errors on excess fields (additionalProperties set to false)
with self.assertRaises(ValidationError):
data = json.dumps({
'extra_field': 12,
'foo': 'asdf',
'bar': 123,
})
self.validator.validate_event(data, 'simple_event')
# Errors when there are missing AND excess fields
with self.assertRaises(ValidationError):
data = json.dumps({
'extra_field': 12,
'bar': 123,
})
# foo is missing
self.validator.validate_event(data, 'simple_event')
# Does not error when the fields are equivalent
data = json.dumps({
'foo': 'magma', # required
'bar': 123,
})
self.validator.validate_event(data, 'simple_event')
# Does not error when event has no fields
self.validator.validate_event(json.dumps({}), 'null_event')
def test_type_checking(self):
data = json.dumps({
'an_array': ["a", "b"],
'an_object': {
"a_key": 1,
"b_key": 1,
},
})
# Does not error when the types match
self.validator.validate_event(data, 'array_and_object_event')
# Errors when the type is wrong for primitive fields
with self.assertRaises(ValidationError):
data = json.dumps({
'foo': 123,
'bar': 'asdf',
})
self.validator.validate_event(data, 'simple_event')
# Errors when the type is wrong for array
with self.assertRaises(ValidationError):
data = json.dumps({
'an_array': [1, 2, 3],
'an_object': {},
})
self.validator.validate_event(data, 'array_and_object_event')
# Errors when the value type is wrong for object
with self.assertRaises(ValidationError):
data = json.dumps({
'an_array': ["a", "b"],
'an_object': {
"a_key": "wrong_value",
},
})
self.validator.validate_event(data, 'array_and_object_event')
|
[
"json.dumps",
"magma.eventd.event_validator.EventValidator"
] |
[((889, 911), 'magma.eventd.event_validator.EventValidator', 'EventValidator', (['config'], {}), '(config)\n', (903, 911), False, 'from magma.eventd.event_validator import EventValidator\n'), ((967, 1007), 'json.dumps', 'json.dumps', (["{'foo': 'magma', 'bar': 123}"], {}), "({'foo': 'magma', 'bar': 123})\n", (977, 1007), False, 'import json\n'), ((2354, 2394), 'json.dumps', 'json.dumps', (["{'foo': 'magma', 'bar': 123}"], {}), "({'foo': 'magma', 'bar': 123})\n", (2364, 2394), False, 'import json\n'), ((2671, 2746), 'json.dumps', 'json.dumps', (["{'an_array': ['a', 'b'], 'an_object': {'a_key': 1, 'b_key': 1}}"], {}), "({'an_array': ['a', 'b'], 'an_object': {'a_key': 1, 'b_key': 1}})\n", (2681, 2746), False, 'import json\n'), ((1526, 1550), 'json.dumps', 'json.dumps', (["{'bar': 123}"], {}), "({'bar': 123})\n", (1536, 1550), False, 'import json\n'), ((1785, 1843), 'json.dumps', 'json.dumps', (["{'extra_field': 12, 'foo': 'asdf', 'bar': 123}"], {}), "({'extra_field': 12, 'foo': 'asdf', 'bar': 123})\n", (1795, 1843), False, 'import json\n'), ((2098, 2141), 'json.dumps', 'json.dumps', (["{'extra_field': 12, 'bar': 123}"], {}), "({'extra_field': 12, 'bar': 123})\n", (2108, 2141), False, 'import json\n'), ((2591, 2605), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (2601, 2605), False, 'import json\n'), ((3075, 3114), 'json.dumps', 'json.dumps', (["{'foo': 123, 'bar': 'asdf'}"], {}), "({'foo': 123, 'bar': 'asdf'})\n", (3085, 3114), False, 'import json\n'), ((3345, 3397), 'json.dumps', 'json.dumps', (["{'an_array': [1, 2, 3], 'an_object': {}}"], {}), "({'an_array': [1, 2, 3], 'an_object': {}})\n", (3355, 3397), False, 'import json\n'), ((3645, 3720), 'json.dumps', 'json.dumps', (["{'an_array': ['a', 'b'], 'an_object': {'a_key': 'wrong_value'}}"], {}), "({'an_array': ['a', 'b'], 'an_object': {'a_key': 'wrong_value'}})\n", (3655, 3720), False, 'import json\n')]
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test handlers."""
from __future__ import absolute_import, print_function
import pytest
from flask import session, url_for
from flask_login import current_user
from flask_oauthlib.client import OAuth as FlaskOAuth
from flask_security import login_user, logout_user
from flask_security.confirmable import _security
from helpers import check_redirect_location
from werkzeug.routing import BuildError
from invenio_oauthclient import InvenioOAuthClient, current_oauthclient
from invenio_oauthclient.errors import AlreadyLinkedError, OAuthResponseError
from invenio_oauthclient.handlers import response_token_setter, token_getter
from invenio_oauthclient.models import RemoteToken
from invenio_oauthclient.utils import oauth_authenticate
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
def test_token_setter(app, remote):
"""Test token setter on response from OAuth server."""
# OAuth1
resp_oauth1 = {
'name': '<NAME>',
'expires_in': 3599,
'oauth_token': 'test_access_token',
'oauth_token_secret': 'test_refresh_token',
'scope': '/authenticate',
'token_type': 'bearer',
}
assert not response_token_setter(remote, resp_oauth1)
# Bad request
resp_bad = {
'invalid': 'invalid',
}
with pytest.raises(OAuthResponseError):
response_token_setter(remote, resp_bad)
def test_token_getter(remote, models_fixture, app):
"""Test token getter on response from OAuth server."""
datastore = app.extensions['invenio-accounts'].datastore
existing_email = '<EMAIL>.org'
user = datastore.find_user(email=existing_email)
# Missing RemoteToken
oauth_authenticate('dev', user)
assert not token_getter(remote)
# Populated RemoteToken
RemoteToken.create(user.id, 'testkey', 'mytoken', 'mysecret')
oauth_authenticate('dev', user)
assert token_getter(remote) == ('mytoken', 'mysecret')
|
[
"invenio_oauthclient.handlers.token_getter",
"pytest.raises",
"invenio_oauthclient.utils.oauth_authenticate",
"invenio_oauthclient.handlers.response_token_setter",
"invenio_oauthclient.models.RemoteToken.create"
] |
[((1995, 2026), 'invenio_oauthclient.utils.oauth_authenticate', 'oauth_authenticate', (['"""dev"""', 'user'], {}), "('dev', user)\n", (2013, 2026), False, 'from invenio_oauthclient.utils import oauth_authenticate\n'), ((2096, 2157), 'invenio_oauthclient.models.RemoteToken.create', 'RemoteToken.create', (['user.id', '"""testkey"""', '"""mytoken"""', '"""mysecret"""'], {}), "(user.id, 'testkey', 'mytoken', 'mysecret')\n", (2114, 2157), False, 'from invenio_oauthclient.models import RemoteToken\n'), ((2162, 2193), 'invenio_oauthclient.utils.oauth_authenticate', 'oauth_authenticate', (['"""dev"""', 'user'], {}), "('dev', user)\n", (2180, 2193), False, 'from invenio_oauthclient.utils import oauth_authenticate\n'), ((1495, 1537), 'invenio_oauthclient.handlers.response_token_setter', 'response_token_setter', (['remote', 'resp_oauth1'], {}), '(remote, resp_oauth1)\n', (1516, 1537), False, 'from invenio_oauthclient.handlers import response_token_setter, token_getter\n'), ((1619, 1652), 'pytest.raises', 'pytest.raises', (['OAuthResponseError'], {}), '(OAuthResponseError)\n', (1632, 1652), False, 'import pytest\n'), ((1662, 1701), 'invenio_oauthclient.handlers.response_token_setter', 'response_token_setter', (['remote', 'resp_bad'], {}), '(remote, resp_bad)\n', (1683, 1701), False, 'from invenio_oauthclient.handlers import response_token_setter, token_getter\n'), ((2042, 2062), 'invenio_oauthclient.handlers.token_getter', 'token_getter', (['remote'], {}), '(remote)\n', (2054, 2062), False, 'from invenio_oauthclient.handlers import response_token_setter, token_getter\n'), ((2205, 2225), 'invenio_oauthclient.handlers.token_getter', 'token_getter', (['remote'], {}), '(remote)\n', (2217, 2225), False, 'from invenio_oauthclient.handlers import response_token_setter, token_getter\n')]
|
import io
import json
import enum
import gzip
from sota_extractor import errors
class Format(enum.Enum):
"""Output format.
At the moment only supported format is JSON, but in the future YAML support
is planned.
"""
json = "json"
json_gz = "json.gz"
def dump(data, filename, fmt=Format.json, encoding="utf-8"):
"""Write sota data to file.
Intention of this helper function is to always have maximally similar
json files after export. To do that it will always sort json object keys
alphabetically, use the same indent, same encoding and same serializer.
Args:
data: Data for serialization.
filename (str): Path to the file in which the data should be
serialized.
fmt (Format): Serialization format.
encoding (str): File encoding.
"""
if fmt == Format.json:
with io.open(filename, mode="w", encoding=encoding) as fp:
json.dump(data, fp=fp, indent=2, sort_keys=True)
elif fmt == Format.json_gz:
with gzip.open(filename, mode="wb") as fp:
fp.write(
json.dumps(data, fp=fp, indent=2, sort_keys=True).encode(
encoding
)
)
else:
raise errors.UnsupportedFormat(fmt)
def load(filename, fmt=Format.json, encoding="utf-8"):
"""Load sota data from file.
Args:
filename (str): Path to the file from which the data should be
deserialized.
fmt (Format): Serialization format.
encoding (str): File encoding.
"""
if fmt == Format.json:
with io.open(filename, mode="r", encoding=encoding) as fp:
return json.load(fp)
elif fmt == Format.json_gz:
with gzip.open(filename, mode="rb") as fp:
return json.loads(fp.read().decode(encoding))
else:
raise errors.UnsupportedFormat(fmt)
|
[
"json.dump",
"json.load",
"gzip.open",
"json.dumps",
"io.open",
"sota_extractor.errors.UnsupportedFormat"
] |
[((873, 919), 'io.open', 'io.open', (['filename'], {'mode': '"""w"""', 'encoding': 'encoding'}), "(filename, mode='w', encoding=encoding)\n", (880, 919), False, 'import io\n'), ((939, 987), 'json.dump', 'json.dump', (['data'], {'fp': 'fp', 'indent': '(2)', 'sort_keys': '(True)'}), '(data, fp=fp, indent=2, sort_keys=True)\n', (948, 987), False, 'import json\n'), ((1252, 1281), 'sota_extractor.errors.UnsupportedFormat', 'errors.UnsupportedFormat', (['fmt'], {}), '(fmt)\n', (1276, 1281), False, 'from sota_extractor import errors\n'), ((1611, 1657), 'io.open', 'io.open', (['filename'], {'mode': '"""r"""', 'encoding': 'encoding'}), "(filename, mode='r', encoding=encoding)\n", (1618, 1657), False, 'import io\n'), ((1684, 1697), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (1693, 1697), False, 'import json\n'), ((1863, 1892), 'sota_extractor.errors.UnsupportedFormat', 'errors.UnsupportedFormat', (['fmt'], {}), '(fmt)\n', (1887, 1892), False, 'from sota_extractor import errors\n'), ((1033, 1063), 'gzip.open', 'gzip.open', (['filename'], {'mode': '"""wb"""'}), "(filename, mode='wb')\n", (1042, 1063), False, 'import gzip\n'), ((1743, 1773), 'gzip.open', 'gzip.open', (['filename'], {'mode': '"""rb"""'}), "(filename, mode='rb')\n", (1752, 1773), False, 'import gzip\n'), ((1109, 1158), 'json.dumps', 'json.dumps', (['data'], {'fp': 'fp', 'indent': '(2)', 'sort_keys': '(True)'}), '(data, fp=fp, indent=2, sort_keys=True)\n', (1119, 1158), False, 'import json\n')]
|
from cryptography.fernet import Fernet
import codecs
import chardet
def encrypt(database, llave):
key = llave
encoded_msg = database.encode()
f = Fernet(key)
encriptacion = f.encrypt(encoded_msg)
return encriptacion.decode()
def decrypt(encode_Database,llave):
key = llave
f = Fernet(key)
dec_msg = f.decrypt(encode_Database.encode())
return dec_msg.decode()
def convert_ascii(chain):
try:
s = chain
a = s.encode('ascii',errors='strict')
return a
except:
return 1
##def UTF():
def convert_utf8(chain):
try:
s = chain
a = s.encode(errors='strict')
return a
except:
return 1
def convert_iso(chain):
try:
s = chain
a = s.encode('iso-8859-1',errors='strict')
return a
except:
return 1
def gen_convert(chain,encoding):
if encoding == "ascii":
return convert_ascii(chain)
elif encoding == "utf8":
return convert_utf8(chain)
elif encoding == "iso-8859-1":
return convert_iso(chain)
else:
return 3
|
[
"cryptography.fernet.Fernet"
] |
[((178, 189), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (184, 189), False, 'from cryptography.fernet import Fernet\n'), ((340, 351), 'cryptography.fernet.Fernet', 'Fernet', (['key'], {}), '(key)\n', (346, 351), False, 'from cryptography.fernet import Fernet\n')]
|
import threading
import socket
import sys
import time
class Client:
def __init__(self):
super().__init__()
self.kill = False
self.host = "127.0.0.1"
self.port = 3006
def receive_history(self):
data = str()
while True:
try:
chunk = self.socket.recv(100)
chunk = chunk.decode("UTF-8")
data += chunk
if "/end" in data:
break
except socket.error:
time.sleep(0.1)
messages = data.split(';')
if len(messages) > 1:
for message in messages[:-1]:
[client, text] = message.split('|')
print(client + ": " + text)
def connect(self):
print("Connecting...")
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((self.host, self.port))
self.socket.setblocking(False)
def reading_socket(self):
while not self.kill:
try:
data = self.socket.recv(1024)
[client, text] = data.decode("UTF-8").split('|')
print(client + ": " + text)
except socket.error:
time.sleep(0.3)
def run(self):
try:
reading_thread = threading.Thread(target=self.reading_socket)
reading_thread.start()
nickname = input("Enter your nickname: ")
self.socket.sendall(nickname.encode("UTF-8"))
print("Connected successfuly")
self.receive_history()
while True:
message = input()
self.socket.sendall(message.encode("UTF-8"))
except KeyboardInterrupt:
self.kill = True
print("Terminated")
sys.exit(0)
client = Client()
client.connect()
client.run()
|
[
"threading.Thread",
"socket.socket",
"sys.exit",
"time.sleep"
] |
[((848, 897), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (861, 897), False, 'import socket\n'), ((1348, 1392), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.reading_socket'}), '(target=self.reading_socket)\n', (1364, 1392), False, 'import threading\n'), ((1845, 1856), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1853, 1856), False, 'import sys\n'), ((526, 541), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (536, 541), False, 'import time\n'), ((1270, 1285), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (1280, 1285), False, 'import time\n')]
|
import tensorflow as tf
import numpy as np
from tensorflow.python.ops.rnn import _transpose_batch_time
class Decoder:
def __init__(self, **kwargs):
self.encodings = None
self.num_sentence_characters = kwargs['num_sentence_characters']
self.dict_length = kwargs['dict_length']
self.max_num_words = kwargs['max_num_words']
self.batch_size = kwargs['batch_size']
self.simple_decoder = True
self.global_lat_decoder = False
self.decoder_units = kwargs['decoder_units']
self.units_encoder_lstm = kwargs['encoder_dim']
self.lat_word_dim = kwargs['lat_word_dim']
self.global_lat_dim = kwargs['global_lat_dim']
self.decoder_p3_units = kwargs['decoder_p3_units']
def make_global_latent(self, values, units_dense):
mean_pool = tf.reduce_mean(values, axis=-1)
pre_dist1 = tf.layers.dense(inputs=mean_pool, activation=tf.nn.relu, units=units_dense)
pre_dist2 = tf.layers.dense(inputs=pre_dist1, activation=None, units=units_dense * 2)
mu, log_sig = tf.split(tf.cast(pre_dist2, dtype=tf.float32), axis=-1, num_or_size_splits=2)
return mu, log_sig
def decoder1_p1(self, reuse, units_bilstm, encodings=None):
if encodings is None:
encodings = self.encodings
with tf.variable_scope('decoder_p1', reuse=reuse):
cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
values, states = tf.nn.bidirectional_dynamic_rnn(inputs=encodings, dtype=tf.float32, cell_bw=cell1,
cell_fw=cell2, sequence_length=self.sentence_lens)
values = tf.concat(values, 2)
return values
def decoder2_p1(self, reuse, units_bilstm, global_latent):
# needs some work
# input = [global_latent for i in range(self.num_sentence_characters)]
with tf.variable_scope('decoder_p1', reuse=reuse):
cell1 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
cell2 = tf.contrib.rnn.LSTMCell(num_units=units_bilstm)
values, states = tf.nn.bidirectional_dynamic_rnn(inputs=input, dtype=tf.float32, cell_bw=cell1,
cell_fw=cell2, sequence_length=tf.cast(hap_lens, tf.int32))
values = tf.concat(values, 2)
return values
def bahd_attention(self, queries, values, reuse):
with tf.variable_scope('attention_layer', reuse=reuse):
w1 = tf.get_variable(name='query_w', shape=[self.decoder_units, self.lat_word_dim])
w2 = tf.get_variable(name='value_w', shape=[self.lat_word_dim, self.lat_word_dim])
v = tf.get_variable(name='v', shape=[self.lat_word_dim])
print('here')
conv_q = tf.reshape(tf.einsum('ij,jk->ik', queries, w1), [-1, 1, self.lat_word_dim])
print('here1')
a_p1 = tf.reshape(tf.tile(conv_q, [1, 1, self.max_num_words]),
[self.batch_size, self.max_num_words, self.lat_word_dim])
print('here2')
print(w2)
print('a p1 {}'.format(a_p1))
a_p2 = tf.einsum('ijk,kl->ijl', values, w2)
print('a p2 {}'.format(a_p2))
print('here3')
out = tf.einsum('k,ijk->ij', v, tf.nn.tanh(name='combine', x=a_p1 + a_p2))
print('MAT for softmax {}'.format(out))
out_norm = tf.nn.softmax(out, dim=-1)
context = tf.reduce_sum(values * tf.reshape(tf.stack([out_norm for _ in range(self.lat_word_dim)], -1),
[self.batch_size, self.max_num_words, self.lat_word_dim]),
axis=-2)
# context2 = tf.matmul(tf.reshape(tf.diag(out_norm),[-1,self.max_num_words]),tf.transpose(values,[-1,self.max_num_words]))
# is this the same
# print('ALT CONTEXT {}'.format(context2))
print('CONTEX SHAPE {}'.format(context))
l1 = tf.concat([context, queries], axis=-1)
l1 = tf.reshape(l1, [self.batch_size, self.lat_word_dim + self.decoder_units])
return l1
def decoder_p2(self, num_hidden_word_units, inputs, sequence_length, global_latent, reuse, context_dim, max_time):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
context = self.bahd_attention(
queries=tf.zeros(shape=[self.batch_size, num_hidden_word_units], dtype=tf.float32), values=inputs,
reuse=None)
# next_input = tf.concat([tf.zeros(shape=[self.batch_size,self.lat_word_dim],dtype=tf.float32),tf.zeros(shape=[self.batch_size,self.global_lat_dim],dtype=tf.float32)],axis=-1)
next_input = tf.zeros(shape=[self.batch_size, self.lat_word_dim * 2 + self.global_lat_dim],
dtype=tf.float32)
else:
next_cell_state = cell_state
context = self.bahd_attention(queries=cell_output, values=inputs, reuse=True)
# should try passing in logits
# should also try doing the final decoding in a seperate RNN
# should try using a global latent vector here asap
# prediction = tf.layers.dense(inputs=context,activation=None,units=self.dict_length)
# took context out of decoder loop because softmax may be saturating
next_input = tf.concat([context, global_latent], axis=-1)
next_loop_state = loop_state.write(time - 1, context)
elements_finished = (time >= sequence_length)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p2', reuse=reuse):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
# loop_state_out = _transpose_batch_time(loop_state_ta.stack())
return loop_state_ta
def decoder_p3(self, inputs, reuse, max_time, sequence_length):
# _inputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time,name='context_array')
# _inputs_ta = _inputs_ta.unstack(tf.transpose(inputs,[1,0,2]))
_inputs_ta = inputs
outputs_ta = tf.TensorArray(dtype=tf.float32, size=max_time, name='pred_char_array')
cell = tf.contrib.rnn.LSTMCell(self.decoder_p3_units)
def loop_fn(time, cell_output, cell_state, loop_state):
next_loop_state = loop_state
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_input = tf.concat(
[tf.zeros(shape=[self.batch_size, self.dict_length], dtype=tf.float32), _inputs_ta.read(time)],
axis=-1)
next_loop_state = outputs_ta
else:
next_cell_state = cell_state
prediction = tf.layers.dense(inputs=cell_output, activation=None, units=self.dict_length)
next_loop_state = loop_state.write(time - 1, prediction)
next_input = tf.concat([prediction, _inputs_ta.read(time)], axis=-1)
elements_finished = (time >= sequence_length)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('decoder_p3', reuse=reuse):
_, _, loop_ta = tf.nn.raw_rnn(cell, loop_fn)
output = _transpose_batch_time(loop_ta.stack())
return output
def run_decoder(self, units_lstm_decoder, sequence_length, units_dense_global, lat_words, reuse):
if self.simple_decoder:
global_mu, global_logsig = self.make_global_latent(values=lat_words, units_dense=units_dense_global)
eps = tf.random_normal(shape=[self.batch_size, units_dense_global], dtype=tf.float32)
global_latent = eps * tf.exp(global_logsig) + global_mu
out_2 = self.decoder_p2(sequence_length=sequence_length, num_hidden_word_units=units_lstm_decoder,
inputs=lat_words, reuse=reuse, global_latent=global_latent,
context_dim=units_lstm_decoder, max_time=self.num_sentence_characters)
out = self.decoder_p3(inputs=out_2, reuse=reuse, max_time=self.num_sentence_characters,
sequence_length=sequence_length)
return out, global_latent, global_logsig, global_mu
def prior(self, values, num_units, global_latent, word_lens, reuse):
global_latent = tf.transpose(tf.stack([global_latent for _ in range(self.max_num_words)]), [1, 0, 2])
print(' PRIOR input dim from post {}'.format(values))
values = tf.concat([tf.zeros(shape=[self.batch_size, 1, self.lat_word_dim], dtype=tf.float32), values], axis=1)
values = values[:, 0:-1, :]
values = tf.concat([tf.cast(values, dtype=tf.float32), global_latent], axis=-1)
print('PRIOR input dim to prior {}'.format(values))
with tf.variable_scope('prior', reuse=reuse):
cell = tf.contrib.rnn.LSTMCell(num_units)
values, _ = tf.nn.dynamic_rnn(cell=cell, inputs=values, sequence_length=word_lens, dtype=tf.float32)
with tf.variable_scope('prior/rnn', reuse=reuse):
w = tf.get_variable(name='prior_dense_w', shape=[self.lat_word_dim, self.lat_word_dim * 2],
dtype=tf.float32)
b = tf.get_variable(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf.float32)
out = tf.reshape(tf.matmul(tf.reshape(values, [-1, self.lat_word_dim]), w) + b,
[self.batch_size, self.max_num_words, self.lat_word_dim * 2])
mu, log_sig = tf.split(out, axis=-1, num_or_size_splits=2, name='prior_dense')
print('MU{}'.format(mu))
return [mu, log_sig]
def cost_function(self, predictions, true_input, global_mu, global_logsig, prior_mu, prior_logsig, posterior_mu,
posterior_logsig, shift, total_steps, global_step, kl=True):
mask = tf.reduce_sum(true_input, -1)
# reconstruction = tf.reduce_sum(tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1),-1)
reconstruction = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask,
-1)
# have to be very careful of order of the mean/stddev parmeters
# outer reduce sum for each KL term
'''
kl_p1 = 0.5 * (tf.reduce_sum(tf.exp(posterior_logsig - prior_logsig), axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1) - tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
(prior_logsig - posterior_logsig), axis=-1))
'''
kl_p1 = 0.5 * (tf.reduce_sum(tf.reduce_sum(prior_logsig, axis=1) - tf.reduce_sum(posterior_logsig, axis=1),
axis=-1) -
tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) * tf.cast(tf.shape(prior_mu)[1],
dtype=tf.float32) +
tf.reduce_sum(tf.reduce_sum(tf.divide(1, tf.exp(prior_logsig)) * tf.exp(posterior_logsig),
axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1), axis=-1))
'''
kl_global_lat = 0.5 * (
tf.reduce_sum(tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1) - tf.cast(
tf.shape(global_mu)[-1], dtype=tf.float32) - tf.reduce_sum(global_logsig))
'''
kl_global_lat = 0.5 * (-tf.reduce_sum(global_logsig, axis=-1) - tf.cast(tf.shape(global_mu)[-1],
dtype=tf.float32) + tf.reduce_sum(
tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu), axis=-1))
kl_p2 = kl_p1
# kl_p2 = tf.reduce_sum(kl_p1, -1)
if kl:
kl_p3 = kl_p2 + kl_global_lat
anneal_c = tf.cast(tf.minimum(tf.maximum(tf.divide((global_step - shift), total_steps), 0), 1),
dtype=tf.float32)
kl_p3 = kl_p3 * anneal_c
else:
anneal_c = 0
kl_p3 = tf.constant(0, dtype=tf.float32)
# sum over all seperate KLs for each lat var
cost = tf.reduce_mean(kl_p3 + reconstruction)
return cost, reconstruction, kl_p3, kl_p1, kl_global_lat, kl_p2, anneal_c
def test_cost_function(self, predictions, true_input, global_mu, global_logsig, prior_mu, prior_logsig,
posterior_mu, posterior_logsig):
mask = tf.reduce_sum(true_input, -1)
reconstruction = tf.reduce_sum(
tf.nn.sparse_softmax_cross_entropy_with_logits(labels=tf.argmax(true_input, -1), logits=predictions) * mask,
-1)
# reconstruction = tf.reduce_sum(-true_input*tf.log(predictions+1e-9),axis=-1)
# have to be very careful of order of the mean/stddev parmeters
# outer reduce sum for each KL term
kl_p1 = 0.5 * (tf.reduce_sum(tf.exp(posterior_logsig - prior_logsig), axis=-1) + tf.reduce_sum(
(posterior_mu - prior_mu) * tf.divide(1, tf.exp(prior_logsig)) * (posterior_mu - prior_mu),
axis=-1) - tf.cast(tf.shape(posterior_mu)[-1], dtype=tf.float32) + tf.reduce_sum(
(prior_logsig - posterior_logsig), axis=-1))
kl_global_lat = 0.5 * (tf.reduce_sum(tf.exp(global_logsig), axis=-1) + tf.reduce_sum((global_mu * global_mu),
axis=-1) - tf.cast(
tf.shape(global_mu)[-1], dtype=tf.float32) - tf.reduce_sum(global_logsig))
# sum over all seperate KLs for each lat var
kl_p2 = tf.reduce_sum(kl_p1, -1)
kl_p3 = kl_p2 + kl_global_lat
cost = tf.reduce_mean(kl_p3 + tf.reduce_sum(reconstruction, -1))
return cost, reconstruction, kl_p3, kl_p1
def calc_cost(self, kl, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, sentence_word_lens, predictions, shift, total_steps, global_step, reuse):
prior_mu, prior_logsig = self.prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=reuse)
cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c = self.cost_function(kl=kl,
predictions=predictions,
true_input=true_input,
global_mu=global_mu,
global_logsig=global_logsig,
prior_mu=prior_mu,
prior_logsig=prior_logsig,
posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig,
shift=shift,
total_steps=total_steps,
global_step=global_step)
self.kls_hist = tf.summary.histogram('kls', tf.reduce_mean(kl_p1, 0))
self.global_kl_scalar = tf.summary.scalar('kls_global', tf.reduce_mean(kl_global))
self.rec_scalar = tf.summary.scalar('rec', tf.reduce_mean(reconstruction))
self.cost_scalar = tf.summary.scalar('full_cost', cost)
var_all = tf.nn.moments(x=posterior_mu, axes=0)
var_all = var_all[-1]
kl = tf.reduce_mean(kl_p3)
self.full_kl_scalar = tf.summary.scalar('full_kl', kl)
self.sum_all_activ_hist = tf.summary.histogram('active_lats_all', var_all)
var_g = tf.nn.moments(x=global_mu, axes=0)
var_g = var_g[-1]
self.sum_global_activ_hist = tf.summary.histogram('active_lats_global', var_g)
return cost, reconstruction, kl_p3, kl_p1, kl_global, kl_p2, anneal_c
def test_calc_cost(self, posterior_logsig, post_samples, global_mu, global_logsig, global_latent_sample,
posterior_mu, true_input, predictions, sentence_word_lens):
prior_mu, prior_logsig = self.prior(values=post_samples, num_units=self.units_encoder_lstm,
global_latent=global_latent_sample, word_lens=sentence_word_lens,
reuse=True)
cost, _, _, _ = self.test_cost_function(predictions=predictions, true_input=true_input, global_mu=global_mu,
global_logsig=global_logsig, prior_mu=prior_mu,
prior_logsig=prior_logsig, posterior_mu=posterior_mu,
posterior_logsig=posterior_logsig)
return cost
def generation(self, samples):
outputs_ta = tf.TensorArray(dtype=tf.float32, size=self.max_num_words)
cell = tf.contrib.rnn.LSTMCell(self.decoder_units)
print('GENER samples {}'.format(np.shape(samples)))
def loop_fn(time, cell_output, cell_state, loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_cell_state = cell.zero_state(self.batch_size, tf.float32)
next_loop_state = outputs_ta
# self.lat_word_dim is very important, need from kevin
next_input = tf.concat(
[samples, tf.zeros(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)], axis=-1)
else:
next_cell_state = cell_state
w = tf.get_variable(name='prior_dense_w')
b = tf.get_variable(name='prior_dense_b')
print(cell_output)
cell_output = tf.reshape(tf.matmul(cell_output, w) + b, [self.batch_size, self.lat_word_dim * 2])
mu, logsig = tf.split(cell_output, axis=-1, num_or_size_splits=2)
eps = tf.random_normal(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)
samples_word = eps * tf.exp(logsig) + mu
next_input = tf.concat([samples, samples_word], axis=-1)
next_loop_state = loop_state.write(time - 1, samples_word)
elements_finished = (time >= self.max_num_words)
return (elements_finished, next_input, next_cell_state, emit_output, next_loop_state)
with tf.variable_scope('prior', reuse=True):
_, _, loop_state_ta = tf.nn.raw_rnn(cell, loop_fn)
loop_state_out = _transpose_batch_time(loop_state_ta.stack())
context = self.decoder_p2(num_hidden_word_units=self.lat_word_dim, inputs=loop_state_out,
sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
global_latent=samples, reuse=True, context_dim=self.decoder_units,
max_time=self.num_sentence_characters)
predictions = self.decoder_p3(inputs=context, reuse=True,
sequence_length=np.repeat(self.num_sentence_characters, self.batch_size, axis=-1),
max_time=self.num_sentence_characters)
return predictions
# Example usage
# batch_len = np.random.randint(low=0,high=30,size=[10])
# arg_dict = {'global_lat_dim':10,'word_lens':batch_len,'batch_size':10,'max_num_words':30,'decoder_units':40,'encodings' : np.random.randn(10,30,40),'sentence_lens':np.random.randint(low=0,high=30,size=10),'num_sentence_characters':200,'dict_length':26}
# decoder = Decoder(**arg_dict)
# word_encoding_placeholder=tf.placeholder(dtype=tf.float32,shape=[decoder.batch_size,decoder.max_num_words,np.shape(decoder.encodings)[-1]])
# out_o, global_latent_o,global_logsig_o,global_mu_o = decoder.run_decoder(units_lstm_decoder=40,lat_words=word_encoding_placeholder,units_dense_global=40,sequence_length=batch_len)
# true_mat =np.zeros(shape=[decoder.batch_size,decoder.num_sentence_characters],dtype=np.float32)
# for k,i in enumerate(batch_len):
# true_mat[k,0:i] = np.random.randint(low=0,high=decoder.dict_length,size=[i])
# true_inp=true_mat
# posterior_mu =np.random.randn(10,30,40)
# posterior_logsig = np.exp(np.random.randn(10,30,40))
# cost= decoder.calc_cost(prior_mu=posterior_mu,prior_logsig=posterior_logsig,global_latent_sample=global_latent_o,global_logsig=global_logsig_o,global_mu=global_mu_o,predictions=out_o,true_input=tf.one_hot(indices=true_inp,depth =decoder.dict_length),posterior_logsig=posterior_logsig,posterior_mu=posterior_mu,post_samples=decoder.encodings)
#
# with tf.Session() as sess:
# sess.run(tf.global_variables_initializer())
# cost_o=sess.run([cost],feed_dict={word_encoding_placeholder:decoder.encodings})
|
[
"tensorflow.einsum",
"tensorflow.reduce_sum",
"tensorflow.nn.tanh",
"tensorflow.reshape",
"numpy.shape",
"tensorflow.matmul",
"tensorflow.divide",
"tensorflow.nn.bidirectional_dynamic_rnn",
"tensorflow.split",
"tensorflow.get_variable",
"tensorflow.nn.softmax",
"tensorflow.nn.moments",
"tensorflow.concat",
"tensorflow.variable_scope",
"tensorflow.cast",
"tensorflow.summary.histogram",
"tensorflow.exp",
"numpy.repeat",
"tensorflow.summary.scalar",
"tensorflow.reduce_mean",
"tensorflow.constant",
"tensorflow.tile",
"tensorflow.random_normal",
"tensorflow.nn.raw_rnn",
"tensorflow.nn.dynamic_rnn",
"tensorflow.argmax",
"tensorflow.layers.dense",
"tensorflow.shape",
"tensorflow.zeros",
"tensorflow.TensorArray",
"tensorflow.contrib.rnn.LSTMCell"
] |
[((831, 862), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['values'], {'axis': '(-1)'}), '(values, axis=-1)\n', (845, 862), True, 'import tensorflow as tf\n'), ((883, 958), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'mean_pool', 'activation': 'tf.nn.relu', 'units': 'units_dense'}), '(inputs=mean_pool, activation=tf.nn.relu, units=units_dense)\n', (898, 958), True, 'import tensorflow as tf\n'), ((979, 1052), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'pre_dist1', 'activation': 'None', 'units': '(units_dense * 2)'}), '(inputs=pre_dist1, activation=None, units=units_dense * 2)\n', (994, 1052), True, 'import tensorflow as tf\n'), ((1751, 1771), 'tensorflow.concat', 'tf.concat', (['values', '(2)'], {}), '(values, 2)\n', (1760, 1771), True, 'import tensorflow as tf\n'), ((2405, 2425), 'tensorflow.concat', 'tf.concat', (['values', '(2)'], {}), '(values, 2)\n', (2414, 2425), True, 'import tensorflow as tf\n'), ((4402, 4449), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time'}), '(dtype=tf.float32, size=max_time)\n', (4416, 4449), True, 'import tensorflow as tf\n'), ((4466, 4509), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_units'], {}), '(self.decoder_units)\n', (4489, 4509), True, 'import tensorflow as tf\n'), ((6727, 6798), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'max_time', 'name': '"""pred_char_array"""'}), "(dtype=tf.float32, size=max_time, name='pred_char_array')\n", (6741, 6798), True, 'import tensorflow as tf\n'), ((6815, 6861), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_p3_units'], {}), '(self.decoder_p3_units)\n', (6838, 6861), True, 'import tensorflow as tf\n'), ((10669, 10698), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (10682, 10698), True, 'import tensorflow as tf\n'), ((13272, 13310), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(kl_p3 + reconstruction)'], {}), '(kl_p3 + reconstruction)\n', (13286, 13310), True, 'import tensorflow as tf\n'), ((13577, 13606), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (13590, 13606), True, 'import tensorflow as tf\n'), ((14733, 14757), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['kl_p1', '(-1)'], {}), '(kl_p1, -1)\n', (14746, 14757), True, 'import tensorflow as tf\n'), ((17073, 17109), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""full_cost"""', 'cost'], {}), "('full_cost', cost)\n", (17090, 17109), True, 'import tensorflow as tf\n'), ((17128, 17165), 'tensorflow.nn.moments', 'tf.nn.moments', ([], {'x': 'posterior_mu', 'axes': '(0)'}), '(x=posterior_mu, axes=0)\n', (17141, 17165), True, 'import tensorflow as tf\n'), ((17209, 17230), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_p3'], {}), '(kl_p3)\n', (17223, 17230), True, 'import tensorflow as tf\n'), ((17262, 17294), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""full_kl"""', 'kl'], {}), "('full_kl', kl)\n", (17279, 17294), True, 'import tensorflow as tf\n'), ((17329, 17377), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""active_lats_all"""', 'var_all'], {}), "('active_lats_all', var_all)\n", (17349, 17377), True, 'import tensorflow as tf\n'), ((17394, 17428), 'tensorflow.nn.moments', 'tf.nn.moments', ([], {'x': 'global_mu', 'axes': '(0)'}), '(x=global_mu, axes=0)\n', (17407, 17428), True, 'import tensorflow as tf\n'), ((17492, 17541), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""active_lats_global"""', 'var_g'], {}), "('active_lats_global', var_g)\n", (17512, 17541), True, 'import tensorflow as tf\n'), ((18555, 18612), 'tensorflow.TensorArray', 'tf.TensorArray', ([], {'dtype': 'tf.float32', 'size': 'self.max_num_words'}), '(dtype=tf.float32, size=self.max_num_words)\n', (18569, 18612), True, 'import tensorflow as tf\n'), ((18628, 18671), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['self.decoder_units'], {}), '(self.decoder_units)\n', (18651, 18671), True, 'import tensorflow as tf\n'), ((1084, 1120), 'tensorflow.cast', 'tf.cast', (['pre_dist2'], {'dtype': 'tf.float32'}), '(pre_dist2, dtype=tf.float32)\n', (1091, 1120), True, 'import tensorflow as tf\n'), ((1328, 1372), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p1"""'], {'reuse': 'reuse'}), "('decoder_p1', reuse=reuse)\n", (1345, 1372), True, 'import tensorflow as tf\n'), ((1394, 1441), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (1417, 1441), True, 'import tensorflow as tf\n'), ((1462, 1509), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (1485, 1509), True, 'import tensorflow as tf\n'), ((1539, 1677), 'tensorflow.nn.bidirectional_dynamic_rnn', 'tf.nn.bidirectional_dynamic_rnn', ([], {'inputs': 'encodings', 'dtype': 'tf.float32', 'cell_bw': 'cell1', 'cell_fw': 'cell2', 'sequence_length': 'self.sentence_lens'}), '(inputs=encodings, dtype=tf.float32, cell_bw\n =cell1, cell_fw=cell2, sequence_length=self.sentence_lens)\n', (1570, 1677), True, 'import tensorflow as tf\n'), ((1977, 2021), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p1"""'], {'reuse': 'reuse'}), "('decoder_p1', reuse=reuse)\n", (1994, 2021), True, 'import tensorflow as tf\n'), ((2043, 2090), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (2066, 2090), True, 'import tensorflow as tf\n'), ((2111, 2158), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', ([], {'num_units': 'units_bilstm'}), '(num_units=units_bilstm)\n', (2134, 2158), True, 'import tensorflow as tf\n'), ((2516, 2565), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""attention_layer"""'], {'reuse': 'reuse'}), "('attention_layer', reuse=reuse)\n", (2533, 2565), True, 'import tensorflow as tf\n'), ((2584, 2662), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""query_w"""', 'shape': '[self.decoder_units, self.lat_word_dim]'}), "(name='query_w', shape=[self.decoder_units, self.lat_word_dim])\n", (2599, 2662), True, 'import tensorflow as tf\n'), ((2680, 2757), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""value_w"""', 'shape': '[self.lat_word_dim, self.lat_word_dim]'}), "(name='value_w', shape=[self.lat_word_dim, self.lat_word_dim])\n", (2695, 2757), True, 'import tensorflow as tf\n'), ((2774, 2826), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""v"""', 'shape': '[self.lat_word_dim]'}), "(name='v', shape=[self.lat_word_dim])\n", (2789, 2826), True, 'import tensorflow as tf\n'), ((3251, 3287), 'tensorflow.einsum', 'tf.einsum', (['"""ijk,kl->ijl"""', 'values', 'w2'], {}), "('ijk,kl->ijl', values, w2)\n", (3260, 3287), True, 'import tensorflow as tf\n'), ((3519, 3545), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['out'], {'dim': '(-1)'}), '(out, dim=-1)\n', (3532, 3545), True, 'import tensorflow as tf\n'), ((4113, 4151), 'tensorflow.concat', 'tf.concat', (['[context, queries]'], {'axis': '(-1)'}), '([context, queries], axis=-1)\n', (4122, 4151), True, 'import tensorflow as tf\n'), ((4169, 4242), 'tensorflow.reshape', 'tf.reshape', (['l1', '[self.batch_size, self.lat_word_dim + self.decoder_units]'], {}), '(l1, [self.batch_size, self.lat_word_dim + self.decoder_units])\n', (4179, 4242), True, 'import tensorflow as tf\n'), ((6216, 6260), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p2"""'], {'reuse': 'reuse'}), "('decoder_p2', reuse=reuse)\n", (6233, 6260), True, 'import tensorflow as tf\n'), ((6296, 6324), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (6309, 6324), True, 'import tensorflow as tf\n'), ((7888, 7932), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder_p3"""'], {'reuse': 'reuse'}), "('decoder_p3', reuse=reuse)\n", (7905, 7932), True, 'import tensorflow as tf\n'), ((7962, 7990), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (7975, 7990), True, 'import tensorflow as tf\n'), ((8339, 8418), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[self.batch_size, units_dense_global]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, units_dense_global], dtype=tf.float32)\n', (8355, 8418), True, 'import tensorflow as tf\n'), ((9591, 9630), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior"""'], {'reuse': 'reuse'}), "('prior', reuse=reuse)\n", (9608, 9630), True, 'import tensorflow as tf\n'), ((9651, 9685), 'tensorflow.contrib.rnn.LSTMCell', 'tf.contrib.rnn.LSTMCell', (['num_units'], {}), '(num_units)\n', (9674, 9685), True, 'import tensorflow as tf\n'), ((9710, 9802), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', ([], {'cell': 'cell', 'inputs': 'values', 'sequence_length': 'word_lens', 'dtype': 'tf.float32'}), '(cell=cell, inputs=values, sequence_length=word_lens,\n dtype=tf.float32)\n', (9727, 9802), True, 'import tensorflow as tf\n'), ((9812, 9855), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior/rnn"""'], {'reuse': 'reuse'}), "('prior/rnn', reuse=reuse)\n", (9829, 9855), True, 'import tensorflow as tf\n'), ((9873, 9983), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_w"""', 'shape': '[self.lat_word_dim, self.lat_word_dim * 2]', 'dtype': 'tf.float32'}), "(name='prior_dense_w', shape=[self.lat_word_dim, self.\n lat_word_dim * 2], dtype=tf.float32)\n", (9888, 9983), True, 'import tensorflow as tf\n'), ((10027, 10116), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_b"""', 'shape': '(self.lat_word_dim * 2)', 'dtype': 'tf.float32'}), "(name='prior_dense_b', shape=self.lat_word_dim * 2, dtype=tf\n .float32)\n", (10042, 10116), True, 'import tensorflow as tf\n'), ((10322, 10386), 'tensorflow.split', 'tf.split', (['out'], {'axis': '(-1)', 'num_or_size_splits': '(2)', 'name': '"""prior_dense"""'}), "(out, axis=-1, num_or_size_splits=2, name='prior_dense')\n", (10330, 10386), True, 'import tensorflow as tf\n'), ((13170, 13202), 'tensorflow.constant', 'tf.constant', (['(0)'], {'dtype': 'tf.float32'}), '(0, dtype=tf.float32)\n', (13181, 13202), True, 'import tensorflow as tf\n'), ((16846, 16870), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_p1', '(0)'], {}), '(kl_p1, 0)\n', (16860, 16870), True, 'import tensorflow as tf\n'), ((16936, 16961), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['kl_global'], {}), '(kl_global)\n', (16950, 16961), True, 'import tensorflow as tf\n'), ((17014, 17044), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['reconstruction'], {}), '(reconstruction)\n', (17028, 17044), True, 'import tensorflow as tf\n'), ((20152, 20190), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""prior"""'], {'reuse': '(True)'}), "('prior', reuse=True)\n", (20169, 20190), True, 'import tensorflow as tf\n'), ((20226, 20254), 'tensorflow.nn.raw_rnn', 'tf.nn.raw_rnn', (['cell', 'loop_fn'], {}), '(cell, loop_fn)\n', (20239, 20254), True, 'import tensorflow as tf\n'), ((2885, 2920), 'tensorflow.einsum', 'tf.einsum', (['"""ij,jk->ik"""', 'queries', 'w1'], {}), "('ij,jk->ik', queries, w1)\n", (2894, 2920), True, 'import tensorflow as tf\n'), ((3007, 3050), 'tensorflow.tile', 'tf.tile', (['conv_q', '[1, 1, self.max_num_words]'], {}), '(conv_q, [1, 1, self.max_num_words])\n', (3014, 3050), True, 'import tensorflow as tf\n'), ((3401, 3442), 'tensorflow.nn.tanh', 'tf.nn.tanh', ([], {'name': '"""combine"""', 'x': '(a_p1 + a_p2)'}), "(name='combine', x=a_p1 + a_p2)\n", (3411, 3442), True, 'import tensorflow as tf\n'), ((5230, 5331), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.lat_word_dim * 2 + self.global_lat_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim * 2 + self.\n global_lat_dim], dtype=tf.float32)\n', (5238, 5331), True, 'import tensorflow as tf\n'), ((5930, 5974), 'tensorflow.concat', 'tf.concat', (['[context, global_latent]'], {'axis': '(-1)'}), '([context, global_latent], axis=-1)\n', (5939, 5974), True, 'import tensorflow as tf\n'), ((7482, 7558), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'cell_output', 'activation': 'None', 'units': 'self.dict_length'}), '(inputs=cell_output, activation=None, units=self.dict_length)\n', (7497, 7558), True, 'import tensorflow as tf\n'), ((9302, 9375), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, 1, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, 1, self.lat_word_dim], dtype=tf.float32)\n', (9310, 9375), True, 'import tensorflow as tf\n'), ((9458, 9491), 'tensorflow.cast', 'tf.cast', (['values'], {'dtype': 'tf.float32'}), '(values, dtype=tf.float32)\n', (9465, 9491), True, 'import tensorflow as tf\n'), ((12745, 12790), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(global_mu * global_mu)'], {'axis': '(-1)'}), '(global_mu * global_mu, axis=-1)\n', (12758, 12790), True, 'import tensorflow as tf\n'), ((14274, 14329), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(prior_logsig - posterior_logsig)'], {'axis': '(-1)'}), '(prior_logsig - posterior_logsig, axis=-1)\n', (14287, 14329), True, 'import tensorflow as tf\n'), ((14634, 14662), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['global_logsig'], {}), '(global_logsig)\n', (14647, 14662), True, 'import tensorflow as tf\n'), ((14835, 14868), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['reconstruction', '(-1)'], {}), '(reconstruction, -1)\n', (14848, 14868), True, 'import tensorflow as tf\n'), ((18712, 18729), 'numpy.shape', 'np.shape', (['samples'], {}), '(samples)\n', (18720, 18729), True, 'import numpy as np\n'), ((19341, 19378), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_w"""'}), "(name='prior_dense_w')\n", (19356, 19378), True, 'import tensorflow as tf\n'), ((19399, 19436), 'tensorflow.get_variable', 'tf.get_variable', ([], {'name': '"""prior_dense_b"""'}), "(name='prior_dense_b')\n", (19414, 19436), True, 'import tensorflow as tf\n'), ((19616, 19668), 'tensorflow.split', 'tf.split', (['cell_output'], {'axis': '(-1)', 'num_or_size_splits': '(2)'}), '(cell_output, axis=-1, num_or_size_splits=2)\n', (19624, 19668), True, 'import tensorflow as tf\n'), ((19691, 19769), 'tensorflow.random_normal', 'tf.random_normal', ([], {'shape': '[self.batch_size, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)\n', (19707, 19769), True, 'import tensorflow as tf\n'), ((19857, 19900), 'tensorflow.concat', 'tf.concat', (['[samples, samples_word]'], {'axis': '(-1)'}), '([samples, samples_word], axis=-1)\n', (19866, 19900), True, 'import tensorflow as tf\n'), ((20477, 20542), 'numpy.repeat', 'np.repeat', (['self.num_sentence_characters', 'self.batch_size'], {'axis': '(-1)'}), '(self.num_sentence_characters, self.batch_size, axis=-1)\n', (20486, 20542), True, 'import numpy as np\n'), ((20838, 20903), 'numpy.repeat', 'np.repeat', (['self.num_sentence_characters', 'self.batch_size'], {'axis': '(-1)'}), '(self.num_sentence_characters, self.batch_size, axis=-1)\n', (20847, 20903), True, 'import numpy as np\n'), ((2359, 2386), 'tensorflow.cast', 'tf.cast', (['hap_lens', 'tf.int32'], {}), '(hap_lens, tf.int32)\n', (2366, 2386), True, 'import tensorflow as tf\n'), ((8453, 8474), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (8459, 8474), True, 'import tensorflow as tf\n'), ((4886, 4960), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, num_hidden_word_units]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, num_hidden_word_units], dtype=tf.float32)\n', (4894, 4960), True, 'import tensorflow as tf\n'), ((7220, 7289), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.dict_length]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.dict_length], dtype=tf.float32)\n', (7228, 7289), True, 'import tensorflow as tf\n'), ((10151, 10194), 'tensorflow.reshape', 'tf.reshape', (['values', '[-1, self.lat_word_dim]'], {}), '(values, [-1, self.lat_word_dim])\n', (10161, 10194), True, 'import tensorflow as tf\n'), ((10910, 10935), 'tensorflow.argmax', 'tf.argmax', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (10919, 10935), True, 'import tensorflow as tf\n'), ((12711, 12732), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (12717, 12732), True, 'import tensorflow as tf\n'), ((12970, 13013), 'tensorflow.divide', 'tf.divide', (['(global_step - shift)', 'total_steps'], {}), '(global_step - shift, total_steps)\n', (12979, 13013), True, 'import tensorflow as tf\n'), ((13713, 13738), 'tensorflow.argmax', 'tf.argmax', (['true_input', '(-1)'], {}), '(true_input, -1)\n', (13722, 13738), True, 'import tensorflow as tf\n'), ((14425, 14470), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(global_mu * global_mu)'], {'axis': '(-1)'}), '(global_mu * global_mu, axis=-1)\n', (14438, 14470), True, 'import tensorflow as tf\n'), ((19175, 19245), 'tensorflow.zeros', 'tf.zeros', ([], {'shape': '[self.batch_size, self.lat_word_dim]', 'dtype': 'tf.float32'}), '(shape=[self.batch_size, self.lat_word_dim], dtype=tf.float32)\n', (19183, 19245), True, 'import tensorflow as tf\n'), ((19513, 19538), 'tensorflow.matmul', 'tf.matmul', (['cell_output', 'w'], {}), '(cell_output, w)\n', (19522, 19538), True, 'import tensorflow as tf\n'), ((19807, 19821), 'tensorflow.exp', 'tf.exp', (['logsig'], {}), '(logsig)\n', (19813, 19821), True, 'import tensorflow as tf\n'), ((11518, 11553), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['prior_logsig'], {'axis': '(1)'}), '(prior_logsig, axis=1)\n', (11531, 11553), True, 'import tensorflow as tf\n'), ((11556, 11595), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['posterior_logsig'], {'axis': '(1)'}), '(posterior_logsig, axis=1)\n', (11569, 11595), True, 'import tensorflow as tf\n'), ((12511, 12548), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['global_logsig'], {'axis': '(-1)'}), '(global_logsig, axis=-1)\n', (12524, 12548), True, 'import tensorflow as tf\n'), ((14024, 14063), 'tensorflow.exp', 'tf.exp', (['(posterior_logsig - prior_logsig)'], {}), '(posterior_logsig - prior_logsig)\n', (14030, 14063), True, 'import tensorflow as tf\n'), ((14226, 14248), 'tensorflow.shape', 'tf.shape', (['posterior_mu'], {}), '(posterior_mu)\n', (14234, 14248), True, 'import tensorflow as tf\n'), ((14391, 14412), 'tensorflow.exp', 'tf.exp', (['global_logsig'], {}), '(global_logsig)\n', (14397, 14412), True, 'import tensorflow as tf\n'), ((14589, 14608), 'tensorflow.shape', 'tf.shape', (['global_mu'], {}), '(global_mu)\n', (14597, 14608), True, 'import tensorflow as tf\n'), ((11676, 11698), 'tensorflow.shape', 'tf.shape', (['posterior_mu'], {}), '(posterior_mu)\n', (11684, 11698), True, 'import tensorflow as tf\n'), ((11732, 11750), 'tensorflow.shape', 'tf.shape', (['prior_mu'], {}), '(prior_mu)\n', (11740, 11750), True, 'import tensorflow as tf\n'), ((11950, 11974), 'tensorflow.exp', 'tf.exp', (['posterior_logsig'], {}), '(posterior_logsig)\n', (11956, 11974), True, 'import tensorflow as tf\n'), ((12559, 12578), 'tensorflow.shape', 'tf.shape', (['global_mu'], {}), '(global_mu)\n', (12567, 12578), True, 'import tensorflow as tf\n'), ((11926, 11946), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (11932, 11946), True, 'import tensorflow as tf\n'), ((12121, 12141), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (12127, 12141), True, 'import tensorflow as tf\n'), ((14144, 14164), 'tensorflow.exp', 'tf.exp', (['prior_logsig'], {}), '(prior_logsig)\n', (14150, 14164), True, 'import tensorflow as tf\n')]
|
#!/user/bin/env python3
###################################################################################
# #
# NAME: conanfile.py #
# #
# AUTHOR: <NAME>. #
# #
# CONTACT: <mailto:<EMAIL>> #
# #
# NOTICES: #
# #
# License: Apache 2.0 :http://www.apache.org/licenses/LICENSE-2.0 #
# #
###################################################################################
from conans import ConanFile, tools, Meson
from os.path import join as join_paths
import sys
if sys.version_info[0] < 3:
raise Exception("The version of Python must be 3 or greater.")
class MesonProject(ConanFile):
generators = 'pkg_config'
settings = 'os', 'compiler', 'build_type', 'arch'
topics = ('conan', 'meson', 'mesonbuild', 'build-system', 'dlang')
def build(self):
meson = Meson(self)
meson.configure()
meson.build()
# end of method build
# end of class MesonProject
|
[
"conans.Meson"
] |
[((1524, 1535), 'conans.Meson', 'Meson', (['self'], {}), '(self)\n', (1529, 1535), False, 'from conans import ConanFile, tools, Meson\n')]
|
#-------------------------------------------------------------------------------
# Copyright 2017 Cognizant Technology Solutions
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
#-------------------------------------------------------------------------------
'''
Created on Jun 16, 2016
@author: 593714
'''
from __future__ import unicode_literals
from com.cognizant.devops.platformagents.core.BaseAgent import BaseAgent
from dateutil import parser
import boto3
import time
import json, ast
class AwsCodePipelineAgent(BaseAgent):
def process(self):
startFrom = self.config.get("startFrom", '')
startFrom = parser.parse(startFrom)
startFrom = startFrom.strftime('%Y-%m-%dT%H:%M:%S')
since = self.tracking.get('lastupdated', None)
if since == None:
lastUpdated = startFrom
else:
lastUpdated = since
since = parser.parse(since)
since = since.strftime('%Y-%m-%dT%H:%M:%S')
pattern = '%Y-%m-%dT%H:%M:%S'
since = int(time.mktime(time.strptime(since, pattern)))
accesskey = self.config.get("awsAccesskey", '')
secretkey = self.config.get("awsSecretkey", '')
regionName = self.config.get("awsRegion", '')
client = boto3.client('codepipeline',
aws_access_key_id=accesskey,
aws_secret_access_key=secretkey,
region_name=regionName)
tracking_data = []
injectData = {}
pipeline = client.list_pipelines(
)
for names in pipeline["pipelines"]:
response = client.get_pipeline_state(
name=names["name"]
)
date = str(response['created'])
date = parser.parse(date)
date = date.strftime('%Y-%m-%dT%H:%M:%S')
pattern = '%Y-%m-%dT%H:%M:%S'
date = int(time.mktime(time.strptime(date, pattern)))
if since == None or date > since:
injectData['pipelineName'] = str(response['pipelineName'])
injectData['jobName'] = str(response['stageStates'][1]['stageName'])
injectData['status'] = str(response['stageStates'][0]['actionStates'][0]['latestExecution']['status'])
summary = response['stageStates'][0]['actionStates'][0]['latestExecution']["errorDetails"]
injectData['summary'] = summary['message']
injectData['createTime'] = str(response['created'])
injectData['pipelineTime'] = response['created'].strftime('%Y-%m-%dT%H:%M:%SZ')
start = str(response['stageStates'][0]['actionStates'][0]['latestExecution']['lastStatusChange'])
injectData['pipelineStartTime'] = start
string = ast.literal_eval(json.dumps(injectData))
tracking_data.append(string)
seq = [x['createTime'] for x in tracking_data]
fromDateTime = max(seq)
fromDateTime = parser.parse(fromDateTime)
fromDateTime = fromDateTime.strftime('%Y-%m-%dT%H:%M:%S')
if tracking_data != []:
self.publishToolsData(tracking_data)
self.tracking["lastupdated"] = fromDateTime
self.updateTrackingJson(self.tracking)
if __name__ == "__main__":
AwsCodePipelineAgent()
|
[
"dateutil.parser.parse",
"time.strptime",
"boto3.client",
"json.dumps"
] |
[((1124, 1147), 'dateutil.parser.parse', 'parser.parse', (['startFrom'], {}), '(startFrom)\n', (1136, 1147), False, 'from dateutil import parser\n'), ((1760, 1878), 'boto3.client', 'boto3.client', (['"""codepipeline"""'], {'aws_access_key_id': 'accesskey', 'aws_secret_access_key': 'secretkey', 'region_name': 'regionName'}), "('codepipeline', aws_access_key_id=accesskey,\n aws_secret_access_key=secretkey, region_name=regionName)\n", (1772, 1878), False, 'import boto3\n'), ((1391, 1410), 'dateutil.parser.parse', 'parser.parse', (['since'], {}), '(since)\n', (1403, 1410), False, 'from dateutil import parser\n'), ((2274, 2292), 'dateutil.parser.parse', 'parser.parse', (['date'], {}), '(date)\n', (2286, 2292), False, 'from dateutil import parser\n'), ((3525, 3551), 'dateutil.parser.parse', 'parser.parse', (['fromDateTime'], {}), '(fromDateTime)\n', (3537, 3551), False, 'from dateutil import parser\n'), ((1545, 1574), 'time.strptime', 'time.strptime', (['since', 'pattern'], {}), '(since, pattern)\n', (1558, 1574), False, 'import time\n'), ((2424, 2452), 'time.strptime', 'time.strptime', (['date', 'pattern'], {}), '(date, pattern)\n', (2437, 2452), False, 'import time\n'), ((3322, 3344), 'json.dumps', 'json.dumps', (['injectData'], {}), '(injectData)\n', (3332, 3344), False, 'import json, ast\n')]
|
from femagtools import winding_diagram
def test_winding_diagram():
data = winding_diagram._winding_data(12, 2, 3)
assert data == [1, -2, 3, -1, 2, -3, 1, -2, 3, -1, 2, -3]
data = winding_diagram._winding_data(36, 2, 3)
assert data == [1, 1, 1, -2, -2, -2, 3, 3, 3, -1, -1, -1, 2, 2, 2, -3, -3, -3, 1, 1, 1, -2, -2, -2, 3, 3, 3, -1, -1, -1, 2, 2, 2, -3, -3, -3]
data = winding_diagram._winding_data(36, 3, 3)
assert data == [1, 1, -2, -2, 3, 3, -1, -1, 2, 2, -3, -3, 1, 1, -2, -2, 3, 3, -1, -1, 2, 2, -3, -3, 1, 1, -2, -2, 3, 3, -1, -1, 2, 2, -3, -3]
|
[
"femagtools.winding_diagram._winding_data"
] |
[((82, 121), 'femagtools.winding_diagram._winding_data', 'winding_diagram._winding_data', (['(12)', '(2)', '(3)'], {}), '(12, 2, 3)\n', (111, 121), False, 'from femagtools import winding_diagram\n'), ((196, 235), 'femagtools.winding_diagram._winding_data', 'winding_diagram._winding_data', (['(36)', '(2)', '(3)'], {}), '(36, 2, 3)\n', (225, 235), False, 'from femagtools import winding_diagram\n'), ((394, 433), 'femagtools.winding_diagram._winding_data', 'winding_diagram._winding_data', (['(36)', '(3)', '(3)'], {}), '(36, 3, 3)\n', (423, 433), False, 'from femagtools import winding_diagram\n')]
|
MONGODB_SETTINGS = {
'DB': 'Your_DB_Name',
'host': 'localhost',
'port': 27017,
}
from pymongo import MongoClient
client = MongoClient(f'{MONGODB_SETTINGS["host"]}:{MONGODB_SETTINGS["port"]}')
db = client.DoctorsDB
|
[
"pymongo.MongoClient"
] |
[((140, 209), 'pymongo.MongoClient', 'MongoClient', (['f"""{MONGODB_SETTINGS[\'host\']}:{MONGODB_SETTINGS[\'port\']}"""'], {}), '(f"{MONGODB_SETTINGS[\'host\']}:{MONGODB_SETTINGS[\'port\']}")\n', (151, 209), False, 'from pymongo import MongoClient\n')]
|
import numpy as np
### 1
def fib_matrix(n):
for i in range(n):
res = pow((np.matrix([[1, 1], [1, 0]], dtype='int64')), i) * np.matrix([[1], [0]])
print(int(res[0][0]))
# 调用
fib_matrix(100)
### 2
# 使用矩阵计算斐波那契数列
def Fibonacci_Matrix_tool(n):
Matrix = np.matrix("1 1;1 0", dtype='int64')
# 返回是matrix类型
return np.linalg.matrix_power(Matrix, n)
def Fibonacci_Matrix(n):
result_list = []
for i in range(0, n):
result_list.append(np.array(Fibonacci_Matrix_tool(i))[0][0])
return result_list
# 调用
Fibonacci_Matrix(100)
### pow 速度 比 双**号快, np.linalg.matrix_power也是一种方法
|
[
"numpy.matrix",
"numpy.linalg.matrix_power"
] |
[((278, 313), 'numpy.matrix', 'np.matrix', (['"""1 1;1 0"""'], {'dtype': '"""int64"""'}), "('1 1;1 0', dtype='int64')\n", (287, 313), True, 'import numpy as np\n'), ((343, 376), 'numpy.linalg.matrix_power', 'np.linalg.matrix_power', (['Matrix', 'n'], {}), '(Matrix, n)\n', (365, 376), True, 'import numpy as np\n'), ((138, 159), 'numpy.matrix', 'np.matrix', (['[[1], [0]]'], {}), '([[1], [0]])\n', (147, 159), True, 'import numpy as np\n'), ((88, 130), 'numpy.matrix', 'np.matrix', (['[[1, 1], [1, 0]]'], {'dtype': '"""int64"""'}), "([[1, 1], [1, 0]], dtype='int64')\n", (97, 130), True, 'import numpy as np\n')]
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import pickle
from ssd_utils import BBoxUtility
from generator import Generator
from ssd_training import MultiboxLoss
from keras.callbacks import TensorBoard
from keras.callbacks import ModelCheckpoint
from time import gmtime, strftime
import os
def schedule(epoch, base_lr=3e-4, decay=0.9):
return base_lr * decay ** (epoch)
class Trainer(object):
"""
Trainer for ssd_model
"""
def __init__(self,
class_number=21,
input_shape=(300, 300, 3),
priors_file='prior_boxes_ssd300.pkl',
train_file='VOC2007.pkl',
path_prefix='./VOCdevkit/VOC2007/JPEGImages/',
model=None,
weight_file='weights_SSD300.hdf5',
freeze=('input_1', 'conv1_1', 'conv1_2', 'pool1',
'conv2_1', 'conv2_2', 'pool2',
'conv3_1', 'conv3_2', 'conv3_3', 'pool3'),
save_weight_file='/src/resource/checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5', # noqa
optim=None,
batch_size=20,
nb_worker=1
):
"""
Setting below parameter
:param class_number(int): class number
:param input_shape(set): set input shape
:param priors_file(str): set prior file name
:param train_file(str): train file name
:param path_prefix(str): path prefix
:param model(keras model): set the keras model such as the ssd
:param weight_file(str): weight file name
:param freeze(set): set untraining layer
"""
self.input_shape = input_shape
priors = pickle.load(open(priors_file, 'rb'))
self.bbox_utils = BBoxUtility(class_number, priors)
self.train_data = pickle.load(open(train_file, 'rb'))
keys = sorted(self.train_data.keys())
num_train = int(round(0.8 * len(keys)))
self.train_keys = keys[:num_train]
self.val_keys = keys[num_train:]
self.num_val = len(self.val_keys)
self.batch_size = batch_size
self.gen = Generator(self.train_data, self.bbox_utils, batch_size, path_prefix,
self.train_keys, self.val_keys,
(self.input_shape[0], self.input_shape[1]),
do_crop=True)
self.model = model
model.load_weights(weight_file, by_name=True)
self.freeze = list(freeze)
self.save_weight_file = save_weight_file
self.optim = optim
self.nb_worker = nb_worker
self.model.compile(optimizer=optim,
metrics=['accuracy'],
loss=MultiboxLoss(class_number,
neg_pos_ratio=2.0).compute_loss)
def train(self, nb_epoch):
"""
Call Train
:param nb_epoch(int): setting number of epoch
"""
for L in self.model.layers:
if L.name in self.freeze:
L.trainable = False
callbacks = [ModelCheckpoint(self.save_weight_file, verbose=1,
save_weights_only=True)]
callbacks.append(self.__make_tensorboard())
history = self.model.fit_generator(self.gen.generate(True),
self.gen.train_batches // self.batch_size,
nb_epoch, verbose=1,
callbacks=callbacks,
validation_data=self.gen.generate(
False),
nb_val_samples=self.gen.val_batches,
nb_worker=self.nb_worker)
def __make_tensorboard(self):
"""
Make tensorboard for visualize information
:return: tensorboard
"""
tictoc = strftime("%a_%d_%b_%Y_%H_%M_%S", gmtime())
directory_name = tictoc
self.log_dir = "./log/" + directory_name
os.mkdir(self.log_dir)
tensorboard = TensorBoard(log_dir=self.log_dir, histogram_freq=1,
write_graph=True, )
return tensorboard
|
[
"os.mkdir",
"ssd_training.MultiboxLoss",
"keras.callbacks.ModelCheckpoint",
"time.gmtime",
"keras.callbacks.TensorBoard",
"generator.Generator",
"ssd_utils.BBoxUtility"
] |
[((1794, 1827), 'ssd_utils.BBoxUtility', 'BBoxUtility', (['class_number', 'priors'], {}), '(class_number, priors)\n', (1805, 1827), False, 'from ssd_utils import BBoxUtility\n'), ((2166, 2333), 'generator.Generator', 'Generator', (['self.train_data', 'self.bbox_utils', 'batch_size', 'path_prefix', 'self.train_keys', 'self.val_keys', '(self.input_shape[0], self.input_shape[1])'], {'do_crop': '(True)'}), '(self.train_data, self.bbox_utils, batch_size, path_prefix, self.\n train_keys, self.val_keys, (self.input_shape[0], self.input_shape[1]),\n do_crop=True)\n', (2175, 2333), False, 'from generator import Generator\n'), ((4146, 4168), 'os.mkdir', 'os.mkdir', (['self.log_dir'], {}), '(self.log_dir)\n', (4154, 4168), False, 'import os\n'), ((4191, 4260), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': 'self.log_dir', 'histogram_freq': '(1)', 'write_graph': '(True)'}), '(log_dir=self.log_dir, histogram_freq=1, write_graph=True)\n', (4202, 4260), False, 'from keras.callbacks import TensorBoard\n'), ((3130, 3203), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['self.save_weight_file'], {'verbose': '(1)', 'save_weights_only': '(True)'}), '(self.save_weight_file, verbose=1, save_weights_only=True)\n', (3145, 3203), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4047, 4055), 'time.gmtime', 'gmtime', ([], {}), '()\n', (4053, 4055), False, 'from time import gmtime, strftime\n'), ((2764, 2809), 'ssd_training.MultiboxLoss', 'MultiboxLoss', (['class_number'], {'neg_pos_ratio': '(2.0)'}), '(class_number, neg_pos_ratio=2.0)\n', (2776, 2809), False, 'from ssd_training import MultiboxLoss\n')]
|
from thundra_demo_localstack.service import start_new_request, list_requests_by_request_id
import json
headers = {
"content-type": "application/json"
}
Handlers = {
'POST/requests': start_new_request,
'GET/request/{requestId}': list_requests_by_request_id
}
def generate_request_content(event, action):
params = None
result = None
if "pathParameters" in event and event["pathParameters"]:
params = dict()
params = event["pathParameters"]
if not params:
result = action()
else:
result = action(path_parameters=params)
return result
def handler(event, context):
resource = event["resource"]
http_method = event["httpMethod"]
handler_key = http_method + resource
action = Handlers.get(handler_key, None)
if not action:
return {"statusCode": 404, "body": json.dumps({})}
result = generate_request_content(event, action)
return {
"headers": headers,
"statusCode": 200,
"body": json.dumps(result)
}
|
[
"json.dumps"
] |
[((1010, 1028), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (1020, 1028), False, 'import json\n'), ((857, 871), 'json.dumps', 'json.dumps', (['{}'], {}), '({})\n', (867, 871), False, 'import json\n')]
|
#!/usr/bin/env python
import time
import unittest
import rospy
import rostest
from rosbridge_library.internal import subscription_modifiers as subscribe
class TestMessageHandlers(unittest.TestCase):
def setUp(self):
rospy.init_node("test_message_handlers")
def dummy_cb(self, msg):
pass
def test_default_message_handler(self):
handler = subscribe.MessageHandler(None, self.dummy_cb)
self.help_test_default(handler)
def test_throttle_message_handler(self):
handler = subscribe.ThrottleMessageHandler(subscribe.MessageHandler(None, self.dummy_cb))
self.help_test_throttle(handler, 50)
def test_queue_message_handler_passes_msgs(self):
handler = subscribe.QueueMessageHandler(subscribe.MessageHandler(None, self.dummy_cb))
self.help_test_queue(handler, 1000)
handler.finish()
def test_queue_message_handler_stops(self):
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
handler = subscribe.QueueMessageHandler(subscribe.MessageHandler(None, cb))
self.assertTrue(handler.is_alive())
handler.finish()
self.assertFalse(handler.is_alive())
def test_queue_message_handler_queue(self):
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
msgs = range(1000)
handler = subscribe.MessageHandler(None, cb)
handler = handler.set_throttle_rate(10000)
handler = handler.set_queue_length(10)
self.assertIsInstance(handler, subscribe.QueueMessageHandler)
# 'hello' is handled immediately
handler.handle_message("hello")
time.sleep(0.02)
# queue is now empty, but throttling is in effect
# no messages will be handled in the next 10 seconds
# these will fill up the queue, with newer values displacing old ones
# nothing gets sent because the throttle rate
for x in msgs:
handler.handle_message(x)
handler = handler.set_throttle_rate(0)
time.sleep(0.1)
try:
self.assertEqual(["hello"] + list(range(990, 1000)), received["msgs"])
finally:
handler.finish()
def test_queue_message_handler_dropping(self):
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
time.sleep(1)
queue_length = 5
msgs = range(queue_length * 5)
handler = subscribe.MessageHandler(None, cb)
handler = handler.set_queue_length(queue_length)
self.assertIsInstance(handler, subscribe.QueueMessageHandler)
# send all messages at once.
# only the first and the last queue_length should get through,
# because the callbacks are blocked.
for x in msgs:
handler.handle_message(x)
# yield the thread so the first callback can append,
# otherwise the first handled value is non-deterministic.
time.sleep(0)
# wait long enough for all the callbacks, and then some.
time.sleep(queue_length + 3)
try:
self.assertEqual([msgs[0]] + msgs[-queue_length:], received["msgs"])
except: # noqa: E722 # Will finish and raise
handler.finish()
raise
handler.finish()
def test_queue_message_handler_rate(self):
handler = subscribe.MessageHandler(None, self.dummy_cb)
self.help_test_queue_rate(handler, 50, 10)
handler.finish()
# Helper methods for each of the three Handler types, plus one for Queue+Rate.
# Used in standalone testing as well as the test_transition_functionality test
def help_test_default(self, handler):
handler = handler.set_queue_length(0)
handler = handler.set_throttle_rate(0)
self.assertIsInstance(handler, subscribe.MessageHandler)
msg = "test_default_message_handler"
received = {"msg": None}
def cb(msg):
received["msg"] = msg
handler.publish = cb
self.assertTrue(handler.time_remaining() == 0)
t1 = time.time()
handler.handle_message(msg)
t2 = time.time()
self.assertEqual(received["msg"], msg)
self.assertLessEqual(t1, handler.last_publish)
self.assertLessEqual(handler.last_publish, t2)
self.assertEqual(handler.time_remaining(), 0)
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
handler.publish = cb
xs = list(range(10000))
for x in xs:
handler.handle_message(x)
self.assertEqual(received["msgs"], xs)
return handler
def help_test_throttle(self, handler, throttle_rate):
handler = handler.set_queue_length(0)
handler = handler.set_throttle_rate(throttle_rate)
self.assertIsInstance(handler, subscribe.ThrottleMessageHandler)
msg = "test_throttle_message_handler"
# First, try with a single message
received = {"msg": None}
def cb(msg):
received["msg"] = msg
handler.publish = cb
# ensure the handler doesn't swallow this message
time.sleep(2.0 * handler.throttle_rate)
handler.handle_message(msg)
self.assertEqual(received["msg"], msg)
# sleep to make sure the handler sends right away for the second part
time.sleep(2.0 * handler.throttle_rate)
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
handler.publish = cb
x = 0
time_padding = handler.throttle_rate / 4.0
for i in range(1, 10):
# We guarantee that in the while loop below only the first message is handled
# All subsequent messages (within throttling window - time_padding ) are dropped
# Time padding is a test-only hack around race condition when time.time() - fin is within
# the throttling window, but handler.handle_message(x) gets a later timestamp that is outside.
time.sleep(2.0 * time_padding)
fin = time.time() + throttle_rate / 1000.0 - time_padding
while time.time() < fin:
handler.handle_message(x)
x = x + 1
self.assertEqual(len(received["msgs"]), i)
return handler
def help_test_queue(self, handler, queue_length):
handler = handler.set_queue_length(queue_length)
self.assertIsInstance(handler, subscribe.QueueMessageHandler)
received = {"msgs": []}
def cb(msg):
received["msgs"].append(msg)
handler.publish = cb
msgs = list(range(queue_length))
for x in msgs:
handler.handle_message(x)
time.sleep(0.1)
self.assertEqual(msgs, received["msgs"])
return handler
def help_test_queue_rate(self, handler, throttle_rate, queue_length):
handler = handler.set_throttle_rate(throttle_rate)
handler = handler.set_queue_length(queue_length)
self.assertIsInstance(handler, subscribe.QueueMessageHandler)
received = {"msg": None}
def cb(msg):
received["msg"] = msg
handler.publish = cb
throttle_rate_sec = throttle_rate / 1000.0
# ensure previous tests' last sent time is long enough ago
time.sleep(throttle_rate_sec)
for x in range(queue_length):
handler.handle_message(x)
time.sleep(throttle_rate_sec / 2.0)
try:
for x in range(10):
self.assertEqual(x, received["msg"])
time.sleep(throttle_rate_sec)
except: # noqa: E722 # Will finish and raise
handler.finish()
raise
return handler
# Test that each transition works and is stable
def test_transitions(self):
# MessageHandler.transition is stable
handler = subscribe.MessageHandler(None, self.dummy_cb)
next_handler = handler.transition()
self.assertEqual(handler, next_handler)
# Going from MessageHandler to ThrottleMessageHandler...
handler = subscribe.MessageHandler(None, self.dummy_cb)
next_handler = handler.set_throttle_rate(100)
self.assertIsInstance(next_handler, subscribe.ThrottleMessageHandler)
handler = next_handler
# Testing transition returns another ThrottleMessageHandler
next_handler = handler.transition()
self.assertEqual(handler, next_handler)
# And finally going back to MessageHandler
next_handler = handler.set_throttle_rate(0)
self.assertIsInstance(next_handler, subscribe.MessageHandler)
# Same for QueueMessageHandler
handler = subscribe.MessageHandler(None, self.dummy_cb)
next_handler = handler.set_queue_length(100)
self.assertIsInstance(next_handler, subscribe.QueueMessageHandler)
handler = next_handler
next_handler = handler.transition()
self.assertEqual(handler, next_handler)
next_handler = handler.set_queue_length(0)
self.assertIsInstance(next_handler, subscribe.MessageHandler)
# Checking a QueueMessageHandler with rate limit can be generated both ways
handler = subscribe.MessageHandler(None, self.dummy_cb)
next_handler = handler.set_queue_length(100).set_throttle_rate(100)
self.assertIsInstance(next_handler, subscribe.QueueMessageHandler)
next_handler.finish()
next_handler = handler.set_throttle_rate(100).set_queue_length(100)
self.assertIsInstance(next_handler, subscribe.QueueMessageHandler)
next_handler.finish()
handler = next_handler
next_handler = handler.transition()
self.assertEqual(handler, next_handler)
# Check both steps on the way back to plain MessageHandler
next_handler = handler.set_throttle_rate(0)
self.assertIsInstance(next_handler, subscribe.QueueMessageHandler)
next_handler = handler.set_queue_length(0)
self.assertIsInstance(next_handler, subscribe.MessageHandler)
def test_transition_functionality(self):
# Test individually
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_queue(handler, 10)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_throttle(handler, 50)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_default(handler)
handler.finish()
# Test combinations
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_queue(handler, 10)
handler = self.help_test_throttle(handler, 50)
handler = self.help_test_default(handler)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_queue(handler, 10)
handler = self.help_test_default(handler)
handler = self.help_test_throttle(handler, 50)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_throttle(handler, 50)
handler = self.help_test_queue_rate(handler, 50, 10)
handler = self.help_test_default(handler)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_throttle(handler, 50)
handler = self.help_test_default(handler)
handler = self.help_test_queue_rate(handler, 50, 10)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_default(handler)
handler = self.help_test_throttle(handler, 50)
handler = self.help_test_queue_rate(handler, 50, 10)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_default(handler)
handler = self.help_test_queue(handler, 10)
handler = self.help_test_throttle(handler, 50)
handler.finish()
# Test duplicates
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_queue_rate(handler, 50, 10)
handler = self.help_test_queue_rate(handler, 100, 10)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_throttle(handler, 50)
handler = self.help_test_throttle(handler, 100)
handler.finish()
handler = subscribe.MessageHandler(None, None)
handler = self.help_test_default(handler)
handler = self.help_test_default(handler)
handler.finish()
# handler = self.help_test_throttle(handler, 50)
# handler = self.help_test_default(handler)
# handler = self.help_test_throttle(handler, 50)
# handler = self.help_test_default(handler)
# handler = self.help_test_throttle(handler, 50)
PKG = "rosbridge_library"
NAME = "test_message_handlers"
if __name__ == "__main__":
rostest.unitrun(PKG, NAME, TestMessageHandlers)
|
[
"rosbridge_library.internal.subscription_modifiers.MessageHandler",
"time.time",
"time.sleep",
"rospy.init_node",
"rostest.unitrun"
] |
[((13106, 13153), 'rostest.unitrun', 'rostest.unitrun', (['PKG', 'NAME', 'TestMessageHandlers'], {}), '(PKG, NAME, TestMessageHandlers)\n', (13121, 13153), False, 'import rostest\n'), ((231, 271), 'rospy.init_node', 'rospy.init_node', (['"""test_message_handlers"""'], {}), "('test_message_handlers')\n", (246, 271), False, 'import rospy\n'), ((378, 423), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (402, 423), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((1409, 1443), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'cb'], {}), '(None, cb)\n', (1433, 1443), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((1703, 1719), 'time.sleep', 'time.sleep', (['(0.02)'], {}), '(0.02)\n', (1713, 1719), False, 'import time\n'), ((2090, 2105), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2100, 2105), False, 'import time\n'), ((2506, 2540), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'cb'], {}), '(None, cb)\n', (2530, 2540), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((3119, 3147), 'time.sleep', 'time.sleep', (['(queue_length + 3)'], {}), '(queue_length + 3)\n', (3129, 3147), False, 'import time\n'), ((3437, 3482), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (3461, 3482), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((4160, 4171), 'time.time', 'time.time', ([], {}), '()\n', (4169, 4171), False, 'import time\n'), ((4221, 4232), 'time.time', 'time.time', ([], {}), '()\n', (4230, 4232), False, 'import time\n'), ((5247, 5286), 'time.sleep', 'time.sleep', (['(2.0 * handler.throttle_rate)'], {}), '(2.0 * handler.throttle_rate)\n', (5257, 5286), False, 'import time\n'), ((5457, 5496), 'time.sleep', 'time.sleep', (['(2.0 * handler.throttle_rate)'], {}), '(2.0 * handler.throttle_rate)\n', (5467, 5496), False, 'import time\n'), ((6827, 6842), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6837, 6842), False, 'import time\n'), ((7425, 7454), 'time.sleep', 'time.sleep', (['throttle_rate_sec'], {}), '(throttle_rate_sec)\n', (7435, 7454), False, 'import time\n'), ((7540, 7575), 'time.sleep', 'time.sleep', (['(throttle_rate_sec / 2.0)'], {}), '(throttle_rate_sec / 2.0)\n', (7550, 7575), False, 'import time\n'), ((7996, 8041), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (8020, 8041), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((8218, 8263), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (8242, 8263), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((8818, 8863), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (8842, 8863), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((9339, 9384), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (9363, 9384), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((10277, 10313), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (10301, 10313), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((10410, 10446), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (10434, 10446), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((10546, 10582), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (10570, 10582), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((10705, 10741), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (10729, 10741), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((10943, 10979), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (10967, 10979), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((11181, 11217), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (11205, 11217), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((11428, 11464), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (11452, 11464), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((11675, 11711), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (11699, 11711), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((11922, 11958), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (11946, 11958), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((12186, 12222), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (12210, 12222), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((12390, 12426), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (12414, 12426), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((12582, 12618), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'None'], {}), '(None, None)\n', (12606, 12618), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((561, 606), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (585, 606), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((756, 801), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'self.dummy_cb'], {}), '(None, self.dummy_cb)\n', (780, 801), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((1065, 1099), 'rosbridge_library.internal.subscription_modifiers.MessageHandler', 'subscribe.MessageHandler', (['None', 'cb'], {}), '(None, cb)\n', (1089, 1099), True, 'from rosbridge_library.internal import subscription_modifiers as subscribe\n'), ((2408, 2421), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2418, 2421), False, 'import time\n'), ((3031, 3044), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (3041, 3044), False, 'import time\n'), ((6123, 6153), 'time.sleep', 'time.sleep', (['(2.0 * time_padding)'], {}), '(2.0 * time_padding)\n', (6133, 6153), False, 'import time\n'), ((6242, 6253), 'time.time', 'time.time', ([], {}), '()\n', (6251, 6253), False, 'import time\n'), ((7691, 7720), 'time.sleep', 'time.sleep', (['throttle_rate_sec'], {}), '(throttle_rate_sec)\n', (7701, 7720), False, 'import time\n'), ((6172, 6183), 'time.time', 'time.time', ([], {}), '()\n', (6181, 6183), False, 'import time\n')]
|
import math
import time
import logging
import socket
import select
try:
import socketserver
except ImportError:
import SocketServer as socketserver
def ping(addr, count=20, timeout=1):
"""UDP ping client"""
# print "--- PING %s:%d ---" % addr
results = []
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
for i in range(count):
ts = time.time()
data = 'PING %d %f %s' % (i, ts, '#' * 480)
data = data.encode('utf-8')
sock.sendto(data, addr)
readables, writeables, exceptions = select.select(
[sock], [], [], timeout)
# exception
if exceptions:
time.sleep(1)
continue
# timeout
if (readables, writeables, exceptions) == ([], [], []):
continue
if readables:
ret = readables[0].recv(512)
if ret == data:
time_spent = (time.time() - ts) * 1000
results.append(time_spent)
# print '%d bytes from %s:%d, seq=%d time=%.3f ms' % (len(data), addr[0], addr[1], i, time_spent)
received = len(results)
missing = count - received
loss = count - received
# print "--- %s:%d ping statistics---" % addr
# print "%d packets transmitted, %d packets received, %.1f%% packet loss" % (count, received, float(loss)*100/count)
logging.debug("ping %s result: %d transmitted, %d received, %.1f%% loss",
addr, count, received, float(loss) * 100 // count)
if received != 0:
min_val = min(results)
max_val = max(results)
avg = sum(results) // count
stddev = math.sqrt(sum([(x - avg) ** 2 for x in results]) // received)
# print "round-trip min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f" % (min_val, avg, max_val, stddev)
logging.debug("ping %s min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f", addr, min_val, avg, max_val, stddev)
return missing * 500 + avg
else:
return float("inf")
class PingHandler(socketserver.BaseRequestHandler):
"""UDP Ping server handler"""
def handle(self):
data = self.request[0].strip()
sock = self.request[1]
sock.sendto(data, self.client_address)
# print data
# Test client
# import threading
# for x in range(10):
# threading.Thread(target = ping, args = (('172.16.31.10', 8888),)).start()
|
[
"logging.debug",
"socket.socket",
"time.sleep",
"time.time",
"select.select"
] |
[((290, 338), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (303, 338), False, 'import socket\n'), ((379, 390), 'time.time', 'time.time', ([], {}), '()\n', (388, 390), False, 'import time\n'), ((555, 593), 'select.select', 'select.select', (['[sock]', '[]', '[]', 'timeout'], {}), '([sock], [], [], timeout)\n', (568, 593), False, 'import select\n'), ((1820, 1926), 'logging.debug', 'logging.debug', (['"""ping %s min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f"""', 'addr', 'min_val', 'avg', 'max_val', 'stddev'], {}), "('ping %s min/avg/max/stddev = %.3f/%.3f/%.3f/%.3f', addr,\n min_val, avg, max_val, stddev)\n", (1833, 1926), False, 'import logging\n'), ((662, 675), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (672, 675), False, 'import time\n'), ((921, 932), 'time.time', 'time.time', ([], {}), '()\n', (930, 932), False, 'import time\n')]
|
# Copyright (c) 2011-2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
"""Networking related functionality."""
from __future__ import annotations
import copy
import threading
import weakref
from enum import Enum
from typing import TYPE_CHECKING
import _ba
if TYPE_CHECKING:
from typing import Any, Dict, Union, Callable, Optional
import socket
import ba
ServerCallbackType = Callable[[Union[None, Dict[str, Any]]], None]
def get_ip_address_type(addr: str) -> socket.AddressFamily:
"""Return socket.AF_INET6 or socket.AF_INET4 for the provided address."""
import socket
socket_type = None
# First try it as an ipv4 address.
try:
socket.inet_pton(socket.AF_INET, addr)
socket_type = socket.AF_INET
except OSError:
pass
# Hmm apparently not ipv4; try ipv6.
if socket_type is None:
try:
socket.inet_pton(socket.AF_INET6, addr)
socket_type = socket.AF_INET6
except OSError:
pass
if socket_type is None:
raise ValueError('addr seems to be neither v4 or v6: ' + str(addr))
return socket_type
class ServerResponseType(Enum):
"""How to interpret responses from the server."""
JSON = 0
class ServerCallThread(threading.Thread):
"""Thread to communicate with the master server."""
def __init__(self, request: str, request_type: str,
data: Optional[Dict[str, Any]],
callback: Optional[ServerCallbackType],
response_type: ServerResponseType):
super().__init__()
self._request = request
self._request_type = request_type
if not isinstance(response_type, ServerResponseType):
raise TypeError(f'Invalid response type: {response_type}')
self._response_type = response_type
self._data = {} if data is None else copy.deepcopy(data)
self._callback: Optional[ServerCallbackType] = callback
self._context = _ba.Context('current')
# Save and restore the context we were created from.
activity = _ba.getactivity(doraise=False)
self._activity = weakref.ref(
activity) if activity is not None else None
def _run_callback(self, arg: Union[None, Dict[str, Any]]) -> None:
# If we were created in an activity context and that activity has
# since died, do nothing.
# FIXME: Should we just be using a ContextCall instead of doing
# this check manually?
if self._activity is not None:
activity = self._activity()
if activity is None or activity.expired:
return
# Technically we could do the same check for session contexts,
# but not gonna worry about it for now.
assert self._context is not None
assert self._callback is not None
with self._context:
self._callback(arg)
def run(self) -> None:
import urllib.request
import urllib.error
import json
from ba import _general
try:
self._data = _general.utf8_all(self._data)
_ba.set_thread_name('BA_ServerCallThread')
# Seems pycharm doesn't know about urllib.parse.
parse = urllib.parse
if self._request_type == 'get':
response = urllib.request.urlopen(
urllib.request.Request(
(_ba.get_master_server_address() + '/' +
self._request + '?' + parse.urlencode(self._data)),
None, {'User-Agent': _ba.app.user_agent_string}))
elif self._request_type == 'post':
response = urllib.request.urlopen(
urllib.request.Request(
_ba.get_master_server_address() + '/' + self._request,
parse.urlencode(self._data).encode(),
{'User-Agent': _ba.app.user_agent_string}))
else:
raise TypeError('Invalid request_type: ' + self._request_type)
# If html request failed.
if response.getcode() != 200:
response_data = None
elif self._response_type == ServerResponseType.JSON:
raw_data = response.read()
# Empty string here means something failed server side.
if raw_data == b'':
response_data = None
else:
# Json.loads requires str in python < 3.6.
raw_data_s = raw_data.decode()
response_data = json.loads(raw_data_s)
else:
raise TypeError(f'invalid responsetype: {self._response_type}')
except (urllib.error.URLError, ConnectionError):
# Server rejected us, broken pipe, etc. It happens. Ignoring.
response_data = None
except Exception as exc:
# Any other error here is unexpected, so let's make a note of it.
print('Exc in ServerCallThread:', exc)
import traceback
traceback.print_exc()
response_data = None
if self._callback is not None:
_ba.pushcall(_general.Call(self._run_callback, response_data),
from_other_thread=True)
def serverget(
request: str,
data: Dict[str, Any],
callback: Optional[ServerCallbackType] = None,
response_type: ServerResponseType = ServerResponseType.JSON) -> None:
"""Make a call to the master server via a http GET."""
ServerCallThread(request, 'get', data, callback, response_type).start()
def serverput(
request: str,
data: Dict[str, Any],
callback: Optional[ServerCallbackType] = None,
response_type: ServerResponseType = ServerResponseType.JSON) -> None:
"""Make a call to the master server via a http POST."""
ServerCallThread(request, 'post', data, callback, response_type).start()
|
[
"copy.deepcopy",
"traceback.print_exc",
"json.loads",
"_ba.getactivity",
"ba._general.utf8_all",
"ba._general.Call",
"_ba.get_master_server_address",
"_ba.set_thread_name",
"_ba.Context",
"socket.inet_pton",
"weakref.ref"
] |
[((1777, 1815), 'socket.inet_pton', 'socket.inet_pton', (['socket.AF_INET', 'addr'], {}), '(socket.AF_INET, addr)\n', (1793, 1815), False, 'import socket\n'), ((3079, 3101), '_ba.Context', '_ba.Context', (['"""current"""'], {}), "('current')\n", (3090, 3101), False, 'import _ba\n'), ((3183, 3213), '_ba.getactivity', '_ba.getactivity', ([], {'doraise': '(False)'}), '(doraise=False)\n', (3198, 3213), False, 'import _ba\n'), ((1981, 2020), 'socket.inet_pton', 'socket.inet_pton', (['socket.AF_INET6', 'addr'], {}), '(socket.AF_INET6, addr)\n', (1997, 2020), False, 'import socket\n'), ((2971, 2990), 'copy.deepcopy', 'copy.deepcopy', (['data'], {}), '(data)\n', (2984, 2990), False, 'import copy\n'), ((3239, 3260), 'weakref.ref', 'weakref.ref', (['activity'], {}), '(activity)\n', (3250, 3260), False, 'import weakref\n'), ((4185, 4214), 'ba._general.utf8_all', '_general.utf8_all', (['self._data'], {}), '(self._data)\n', (4202, 4214), False, 'from ba import _general\n'), ((4227, 4269), '_ba.set_thread_name', '_ba.set_thread_name', (['"""BA_ServerCallThread"""'], {}), "('BA_ServerCallThread')\n", (4246, 4269), False, 'import _ba\n'), ((6205, 6226), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6224, 6226), False, 'import traceback\n'), ((6325, 6373), 'ba._general.Call', '_general.Call', (['self._run_callback', 'response_data'], {}), '(self._run_callback, response_data)\n', (6338, 6373), False, 'from ba import _general\n'), ((5716, 5738), 'json.loads', 'json.loads', (['raw_data_s'], {}), '(raw_data_s)\n', (5726, 5738), False, 'import json\n'), ((4886, 4917), '_ba.get_master_server_address', '_ba.get_master_server_address', ([], {}), '()\n', (4915, 4917), False, 'import _ba\n'), ((4529, 4560), '_ba.get_master_server_address', '_ba.get_master_server_address', ([], {}), '()\n', (4558, 4560), False, 'import _ba\n')]
|
from dsl_parser import SchemeParser, Accumulator, Cons
import transform
def compute_buffer_length(bytecode_list):
result = 0
while bytecode_list is not None:
result += len(bytecode_list.car)
bytecode_list = bytecode_list.cdr
return result
RULES = {}
def load_transforms(path):
sexp = None
with open(path, 'rb') as fd:
parser = SchemeParser()
sexp = parser.parse(fd)
while sexp is not None:
rule = transform.Transform(sexp.car)
RULES[rule.name] = rule
sexp = sexp.cdr
def transform_repeatedly_cb(callback, sexp):
changed = True
while changed:
result = callback(sexp)
if result is None:
return None
elif result is transform.UNCHANGED:
changed = False
else:
sexp = result.car
return sexp
def transform_repeatedly(name, original_sexp):
def callback(sexp):
return RULES[name].recursively_transform(sexp)
return transform_repeatedly_cb(callback, original_sexp)
def apply_multiple_transforms(rules, apply_transform, current):
changed = False
for rule in rules:
result = apply_transform(rule, current)
if result is None: # deleted by rule
return None
elif result is not transform.UNCHANGED:
current = result.car
changed = True
if changed:
return Cons(current, None)
else:
return transform.UNCHANGED
def expand_AND_OR(sexp):
rules = (RULES['expand-and'], RULES['expand-and/cleanup'],
RULES['expand-or'], RULES['expand-or/cleanup'])
return apply_multiple_transforms(
rules,
lambda rule, current: rule.recursively_transform(current),
sexp
)
def expand_compute_input_AND_OR(sexp):
return apply_multiple_transforms(
(RULES['defuzzer-input/and'], RULES['defuzzer-input/or']),
lambda rule, current: rule.recursively_transform(current),
sexp
)
def expand_if_statement(sexp):
if sexp.car != 'if':
return sexp
else:
return transform_repeatedly_cb(expand_AND_OR, sexp)
def sexp_map(func, sexp):
acc = Accumulator()
while sexp is not None:
acc.append(func(sexp.car))
sexp = sexp.cdr
return acc.to_list()
def compile_to_bytecode(sexp):
# lift all nested if statements
sexp = transform_repeatedly('if-lifting', sexp)
sexp = transform_repeatedly('delete-empty-then', sexp)
# expand all conditions in the if statements
sexp = sexp_map(expand_if_statement, sexp)
# expand if statements
sexp = transform_repeatedly('compile-if-statement', sexp)
# expand %compute-defuzzer-input
sexp = transform_repeatedly_cb(expand_compute_input_AND_OR, sexp)
sexp = transform_repeatedly('defuzzer-input/is', sexp)
# expand %compute-defuzzer-output
sexp = transform_repeatedly('defuzzer-output/begin', sexp)
sexp = transform_repeatedly('defuzzer-output/empty-begin', sexp)
sexp = transform_repeatedly('defuzzer-output/set!', sexp)
return sexp
def main():
load_transforms('transform.scm')
sexp = SchemeParser().parse(open('rule.scm', 'rb'))
return compile_to_bytecode(sexp)
class Constants:
@staticmethod
def assign_number(dictionary):
return dict(zip(dictionary.keys(), range(len(dictionary))))
@staticmethod
def flatten(dictionary, index_info):
array = [None] * len(dictionary)
for name, index in index_info.items():
array[index] = dictionary[name]
return array
def __init__(self, inputs, outputs, bytecode):
self.input_to_index = self.assign_number(inputs)
self.output_to_index = self.assign_number(outputs)
self.inputs = [None] * len(inputs)
mf = {}
for key in self.extract_member_function_calls(bytecode):
if key[2] == 'in' and key[0] not in self.input_to_index:
raise RuntimeError('Unknown input {}'.format(key[0]))
elif key[2] == 'out' and key[0] not in self.output_to_index:
raise RuntimeError('Unknown output {}'.format(key[0]))
if key[2] == 'in':
mf[(key[0], key[1])] = getattr(inputs[key[0]], key[1])
else:
mf[(key[0], key[1])] = getattr(outputs[key[0]], key[1])
self.member_function_to_index = self.assign_number(mf)
self.member_functions = self.flatten(mf, self.member_function_to_index)
@staticmethod
def extract_member_function_calls(bytecode):
while bytecode is not None:
instruction = bytecode.car
if instruction.car == '%call-member-function':
yield (instruction.cdr.car, instruction.cdr.cdr.car, 'in')
elif instruction.car == '%feed':
yield (instruction.cdr.car, instruction.cdr.cdr.car, 'out')
bytecode = bytecode.cdr
def eliminate_map_lookup(self, bytecode):
return sexp_map(self._indexify, bytecode)
def get_member_function(self, cons):
key = (cons.car, cons.cdr.car)
idx = self.member_function_to_index[key]
return self.member_functions[idx]
def _indexify(self, instruction):
if instruction.car == '%get-input':
# (%get-input <name>)
# => (%get-input-by-index <index>)
idx = self.input_to_index[instruction.cdr.car]
return Cons('%get-input-by-index', Cons(idx, None))
elif instruction.car == '%call-member-function':
# (%call-member-function <input> <level>)
# => (%call-function-by-ref <triangle-function-ref>)
ref = self.get_member_function(instruction.cdr)
return Cons('%call-function-by-ref', Cons(ref, None))
elif instruction.car == '%feed':
# (%feed <output> <level>)
# => (%feed-defuzzer-fast <defuzzer-index> <x2>)
idx = self.output_to_index[instruction.cdr.car]
mf = self.get_member_function(instruction.cdr)
return Cons('%feed-defuzzer-fast', Cons(idx, Cons(mf.x2, None)))
else:
return instruction
def init():
load_transforms('transform.scm')
init()
|
[
"dsl_parser.Cons",
"dsl_parser.Accumulator",
"dsl_parser.SchemeParser",
"transform.Transform"
] |
[((2173, 2186), 'dsl_parser.Accumulator', 'Accumulator', ([], {}), '()\n', (2184, 2186), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((374, 388), 'dsl_parser.SchemeParser', 'SchemeParser', ([], {}), '()\n', (386, 388), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((464, 493), 'transform.Transform', 'transform.Transform', (['sexp.car'], {}), '(sexp.car)\n', (483, 493), False, 'import transform\n'), ((1404, 1423), 'dsl_parser.Cons', 'Cons', (['current', 'None'], {}), '(current, None)\n', (1408, 1423), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((3141, 3155), 'dsl_parser.SchemeParser', 'SchemeParser', ([], {}), '()\n', (3153, 3155), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((5461, 5476), 'dsl_parser.Cons', 'Cons', (['idx', 'None'], {}), '(idx, None)\n', (5465, 5476), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((5763, 5778), 'dsl_parser.Cons', 'Cons', (['ref', 'None'], {}), '(ref, None)\n', (5767, 5778), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n'), ((6097, 6114), 'dsl_parser.Cons', 'Cons', (['mf.x2', 'None'], {}), '(mf.x2, None)\n', (6101, 6114), False, 'from dsl_parser import SchemeParser, Accumulator, Cons\n')]
|
#!/usr/bin/python3
"""
Simplify AST-XML structures for later generation of Python files.
"""
from optparse import OptionParser
import os
import os.path
import sys
from xml.etree import ElementTree
from xml.dom import minidom # type: ignore
import logging
import importlib
from importlib import machinery
from . import pytnyzer
from . import flscriptparse
from typing import List, Type, Optional, Dict, Tuple, Any, Callable, cast, Iterable
STRICT_MODE = pytnyzer.STRICT_MODE
importlib.reload(pytnyzer)
pytnyzer.STRICT_MODE = STRICT_MODE
TreeData = Dict[str, Any]
LOGGER = logging.getLogger(__name__)
USEFUL_TOKENS = "ID,ICONST,FCONST,SCONST,CCONST,RXCONST".split(",")
KNOWN_PARSERS: Dict[str, Type["TagObjectBase"]] = {}
UNKNOWN_PARSERS = {}
def parse_for(*tag_names: str) -> Callable:
"""Decorate functions for registering tags."""
global KNOWN_PARSERS
def decorator(func: Type["TagObjectBase"]) -> Type["TagObjectBase"]:
for tag_name in tag_names:
KNOWN_PARSERS[tag_name] = func
return func
return decorator
def parse(tag_name: str, tree_data: TreeData) -> "TagObject":
"""Excecute registered function for given tagname on treedata."""
global KNOWN_PARSERS, UNKNOWN_PARSERS
if tag_name not in KNOWN_PARSERS:
UNKNOWN_PARSERS[tag_name] = 1
func = parse_unknown
else:
func = KNOWN_PARSERS[tag_name]
return func(tag_name, tree_data)
def getxmltagname(tag_name: str) -> str:
"""Transform tag names."""
if tag_name == "source":
return "Source"
elif tag_name == "funcdeclaration":
return "Function"
elif tag_name == "classdeclaration":
return "Class"
elif tag_name == "vardeclaration":
return "Variable"
else:
return "Unknown.%s" % tag_name
class TagObjectBase:
"""Base class for registering tag processors."""
tags: List[str] = []
@classmethod
def can_process_tag(cls, tagname: str) -> bool:
"""Return if tagname is in class known tags."""
return tagname in cls.tags
def __init__(self, tagname: str) -> None:
"""Create base object for processing tags."""
self.astname = tagname
def add_subelem(self, argn: int, subelem: "TagObject") -> None:
"""Abstract function for adding sub elements."""
def add_value(self, argn: int, vtype: str, value: str) -> None:
"""Abstract function for adding values."""
def add_other(self, argn: int, vtype: str, data: str) -> None:
"""Abstract function for adding other types of data."""
XML_CLASS_TYPES: List[Type[TagObjectBase]] = []
class TagObjectFactory(type):
"""Metaclass for registering tag processors."""
def __init__(cls, name: str, bases: Any, dct: Any) -> None:
"""Register a new class as tag processor."""
global XML_CLASS_TYPES
if issubclass(cls, TagObjectBase):
XML_CLASS_TYPES.append(cast(Type[TagObjectBase], cls))
else:
raise Exception("This metaclass must be used as a subclass of TagObjectBase")
super().__init__(name, bases, dct)
class TagObject(TagObjectBase, metaclass=TagObjectFactory):
"""Process XML tags for simplification. Main class with shared functionality."""
set_child_argn = False
name_is_first_id = False
debug_other = True
adopt_childs_tags: List[str] = []
omit_tags = ["empty"]
callback_subelem: Dict[Type["TagObject"], str] = {}
promote_child_if_alone = False
@classmethod
def tagname(self, tagname: str) -> str:
"""Return processed target tag name."""
return self.__name__
def __init__(self, tagname: str) -> None:
"""Create new processor."""
super().__init__(tagname)
self.xml = ElementTree.Element(self.tagname(tagname))
self.xmlname: Optional[str] = None
self.subelems: List[Any] = []
self.values: List[Tuple[str, str]] = []
if self.name_is_first_id:
self.xml.set("name", "")
def adopt_children(self, argn: int, subelem: "TagObject"):
"""Simplify tree by "merging" childs into itself."""
for child in list(subelem.xml):
if self.set_child_argn:
child.set("argn", str(argn))
else:
if "argn" in child.attrib:
del child.attrib["argn"]
self.xml.append(child)
def omit_subelem(self, argn: int, subelem: "TagObject"):
"""Abstract function. Simplifies XML by removing unwanted terms."""
return
def is_in(self, listobj: Iterable) -> bool:
"""Return if the class type appears in any of the items."""
return self.__class__ in listobj or self.astname in listobj
def get(self, listobj: Dict[Any, str], default=None) -> Any:
"""Retrieve value from list based on this class type."""
if self.__class__ in listobj:
return listobj[self.__class__]
if self.astname in listobj:
return listobj[self.astname]
return default
def add_subelem(self, argn: int, subelem: "TagObject") -> None:
"""Add a new XML child."""
if subelem.is_in(self.omit_tags):
return self.omit_subelem(argn, subelem)
if subelem.is_in(self.adopt_childs_tags):
return self.adopt_children(argn, subelem)
callback = subelem.get(self.callback_subelem)
if callback:
return getattr(self, callback)(argn, subelem)
if self.set_child_argn:
subelem.xml.set("argn", str(argn))
self.xml.append(subelem.xml)
self.subelems.append(subelem)
def add_value(self, argn: int, vtype: str, value: str) -> None:
"""Add a new XML value."""
self.values.append((vtype, value))
if vtype == "ID" and self.name_is_first_id and self.xmlname is None:
self.xmlname = value
self.xml.set("name", value)
return
self.xml.set("arg%02d" % argn, vtype + ":" + repr(value))
def add_other(self, argn: int, vtype: str, data: str) -> None:
"""Add extra data to XML."""
if self.debug_other:
self.xml.set("arg%02d" % argn, vtype)
def polish(self) -> "TagObject":
"""Clean up the structure by removing or merging some data."""
if self.promote_child_if_alone:
if len(self.values) == 0 and len(self.subelems) == 1:
return self.subelems[0]
return self
class ListObject(TagObject):
"""Base class for list objects."""
set_child_argn = False
debug_other = False
class NamedObject(TagObject):
"""Base class for objects with names."""
name_is_first_id = True
debug_other = False
class ListNamedObject(TagObject):
"""Base class for list objects with names."""
name_is_first_id = True
set_child_argn = False
debug_other = False
class TypedObject(ListObject):
"""Base class for typed objects."""
type_arg = 0
def add_other(self, argn, vtype, value):
"""Add extra data to XML."""
if argn == self.type_arg:
self.xml.set("type", vtype)
class Source(ListObject):
"""Process Source tags."""
tags = ["source", "basicsource", "classdeclarationsource", "statement_list", "statement_block"]
adopt_childs_tags = ["source_element", "statement_list", "statement", "statement_block"]
class Identifier(NamedObject):
"""Process Identifier tags."""
tags = ["identifier", "optid"]
def polish(self):
"""Fix astname attribute."""
if self.xmlname is None:
self.astname = "empty"
return self
class Arguments(ListObject):
"""Process Argument tags."""
tags = ["arglist"]
adopt_childs_tags = ["vardecl_list"]
class VariableType(NamedObject):
"""Process VariableType tags."""
tags = ["optvartype"]
def polish(self):
"""Fix astname attribute."""
if self.xmlname is None:
self.astname = "empty"
return self
class ExtendsType(NamedObject):
"""Process ExtendsType tags."""
tags = ["optextends"]
def polish(self):
"""Fix astname attribute."""
if self.xmlname is None:
self.astname = "empty"
return self
class Function(ListNamedObject):
"""Process Function tags."""
tags = ["funcdeclaration"]
callback_subelem = ListNamedObject.callback_subelem.copy()
callback_subelem[VariableType] = "add_vartype"
def add_vartype(self, argn, subelem):
"""Add returns notation."""
self.xml.set("returns", str(subelem.xmlname))
class FunctionAnon(ListObject):
"""Process FunctionAnon tags."""
tags = ["funcdeclaration_anon"]
class FunctionAnonExec(ListObject):
"""Process FunctionAnonExec tags."""
tags = ["funcdeclaration_anon_exec"]
class Variable(NamedObject):
"""Process Variable tags."""
tags = ["vardecl"]
callback_subelem = NamedObject.callback_subelem.copy()
callback_subelem[VariableType] = "add_vartype"
def add_vartype(self, argn, subelem):
"""Add type notation."""
self.xml.set("type", str(subelem.xmlname))
class DeclarationBlock(ListObject):
"""Process DeclarationBlock tags."""
tags = ["vardeclaration"]
adopt_childs_tags = ["vardecl_list"]
def add_other(self, argn, vtype, value):
"""Add debug info."""
if argn == 0:
self.xml.set("mode", vtype)
def polish(self):
"""Cleanup."""
# if len(self.values) == 0 and len(self.subelems) == 1:
# self.subelems[0].xml.set("mode",self.xml.get("mode"))
# return self.subelems[0]
return self
class Class(ListNamedObject):
"""Process Class tags."""
tags = ["classdeclaration"]
callback_subelem = ListNamedObject.callback_subelem.copy()
callback_subelem[ExtendsType] = "add_exttype"
def add_exttype(self, argn, subelem):
"""Add extends notation."""
self.xml.set("extends", str(subelem.xmlname))
class Member(TagObject):
"""Process Member tags."""
debug_other = False
set_child_argn = False
tags = ["member_var", "member_call"]
adopt_childs_tags = ["varmemcall", "member_var", "member_call"]
class ArrayMember(TagObject):
"""Process ArrayMember tags."""
debug_other = False
set_child_argn = False
tags = ["array_member"]
adopt_childs_tags = ["variable_1", "func_call"]
class InstructionCall(TagObject):
"""Process InstructionCall tags."""
debug_other = False
tags = ["callinstruction"]
class InstructionStore(TagObject):
"""Process InstructionStore tags."""
promote_child_if_alone = True
debug_other = False
tags = ["storeinstruction"]
class InstructionFlow(TypedObject):
"""Process InstructionFlow tags."""
debug_other = True
tags = ["flowinstruction"]
class Instruction(TagObject):
"""Process Instruction tags."""
promote_child_if_alone = True
debug_other = False
tags = ["instruction"]
class OpMath(TypedObject):
"""Process OpMath tags."""
debug_other = True
tags = ["mathoperator"]
class Compare(TypedObject):
"""Process Compare tags."""
debug_other = True
tags = ["cmp_symbol", "boolcmp_symbol"]
class FunctionCall(NamedObject):
"""Process FunctionCall tags."""
tags = ["funccall_1"]
class CallArguments(ListObject):
"""Process CallArguments tags."""
tags = ["callargs"]
class Constant(ListObject):
"""Process Constant tags."""
tags = ["constant"]
def add_value(self, argn: int, vtype: str, value: str) -> None:
"""Add value notation."""
value = str(value) # str(value,"ISO-8859-15","replace")
if vtype == "SCONST":
vtype = "String"
value = value[1:-1]
self.xml.set("delim", '"')
if vtype == "CCONST":
vtype = "String"
value = value[1:-1]
self.xml.set("delim", "'")
if vtype == "RCONST":
vtype = "Regex"
if vtype == "ICONST":
vtype = "Number"
if vtype == "FCONST":
vtype = "Number"
self.const_value = value
self.const_type = vtype
self.xml.set("type", vtype)
self.xml.set("value", value)
class InlineUpdate(ListObject):
"""Process InlineUpdate tags."""
tags = ["inlinestoreinstruction"]
def add_other(self, argn, vtype, value):
"""Add debug info."""
self.xml.set("type", vtype)
if argn == 0:
self.xml.set("mode", "update-read")
if argn == 1:
self.xml.set("mode", "read-update")
class If(ListObject):
"""Process If tags."""
tags = ["ifstatement"]
class Condition(ListObject):
"""Process Condition tags."""
tags = ["condition"]
class Else(ListObject):
"""Process Else tags."""
tags = ["optelse"]
def polish(self):
"""Fix astname."""
if len(self.subelems) == 0:
self.astname = "empty"
return self
class DictObject(ListObject):
"""Process DictObject tags."""
tags = ["dictobject_value_elemlist", "dictobject_value"]
adopt_childs_tags = ["dictobject_value_elemlist", "dictobject_value"]
class DictElem(ListObject):
"""Process DictElem tags."""
tags = ["dictobject_value_elem"]
class ExpressionContainer(ListObject):
"""Process ExpressionContainer tags."""
tags = ["expression"]
# adopt_childs_tags = ['base_expression']
def polish(self):
"""Fix internal expressions."""
if len(self.values) == 0 and len(self.subelems) == 1:
# if isinstance(self.subelems[0], Constant):
if self.subelems[0].xml.tag == "base_expression":
self.subelems[0].xml.tag = "Expression"
return self.subelems[0]
else:
self.xml.tag = "Value"
return self
class InstructionUpdate(ListObject):
"""Process InstructionUpdate tags."""
tags = ["updateinstruction"]
class Switch(ListObject):
"""Process Switch tags."""
tags = ["switch"]
adopt_childs_tags = ["case_cblock_list", "case_block_list"]
class CaseList(ListObject):
"""Process CaseList tags."""
tags = ["case_block_list"]
adopt_childs_tags = ["case_cblock_list", "case_block_list"]
class Case(ListObject):
"""Process ExtendsType tags."""
tags = ["case_block"]
class CaseDefault(ListObject):
"""Process CaseDefault tags."""
tags = ["case_default"]
class While(ListObject):
"""Process While tags."""
tags = ["whilestatement"]
class For(ListObject):
"""Process For tags."""
tags = ["forstatement"]
class ForInitialize(ListObject):
"""Process ForInitialize tags."""
tags = ["for_initialize"]
class ForCompare(ListObject):
"""Process ExtendsType tags."""
tags = ["for_compare"]
class ForIncrement(ListObject):
"""Process ExtendsType tags."""
tags = ["for_increment"]
class DoWhile(ListObject):
"""Process DoWhile tags."""
tags = ["dowhilestatement"]
class ForIn(ListObject):
"""Process ExtendsType tags."""
tags = ["forinstatement"]
class With(ListObject):
"""Process ExtendsType tags."""
tags = ["withstatement"]
class TryCatch(ListObject):
"""Process TryCatch tags."""
tags = ["trycatch"]
class New(ListObject):
"""Process New tags."""
tags = ["new_operator"]
class Delete(ListObject):
"""Process Delete tags."""
tags = ["deleteinstruction"]
class Parentheses(ListObject):
"""Process ExtendsType tags."""
tags = ["parentheses"]
adopt_childs_tags = ["base_expression"]
class OpUnary(TypedObject):
"""Process OpUnary tags."""
tags = ["unary_operator"]
class OpTernary(ListObject):
"""Process OpTernary tags."""
tags = ["ternary_operator"]
class OpUpdate(TypedObject):
"""Process OpUpdate tags."""
tags = ["updateoperator"]
# ----- keep this one at the end.
class Unknown(TagObject):
"""Process Unknown tags."""
promote_child_if_alone = True
set_child_argn = False
@classmethod
def tagname(self, tagname):
"""Just return tagname."""
return tagname
@classmethod
def can_process_tag(self, tagname):
"""Just return true."""
return True
# -----------------
def create_xml(tagname) -> Optional[TagObject]:
"""Create processor for tagname by inspecting first known processor that fits."""
classobj = None
for cls in XML_CLASS_TYPES:
if cls.can_process_tag(tagname):
classobj = cls
break
if classobj is None:
return None
if issubclass(classobj, TagObject):
return classobj(tagname)
else:
raise ValueError("Unexpected class %s" % classobj)
def parse_unknown(tagname, treedata):
"""Parse anything and error handling."""
xmlelem = create_xml(tagname)
if xmlelem is None:
raise Exception("No class for parsing tagname %s" % tagname)
position = 0
for key, value in treedata["content"]:
if type(value) is dict:
instruction = parse(key, value)
xmlelem.add_subelem(position, instruction)
elif key in USEFUL_TOKENS:
xmlelem.add_value(position, key, value)
else:
xmlelem.add_other(position, key, value)
position += 1
return xmlelem.polish()
def post_parse(treedata: TreeData):
"""Parse a xml. Convenience function."""
source = parse("source", treedata)
# print UNKNOWN_PARSERS.keys()
return source.xml
class Module(object):
"""Python code tester for pineboo-parse."""
def __init__(self, name: str, path: str) -> None:
"""Create Module."""
self.name = name
self.path = path
def loadModule(self):
"""Import and return Python file."""
try:
name = self.name[: self.name.find(".")]
loader = machinery.SourceFileLoader(name, os.path.join(self.path, self.name))
self.module = loader.load_module() # type: ignore[call-arg] # noqa: F821
result = True
except FileNotFoundError:
LOGGER.error("Fichero %r no encontrado" % self.name)
result = False
except Exception:
LOGGER.exception("Unexpected exception on loadModule")
result = False
return result
def parse_args(argv: List[str]) -> Tuple[Any, List[str]]:
"""Define parsing arguments for the program."""
parser = OptionParser()
parser.add_option(
"-q",
"--quiet",
action="store_false",
dest="verbose",
default=True,
help="don't print status messages to stdout",
)
parser.add_option(
"--optdebug",
action="store_true",
dest="optdebug",
default=False,
help="debug optparse module",
)
parser.add_option(
"--debug",
action="store_true",
dest="debug",
default=False,
help="prints lots of useless messages",
)
parser.add_option("--path", dest="storepath", default=None, help="store XML results in PATH")
parser.add_option(
"--topython",
action="store_true",
dest="topython",
default=False,
help="write python file from xml",
)
parser.add_option(
"--exec-py",
action="store_true",
dest="exec_python",
default=False,
help="try to execute python file",
)
parser.add_option(
"--toxml", action="store_true", dest="toxml", default=False, help="write xml file from qs"
)
parser.add_option(
"--full", action="store_true", dest="full", default=False, help="write xml file from qs"
)
parser.add_option(
"--cache",
action="store_true",
dest="cache",
default=False,
help="If dest file exists, don't regenerate it",
)
parser.add_option(
"--strict",
action="store_true",
dest="strict",
default=False,
help="Enable STRICT_MODE on pytnyzer",
)
parser.add_option(
"--python-ext",
dest="python_ext",
default=".qs.py",
help="Change Python file extension (default: '.qs.py')",
)
(options, args) = parser.parse_args(argv)
return (options, args)
def main() -> None:
"""Run the program from command line."""
log_format = "%(asctime)s - %(levelname)s: %(name)s: %(message)s"
logging.basicConfig(format=log_format, level=0)
blib_logger = logging.getLogger("blib2to3.pgen2.driver")
blib_logger.setLevel(logging.WARNING)
options, args = parse_args(sys.argv[1:])
execute(options, args)
def pythonify(filelist: List[str], arguments: List[str] = []) -> None:
"""Convert to python the files included in the list."""
if not isinstance(filelist, list):
raise ValueError("First argument must be a list")
options, args = parse_args(arguments)
options.full = True
execute(options, filelist)
def pythonify2(filename: str, known_refs: Dict[str, Tuple[str, str]] = {}) -> str:
"""Convert File to Python. Faster version as does not write to disk. Avoids re-parsing XML."""
filecontent = open(filename, "r", encoding="latin-1").read()
prog = flscriptparse.parse(filecontent)
if not prog:
raise Exception("Parse failed")
if prog["error_count"] > 0:
raise Exception("Found %d errors parsing %r" % (prog["error_count"], filename))
tree_data: TreeData = flscriptparse.calctree(prog, alias_mode=0)
ast = post_parse(tree_data)
return pytnyzer.pythonize2(ast, known_refs)
def pythonify_string(
qs_code: str,
known_refs: Dict[str, Tuple[str, str]] = {},
parser_template: str = "expression_template",
) -> str:
"""Convert QS string to Python. For unit-testing, only evaluates expressions."""
prog = flscriptparse.parse(qs_code)
if not prog:
raise Exception("Parse failed")
if prog["error_count"] > 0:
raise Exception("Found %d errors parsing string" % (prog["error_count"]))
tree_data: TreeData = flscriptparse.calctree(prog, alias_mode=0)
ast = post_parse(tree_data)
ast.set("parser-template", parser_template)
return pytnyzer.pythonize2(ast, known_refs)
def execute(options: Any, args: List[str]) -> None:
"""Execute conversion orders given by options and args. Can be used to emulate program calls."""
from pineboolib.application.parsers.qsaparser import pytnyzer
pytnyzer.STRICT_MODE = options.strict
if options.full:
execpython = options.exec_python
options.exec_python = False
options.full = False
options.toxml = True
LOGGER.info("Pass 1 - Parse and write XML file . . .")
try:
execute(options, args)
except Exception:
LOGGER.exception("Error parseando:")
options.toxml = False
options.topython = True
LOGGER.info("Pass 2 - Pythonize and write PY file . . .")
try:
execute(options, [arg + ".xml" for arg in args])
except Exception:
LOGGER.exception("Error convirtiendo:")
if execpython:
options.exec_python = execpython
LOGGER.info("Pass 3 - Test PY file load . . .")
options.topython = False
try:
execute(
options,
[(arg + ".xml.py").replace(".qs.xml.py", options.python_ext) for arg in args],
)
except Exception:
LOGGER.exception("Error al ejecutar Python:")
LOGGER.debug("Done.")
elif options.exec_python:
# import qsatype
for filename in args:
realpath = os.path.realpath(filename)
path, name = os.path.split(realpath)
if not os.path.exists(realpath):
LOGGER.error("Fichero no existe: %s" % name)
continue
mod = Module(name, path)
if not mod.loadModule():
LOGGER.error("Error cargando modulo %s" % name)
elif options.topython:
from .pytnyzer import pythonize
import io
if options.cache:
args = [
x
for x in args
if not os.path.exists((x + ".py").replace(".qs.xml.py", options.python_ext))
or os.path.getmtime(x)
> os.path.getctime((x + ".py").replace(".qs.xml.py", options.python_ext))
]
nfs = len(args)
for nf_, filename in enumerate(args):
bname = os.path.basename(filename)
if options.storepath:
destname = os.path.join(options.storepath, bname + ".py")
else:
destname = filename + ".py"
destname = destname.replace(".qs.xml.py", options.python_ext)
if not os.path.exists(filename):
LOGGER.error("Fichero %r no encontrado" % filename)
continue
LOGGER.debug(
"Pythonizing File: %-35s . . . . (%.1f%%)"
% (bname, 100.0 * (nf_ + 1.0) / nfs)
)
old_stderr = sys.stdout
stream = io.StringIO()
sys.stdout = stream
try:
pythonize(filename, destname, destname + ".debug")
except Exception:
LOGGER.exception("Error al pythonificar %r:" % filename)
sys.stdout = old_stderr
text = stream.getvalue()
if len(text) > 2:
LOGGER.info("%s: " % bname + ("\n%s: " % bname).join(text.splitlines()))
else:
if options.cache:
args = [
x
for x in args
if not os.path.exists(x + ".xml")
or os.path.getmtime(x) > os.path.getctime(x + ".xml")
]
nfs = len(args)
for nf_, filename in enumerate(args):
bname = os.path.basename(filename)
LOGGER.debug(
"Parsing File: %-35s . . . . (%.1f%%)" % (bname, 100.0 * (nf_ + 1.0) / nfs)
)
try:
filecontent = open(filename, "r", encoding="latin-1").read()
except Exception:
LOGGER.exception("Error: No se pudo abrir fichero %s", filename)
continue
prog = flscriptparse.parse(filecontent)
if not prog:
LOGGER.error("Error: No se pudo abrir %s" % (repr(filename)))
continue
if prog["error_count"] > 0:
LOGGER.error(
"Encontramos %d errores parseando: %-35s"
% (prog["error_count"], repr(filename))
)
continue
if not options.toxml:
# Si no se quiere guardar resultado, no hace falta calcular mas
continue
tree_data = None
try:
tree_data = flscriptparse.calctree(prog, alias_mode=0)
except Exception:
LOGGER.exception("Error al convertir a XML %r:" % bname)
if not tree_data:
LOGGER.error("No se pudo parsear %s" % (repr(filename)))
continue
ast = post_parse(tree_data)
if ast is None:
LOGGER.error("No se pudo analizar %s" % (repr(filename)))
continue
if options.storepath:
destname = os.path.join(options.storepath, bname + ".xml")
else:
destname = filename + ".xml"
xml_str = minidom.parseString(ElementTree.tostring(ast)).toprettyxml(indent=" ")
with open(destname, "w", encoding="UTF-8") as file_:
file_.write(xml_str)
if __name__ == "__main__":
main()
|
[
"io.StringIO",
"optparse.OptionParser",
"logging.basicConfig",
"typing.cast",
"os.path.basename",
"os.path.realpath",
"os.path.exists",
"importlib.reload",
"os.path.getmtime",
"xml.etree.ElementTree.tostring",
"os.path.getctime",
"os.path.split",
"os.path.join",
"pineboolib.application.parsers.qsaparser.pytnyzer.pythonize2",
"logging.getLogger"
] |
[((476, 502), 'importlib.reload', 'importlib.reload', (['pytnyzer'], {}), '(pytnyzer)\n', (492, 502), False, 'import importlib\n'), ((576, 603), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (593, 603), False, 'import logging\n'), ((18581, 18595), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (18593, 18595), False, 'from optparse import OptionParser\n'), ((20566, 20613), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': 'log_format', 'level': '(0)'}), '(format=log_format, level=0)\n', (20585, 20613), False, 'import logging\n'), ((20632, 20674), 'logging.getLogger', 'logging.getLogger', (['"""blib2to3.pgen2.driver"""'], {}), "('blib2to3.pgen2.driver')\n", (20649, 20674), False, 'import logging\n'), ((21702, 21738), 'pineboolib.application.parsers.qsaparser.pytnyzer.pythonize2', 'pytnyzer.pythonize2', (['ast', 'known_refs'], {}), '(ast, known_refs)\n', (21721, 21738), False, 'from pineboolib.application.parsers.qsaparser import pytnyzer\n'), ((22348, 22384), 'pineboolib.application.parsers.qsaparser.pytnyzer.pythonize2', 'pytnyzer.pythonize2', (['ast', 'known_refs'], {}), '(ast, known_refs)\n', (22367, 22384), False, 'from pineboolib.application.parsers.qsaparser import pytnyzer\n'), ((2936, 2966), 'typing.cast', 'cast', (['Type[TagObjectBase]', 'cls'], {}), '(Type[TagObjectBase], cls)\n', (2940, 2966), False, 'from typing import List, Type, Optional, Dict, Tuple, Any, Callable, cast, Iterable\n'), ((18038, 18072), 'os.path.join', 'os.path.join', (['self.path', 'self.name'], {}), '(self.path, self.name)\n', (18050, 18072), False, 'import os\n'), ((23858, 23884), 'os.path.realpath', 'os.path.realpath', (['filename'], {}), '(filename)\n', (23874, 23884), False, 'import os\n'), ((23910, 23933), 'os.path.split', 'os.path.split', (['realpath'], {}), '(realpath)\n', (23923, 23933), False, 'import os\n'), ((23953, 23977), 'os.path.exists', 'os.path.exists', (['realpath'], {}), '(realpath)\n', (23967, 23977), False, 'import os\n'), ((24713, 24739), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (24729, 24739), False, 'import os\n'), ((25338, 25351), 'io.StringIO', 'io.StringIO', ([], {}), '()\n', (25349, 25351), False, 'import io\n'), ((26093, 26119), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (26109, 26119), False, 'import os\n'), ((24801, 24847), 'os.path.join', 'os.path.join', (['options.storepath', "(bname + '.py')"], {}), "(options.storepath, bname + '.py')\n", (24813, 24847), False, 'import os\n'), ((25003, 25027), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (25017, 25027), False, 'import os\n'), ((27621, 27668), 'os.path.join', 'os.path.join', (['options.storepath', "(bname + '.xml')"], {}), "(options.storepath, bname + '.xml')\n", (27633, 27668), False, 'import os\n'), ((27775, 27800), 'xml.etree.ElementTree.tostring', 'ElementTree.tostring', (['ast'], {}), '(ast)\n', (27795, 27800), False, 'from xml.etree import ElementTree\n'), ((24498, 24517), 'os.path.getmtime', 'os.path.getmtime', (['x'], {}), '(x)\n', (24514, 24517), False, 'import os\n'), ((25892, 25918), 'os.path.exists', 'os.path.exists', (["(x + '.xml')"], {}), "(x + '.xml')\n", (25906, 25918), False, 'import os\n'), ((25938, 25957), 'os.path.getmtime', 'os.path.getmtime', (['x'], {}), '(x)\n', (25954, 25957), False, 'import os\n'), ((25960, 25988), 'os.path.getctime', 'os.path.getctime', (["(x + '.xml')"], {}), "(x + '.xml')\n", (25976, 25988), False, 'import os\n')]
|
import psycopg2
import time
from action.case_one_subscription_rebill_cancel import case_one_subscription,\
case_one_first_rebill, \
case_one_second_rebill, \
case_one_third_rebill, \
case_one_fourth_rebill, \
case_one_cancel
from connection.connection_variables import pg_user, \
pg_password, \
pg_host, \
pg_port, \
pg_database
from parameters.subscription.case_1.subsciption_params import used_click_for_subscription, \
case_1_subscription_params
from parameters.rebill.case_1.rebill_params import case_1_first_rebill_params, \
case_1_second_rebill_params, \
case_1_third_rebill_params, case_1_fourth_rebill_params
"""Connect to database using connection variables"""
connection = psycopg2.connect(database=pg_database,
user=pg_user,
password=pg_password,
host=pg_host,
port=pg_port)
case_one_subscription() # Action subscription makes here
case_one_first_rebill() # First rebill action makes here
case_one_second_rebill() # Second rebill action makes here
case_one_third_rebill() # Third rebill action makes here
case_one_fourth_rebill() # Fourth rebill action makes here
"""Define sleeping time"""
define_sleep = time.sleep(7)
def select_created_subscription():
cursor = connection.cursor()
subscription_select_query = 'SELECT * ' \
'FROM subscription ' \
'WHERE click_id = %s;' % used_click_for_subscription
cursor.execute(subscription_select_query)
subscription_cortage = cursor.fetchall()
subscription_row = subscription_cortage[0]
return subscription_row
"""Save selected subscription data into variable"""
save_subscription_parameters = select_created_subscription()
"""Select subscription_click_id and validate it after"""
def subscription_click_id():
subscription_click_id = save_subscription_parameters[3]
return str(subscription_click_id)
if case_1_subscription_params['click_id'] == subscription_click_id():
print('subscription_click_id true')
else:
print('subscription_click_id false')
"""Select subscription_external_message_id and validate it after"""
def subscription_external_message_id():
subscription_external_message_id = save_subscription_parameters[14]
return str(subscription_external_message_id)
if case_1_subscription_params['external_message_id'] == subscription_external_message_id():
print('subscription_external_message_id true')
else:
print('subscription_external_message_id false')
"""Select gateway_id for validation after"""
def define_subscription_gateway_id():
cursor = connection.cursor()
gateway_select_query = "SELECT * " \
"FROM gateway_aliases " \
"WHERE alias = '{}'".format(case_1_subscription_params['partner'])
cursor.execute(gateway_select_query)
gateway_cortage = cursor.fetchall()
gateway_parameters = gateway_cortage[0]
gateway_id = gateway_parameters[2]
return str(gateway_id)
"""Select subscription_gateway_id and validate it using previous selecting"""
def subscription_gateway_id():
subscription_gateway_id = save_subscription_parameters[17]
return str(subscription_gateway_id)
if define_subscription_gateway_id() == subscription_gateway_id():
print('subscription_partner true')
else:
print('subscription_partner false')
"""Select subscription_external_subscription_id and validate it"""
def subscription_external_subscription_id():
subscription_external_subscription_id = save_subscription_parameters[2]
return subscription_external_subscription_id
if case_1_subscription_params['external_subscription_id'] == subscription_external_subscription_id():
print('subscription_external_subscription_id true')
else:
print('subscription_external_subscription_id false')
"""Select extra_param and validate it"""
def select_extra_param():
subscription_extra_param = save_subscription_parameters[15]
return subscription_extra_param
if case_1_subscription_params['extra_param'] == select_extra_param():
print('subscription_extra_param true')
else:
print('subscription_extra_param false')
"""Select and validate subscription user_role"""
def select_subscription_user_role():
cursor = connection.cursor()
select_subscription_user_role = "SELECT role " \
"FROM \"user\" " \
"WHERE id = {}".format(save_subscription_parameters[18])
cursor.execute(select_subscription_user_role)
subscription_user_role_cortage = cursor.fetchall()
subscription_user_role = subscription_user_role_cortage[0]
return subscription_user_role[0]
if select_subscription_user_role() == save_subscription_parameters[16]:
print("subscription_user_role true")
else:
print("subscription_user_role false")
"""Validate closed_at"""
if save_subscription_parameters[7] == None:
print('subscription closed_at true')
else:
print('subscription closed_at false')
"""Validate is_hidden"""
if save_subscription_parameters[13] == False:
print("Subscription is_hidden is checked. Test was passed")
else:
print("Subscription is_hidden is checked. Test wasn't passed")
"""Validate subscription access_period"""
if save_subscription_parameters[10] == 0:
print('Subscription access_period true')
else:
print('Subscription access_period false')
"""Validate subscription is_hidden"""
if save_subscription_parameters[13] == False:
print("Subscription is_hidden = False. Test was passed")
else:
print("Subscription is_hidden = True. Test wasn't passed")
"""
========================================================================================
========================================================================================
From this place we start validate first created rebill
"""
def select_created_first_rebill():
cursor = connection.cursor()
first_rebill_select_query = "SELECT * " \
"FROM rebill " \
"WHERE click_id = {} AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_first_rebill_params[
'external_message_id'])
cursor.execute(first_rebill_select_query)
first_rebill_cortage = cursor.fetchall()
first_rebill_row = first_rebill_cortage[0]
return first_rebill_row
"""Save selected first rebill data into variable"""
save_first_rebill = select_created_first_rebill()
"""Select click_id and validate it"""
def first_rebill_click_id():
first_rebill_click_id = save_first_rebill[6]
return str(first_rebill_click_id)
if case_1_first_rebill_params['click_id'] == first_rebill_click_id():
print('first_rebill_click_id true')
else:
print('first_rebill_click_id false')
"""Select gateway_id for validation after"""
def define_first_rebill_gateway_id():
cursor = connection.cursor()
gateway_select_query = "SELECT * " \
"FROM gateway_aliases " \
"WHERE alias = '{}'".format(case_1_first_rebill_params['partner'])
cursor.execute(gateway_select_query)
gateway_cortage = cursor.fetchall()
gateway_parameters = gateway_cortage[0]
gateway_id = gateway_parameters[2]
return str(gateway_id)
def first_rebill_gateway_id():
return str(save_first_rebill[2])
if define_first_rebill_gateway_id() == first_rebill_gateway_id():
print('first_rebill_gateway_id true')
else:
print('first_rebill_gateway_id false')
"""Select first rebill external_message_id and validate it"""
def first_rebill_external_message_id():
return save_first_rebill[10]
if case_1_first_rebill_params['external_message_id'] == first_rebill_external_message_id():
print('first_rebill_external_message_id true')
else:
print('first_rebill_external_message_id false')
"""Validate first rebill external_subscription_id"""
if case_1_first_rebill_params['external_subscription_id'] == subscription_external_subscription_id():
print('First rebill external_subscription_id true')
else:
print('First rebill external_subscription_id false')
"""Validate related with money parameters"""
def select_first_rebill_currency_rate():
cursor = connection.cursor()
select_currency_rate_query = "SELECT rate " \
"FROM currency " \
"WHERE code = '{}'".format(case_1_first_rebill_params['currency'])
cursor.execute(select_currency_rate_query)
rate_cortage = cursor.fetchall()
first_rebill_row = rate_cortage[0]
return first_rebill_row[0]
"""Calculate money_from_gateway using currency rate"""
def calculate_first_rebill_money_from_gateway_using_currency():
calculate_payout = select_first_rebill_currency_rate() \
* float(case_1_first_rebill_params['payout'])
return calculate_payout
"""Define user base coefficient"""
def select_first_rebill_user_base_coefficient():
cursor = connection.cursor()
select_user_base_coefficient = "SELECT base_coefficient " \
"FROM \"user\" " \
"WHERE id = {}".format(save_first_rebill[3])
cursor.execute(select_user_base_coefficient)
base_coefficient_cortage = cursor.fetchall()
base_coefficient = base_coefficient_cortage[0]
return base_coefficient[0]
"""Validate money_from_gateway"""
if calculate_first_rebill_money_from_gateway_using_currency() == float(case_1_first_rebill_params['payout']) * select_first_rebill_currency_rate():
print('First rebill money_from_gateway true')
else:
print('First rebill money_from_gateway false')
"""Validate first rebill money_for partner"""
if round(float(calculate_first_rebill_money_from_gateway_using_currency() \
* select_first_rebill_user_base_coefficient()),2) == round(float(save_first_rebill[5]),2):
print("first rebill money_for partner true")
else:
print("first rebill money_for partner false")
"""Validate first_rebill is_payout_received"""
def select_first_rebill_is_payout_received():
cursor = connection.cursor()
select_is_payout_received = "SELECT is_payout_received " \
"FROM rebill " \
"WHERE click_id = {} " \
"AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_first_rebill_params[
'external_message_id'])
cursor.execute(select_is_payout_received)
is_payout_received_cortage = cursor.fetchall()
is_payout_received = is_payout_received_cortage[0]
return is_payout_received[0]
def first_rebill_check_key_value():
is_payout = 'payout' in case_1_first_rebill_params
return is_payout
if select_first_rebill_is_payout_received() == first_rebill_check_key_value():
print('First rebill is_payout_received = true. Test was passed')
else:
print("First rebill is_payout_received = false. Test wasn't passed")
"""Validate first_rebill extra_param"""
if save_first_rebill[17] == case_1_first_rebill_params['extra_param']:
print('First rebill extra_param true')
else:
print('First rebill extra_param false')
"""Validate user role"""
def select_first_rebill_user_role():
cursor = connection.cursor()
select_user_role = "SELECT role " \
"FROM \"user\" " \
"WHERE id = {}".format(save_first_rebill[3])
cursor.execute(select_user_role)
user_role_cortage = cursor.fetchall()
user_role = user_role_cortage[0]
return user_role[0]
if select_first_rebill_user_role() == save_first_rebill[18]:
print('First rebill user_role true')
else:
print('First rebill user_role false')
"""
========================================================================================
========================================================================================
From this place we start validate second created rebill
"""
def select_created_second_rebill():
cursor = connection.cursor()
second_rebill_select_query = "SELECT * " \
"FROM rebill " \
"WHERE click_id = {} AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_second_rebill_params[
'text_uniq_lead'])
cursor.execute(second_rebill_select_query)
second_rebill_cortage = cursor.fetchall()
second_rebill_row = second_rebill_cortage[0]
return second_rebill_row
"""Save selected second rebill data into variable"""
save_second_rebill = select_created_second_rebill()
"""Select second rebill click_id and validate it"""
def second_rebill_click_id():
second_rebill_click_id = save_second_rebill[6]
return str(second_rebill_click_id)
if case_1_second_rebill_params['click_id'] == second_rebill_click_id():
print('second_rebill_click_id true')
else:
print('second_rebill_click_id false')
"""Select second rebill gateway_id for validation after"""
def define_second_rebill_gateway_id():
cursor = connection.cursor()
gateway_select_query = "SELECT * " \
"FROM gateway_aliases " \
"WHERE alias = '{}'".format(case_1_second_rebill_params['partner'])
cursor.execute(gateway_select_query)
gateway_cortage = cursor.fetchall()
gateway_parameters = gateway_cortage[0]
gateway_id = gateway_parameters[2]
return str(gateway_id)
def second_rebill_gateway_id():
return str(save_second_rebill[2])
if define_second_rebill_gateway_id() == second_rebill_gateway_id():
print('second_rebill_gateway_id true')
else:
print('second_rebill_gateway_id false')
"""Validate second_rebill external_message_id(text_uniq_lead)"""
if case_1_second_rebill_params['text_uniq_lead'] == save_second_rebill[10]:
print('second_rebill_external_message_id true')
else:
print('second_rebill_external_message_id false')
"""Validate second rebill external_subscription_id"""
if case_1_second_rebill_params['subscr'] == save_subscription_parameters[2]:
print('second rebill external_subscription_id true')
else:
print('second rebill external_subscription_id false')
"""Validate related with second rebill's money parameters"""
def select_second_rebill_currency_rate():
cursor = connection.cursor()
select_currency_rate_query = "SELECT rate " \
"FROM currency " \
"WHERE code = '{}'".format(case_1_second_rebill_params['currency'])
cursor.execute(select_currency_rate_query)
rate_cortage = cursor.fetchall()
second_rebill_row = rate_cortage[0]
return second_rebill_row[0]
"""Calculate second rebill money_from_gateway using currency rate"""
def calculate_second_rebill_money_from_gateway_using_currency():
calculate_payout = select_second_rebill_currency_rate() \
* float(case_1_second_rebill_params['payout'])
return calculate_payout
"""Define second rebill user base coefficient"""
def select_second_rebill_user_base_coefficient():
cursor = connection.cursor()
select_user_base_coefficient = "SELECT base_coefficient " \
"FROM \"user\" " \
"WHERE id = {}".format(save_second_rebill[3])
cursor.execute(select_user_base_coefficient)
base_coefficient_cortage = cursor.fetchall()
base_coefficient = base_coefficient_cortage[0]
return base_coefficient[0]
"""Validate second rebill money_from_gateway"""
if calculate_second_rebill_money_from_gateway_using_currency() == \
float(case_1_second_rebill_params['payout']) * select_second_rebill_currency_rate():
print('second rebill money_from_gateway true')
else:
print('second rebill money_from_gateway false')
"""Validate second rebill money_for partner"""
if round(float(select_second_rebill_user_base_coefficient()
* calculate_second_rebill_money_from_gateway_using_currency()),2) \
== round(float(save_second_rebill[5]),2):
print("second rebill money_for partner true")
else:
print("second rebill money_for partner false")
"""Validate second_rebill is_payout_received"""
def select_second_rebill_is_payout_received():
cursor = connection.cursor()
select_is_payout_received = "SELECT is_payout_received " \
"FROM rebill " \
"WHERE click_id = {} " \
"AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_second_rebill_params[
'text_uniq_lead'])
cursor.execute(select_is_payout_received)
is_payout_received_cortage = cursor.fetchall()
is_payout_received = is_payout_received_cortage[0]
return is_payout_received[0]
def second_rebill_check_key_value():
is_payout = 'payout' in case_1_second_rebill_params
return is_payout
if select_second_rebill_is_payout_received() == second_rebill_check_key_value():
print("second rebill is_payout_received = true. Test was passed")
else:
print("second rebill is_payout_received = false. Test wasn't passed")
"""Validate second_rebill extra_param"""
if save_second_rebill[17] == case_1_second_rebill_params['extra_param']:
print('second rebill extra_param true')
else:
print('second rebill extra_param false')
"""Validate second rebill user role"""
def select_second_rebill_user_role():
cursor = connection.cursor()
select_user_role = "SELECT role " \
"FROM \"user\" " \
"WHERE id = {}".format(save_second_rebill[3])
cursor.execute(select_user_role)
user_role_cortage = cursor.fetchall()
user_role = user_role_cortage[0]
return user_role[0]
if select_second_rebill_user_role() == save_second_rebill[18]:
print('second rebill user_role true')
else:
print('second rebill user_role false')
"""
========================================================================================
========================================================================================
From this place we start validate third created rebill
"""
def select_created_third_rebill():
cursor = connection.cursor()
third_rebill_select_query = "SELECT * " \
"FROM rebill " \
"WHERE click_id = {} AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_third_rebill_params[
'text_uniq_lead'])
cursor.execute(third_rebill_select_query)
third_rebill_cortage = cursor.fetchall()
third_rebill_row = third_rebill_cortage[0]
return third_rebill_row
"""Save selected third rebill data into variable"""
save_third_rebill = select_created_third_rebill()
"""Select third rebill click_id and validate it"""
def third_rebill_click_id():
third_rebill_click_id = save_third_rebill[6]
return str(third_rebill_click_id)
if case_1_third_rebill_params['click_id'] == third_rebill_click_id():
print('third_rebill_click_id true')
else:
print('third_rebill_click_id false')
"""Select third rebill gateway_id for validation after"""
def define_third_rebill_gateway_id():
cursor = connection.cursor()
gateway_select_query = "SELECT * " \
"FROM gateway_aliases " \
"WHERE alias = '{}'".format(case_1_third_rebill_params['partner'])
cursor.execute(gateway_select_query)
gateway_cortage = cursor.fetchall()
gateway_parameters = gateway_cortage[0]
gateway_id = gateway_parameters[2]
return str(gateway_id)
def third_rebill_gateway_id():
return str(save_third_rebill[2])
if define_third_rebill_gateway_id() == third_rebill_gateway_id():
print('third_rebill_gateway_id true')
else:
print('third_rebill_gateway_id false')
"""Validate third_rebill external_message_id(text_uniq_lead)"""
if case_1_third_rebill_params['text_uniq_lead'] == save_third_rebill[10]:
print('third_rebill_external_message_id true')
else:
print('third_rebill_external_message_id false')
"""Validate third rebill external_subscription_id(subscr)"""
if case_1_third_rebill_params['subscr'] == save_subscription_parameters[2]:
print('third rebill external_subscription_id true')
else:
print('third rebill external_subscription_id false')
"""Validate related with third rebill's money parameters"""
def select_third_rebill_currency_rate():
cursor = connection.cursor()
select_currency_rate_query = "SELECT rate " \
"FROM currency " \
"WHERE code = 'EUR'" #because there is no currency parameter in query
cursor.execute(select_currency_rate_query)
rate_cortage = cursor.fetchall()
third_rebill_row = rate_cortage[0]
return third_rebill_row[0]
"""Calculate third rebill money_from_gateway using currency rate"""
def calculate_third_rebill_money_from_gateway_using_currency():
calculate_payout = select_third_rebill_currency_rate() \
* float(case_1_third_rebill_params['payout'])
return calculate_payout
"""Define third rebill user base coefficient"""
def select_third_rebill_user_base_coefficient():
cursor = connection.cursor()
select_user_base_coefficient = "SELECT base_coefficient " \
"FROM \"user\" " \
"WHERE id = {}".format(save_third_rebill[3])
cursor.execute(select_user_base_coefficient)
base_coefficient_cortage = cursor.fetchall()
base_coefficient = base_coefficient_cortage[0]
return base_coefficient[0]
"""Validate third rebill money_from_gateway"""
if calculate_third_rebill_money_from_gateway_using_currency() == \
float(case_1_third_rebill_params['payout']) * select_third_rebill_currency_rate():
print('third rebill money_from_gateway true')
else:
print('third rebill money_from_gateway false')
"""Validate third rebill money_for partner"""
if round(float(select_third_rebill_user_base_coefficient()
* calculate_third_rebill_money_from_gateway_using_currency()),2) \
== round(float(save_third_rebill[5]),2):
print("third rebill money_for partner true")
else:
print("third rebill money_for partner false")
"""Validate third is_payout_received"""
def select_third_rebill_is_payout_received():
cursor = connection.cursor()
select_is_payout_received = "SELECT is_payout_received " \
"FROM rebill " \
"WHERE click_id = {} " \
"AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_third_rebill_params[
'text_uniq_lead'])
cursor.execute(select_is_payout_received)
is_payout_received_cortage = cursor.fetchall()
is_payout_received = is_payout_received_cortage[0]
return is_payout_received[0]
def third_rebill_check_key_value():
is_payout = 'payout' in case_1_third_rebill_params
return is_payout
if select_third_rebill_is_payout_received() == third_rebill_check_key_value():
print("third rebill is_payout_received = true. Test was passed")
else:
print("third rebill is_payout_received = false. test wasn't passed")
"""Validate third_rebill extra_param"""
if save_third_rebill[17] == case_1_third_rebill_params['extra_param']:
print('third rebill extra_param true')
else:
print('third rebill extra_param false')
"""Validate third rebill user role"""
def select_third_rebill_user_role():
cursor = connection.cursor()
select_user_role = "SELECT role " \
"FROM \"user\" " \
"WHERE id = {}".format(save_third_rebill[3])
cursor.execute(select_user_role)
user_role_cortage = cursor.fetchall()
user_role = user_role_cortage[0]
return user_role[0]
if select_third_rebill_user_role() == save_third_rebill[18]:
print('third rebill user_role true')
else:
print('third rebill user_role false')
"""
========================================================================================
========================================================================================
From this place we start validate fourth created rebill
"""
def select_created_fourth_rebill():
cursor = connection.cursor()
fourth_rebill_select_query = "SELECT * " \
"FROM rebill " \
"WHERE click_id = {} AND external_message_id = '{}'".format(used_click_for_subscription,
case_1_fourth_rebill_params[
'text_uniq_lead'])
cursor.execute(fourth_rebill_select_query)
fourth_rebill_cortage = cursor.fetchall()
fourth_rebill_row = fourth_rebill_cortage[0]
return fourth_rebill_row
"""Save selected fourth rebill data into variable"""
save_fourth_rebill = select_created_fourth_rebill()
"""Select fourth rebill click_id and validate it"""
def fourth_rebill_click_id():
fourth_rebill_click_id = save_fourth_rebill[6]
return str(fourth_rebill_click_id)
if case_1_fourth_rebill_params['click_id'] == fourth_rebill_click_id():
print('fourth_rebill_click_id true')
else:
print('fourth_rebill_click_id false')
"""Select fourth rebill gateway_id for validation after"""
def define_fourth_rebill_gateway_id():
cursor = connection.cursor()
gateway_select_query = "SELECT * " \
"FROM gateway_aliases " \
"WHERE alias = '{}'".format(case_1_fourth_rebill_params['partner'])
cursor.execute(gateway_select_query)
gateway_cortage = cursor.fetchall()
gateway_parameters = gateway_cortage[0]
gateway_id = gateway_parameters[2]
return str(gateway_id)
def fourth_rebill_gateway_id():
return str(save_fourth_rebill[2])
if define_fourth_rebill_gateway_id() == fourth_rebill_gateway_id():
print('fourth_rebill_gateway_id true')
else:
print('fourth_rebill_gateway_id false')
"""Validate fourth_rebill external_message_id(text_uniq_lead)"""
if case_1_fourth_rebill_params['text_uniq_lead'] == save_fourth_rebill[10]:
print('fourth_rebill_external_message_id true')
else:
print('fourth_rebill_external_message_id false')
"""Validate fourth rebill external_subscription_id"""
if case_1_fourth_rebill_params['subscr'] == save_subscription_parameters[2]:
print('fourth rebill external_subscription_id true')
else:
print('fourth rebill external_subscription_id false')
"""Validate related with fourth rebill's money parameters"""
def select_fourth_rebill_sms_offer_payout():
cursor = connection.cursor()
select_fourth_rebill_sms_offer_payout = "SELECT payout " \
"FROM sms_offer " \
"WHERE id = {}".format(save_fourth_rebill[9])
cursor.execute(select_fourth_rebill_sms_offer_payout)
fourth_rebill_sms_offer_payout_cortage = cursor.fetchall()
fourth_rebill_sms_offer_payout = fourth_rebill_sms_offer_payout_cortage[0]
return fourth_rebill_sms_offer_payout[0]
"""Define fourth rebill user base coefficient"""
def select_fourth_rebill_user_base_coefficient():
cursor = connection.cursor()
select_user_base_coefficient = "SELECT base_coefficient " \
"FROM \"user\" " \
"WHERE id = {}".format(save_fourth_rebill[3])
cursor.execute(select_user_base_coefficient)
base_coefficient_cortage = cursor.fetchall()
base_coefficient = base_coefficient_cortage[0]
return base_coefficient[0]
"""Calculate fourth rebill money_from_gateway"""
def calculate_fourth_rebill_money_from_gateway():
calculate_payout = float(select_fourth_rebill_sms_offer_payout()) \
* float(select_fourth_rebill_user_base_coefficient())
return calculate_payout
"""Validate fourth rebill money_from_gateway"""
if select_fourth_rebill_sms_offer_payout() == save_fourth_rebill[4]:
print("fourth rebill money_from_gateway true")
else:
print("fourth rebill money_from_gateway false")
"""Validate fourth rebill money_for_partner"""
if save_fourth_rebill[5] == calculate_fourth_rebill_money_from_gateway():
print("fourth rebill money_for_partner true")
else:
print("fourth rebill money_for_partner false")
def fourth_rebill_check_key_value():
is_payout = 'payout' in case_1_fourth_rebill_params
return is_payout
if save_fourth_rebill[16] == fourth_rebill_check_key_value():
print('Fourth rebill is_payout_received = false. Test was passed')
else:
print("Fourth rebill is_payout_received = true. Test wasn't passed")
"""Validate fourth_rebill extra_param"""
if save_fourth_rebill[17] == case_1_fourth_rebill_params['extra_param']:
print('fourth rebill extra_param true')
else:
print('fourth rebill extra_param false')
"""Validate fourth rebill user role"""
def select_fourth_rebill_user_role():
cursor = connection.cursor()
select_user_role = "SELECT role " \
"FROM \"user\" " \
"WHERE id = {}".format(save_fourth_rebill[3])
cursor.execute(select_user_role)
user_role_cortage = cursor.fetchall()
user_role = user_role_cortage[0]
return user_role[0]
if select_fourth_rebill_user_role() == save_fourth_rebill[18]:
print('fourth rebill user_role true')
else:
print('fourth rebill user_role false')
"""Validate Unsubscription"""
if save_subscription_parameters[7] == None:
print("Subscription is still opened. Initiate unsubscription.")
case_one_cancel()
else:
print("Subscription is closed already.")
time.sleep(3)
def select_subscription_closed_at():
cursor = connection.cursor()
closed_at_query = "SELECT closed_at " \
"FROM subscription " \
"WHERE click_id = {}".format(used_click_for_subscription)
cursor.execute(closed_at_query)
closed_at_cortage = cursor.fetchall()
closed_at = closed_at_cortage[0]
return closed_at[0]
if select_subscription_closed_at() != None:
print("Subscribtion is closed. Unsubscription test was passed.")
else:
print("Subscribtion isn't closed. Unsubscription test wasn't passed.")
|
[
"action.case_one_subscription_rebill_cancel.case_one_third_rebill",
"action.case_one_subscription_rebill_cancel.case_one_first_rebill",
"action.case_one_subscription_rebill_cancel.case_one_cancel",
"time.sleep",
"action.case_one_subscription_rebill_cancel.case_one_fourth_rebill",
"action.case_one_subscription_rebill_cancel.case_one_subscription",
"action.case_one_subscription_rebill_cancel.case_one_second_rebill",
"psycopg2.connect"
] |
[((732, 838), 'psycopg2.connect', 'psycopg2.connect', ([], {'database': 'pg_database', 'user': 'pg_user', 'password': 'pg_password', 'host': 'pg_host', 'port': 'pg_port'}), '(database=pg_database, user=pg_user, password=pg_password,\n host=pg_host, port=pg_port)\n', (748, 838), False, 'import psycopg2\n'), ((956, 979), 'action.case_one_subscription_rebill_cancel.case_one_subscription', 'case_one_subscription', ([], {}), '()\n', (977, 979), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n'), ((1027, 1050), 'action.case_one_subscription_rebill_cancel.case_one_first_rebill', 'case_one_first_rebill', ([], {}), '()\n', (1048, 1050), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n'), ((1098, 1122), 'action.case_one_subscription_rebill_cancel.case_one_second_rebill', 'case_one_second_rebill', ([], {}), '()\n', (1120, 1122), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n'), ((1170, 1193), 'action.case_one_subscription_rebill_cancel.case_one_third_rebill', 'case_one_third_rebill', ([], {}), '()\n', (1191, 1193), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n'), ((1241, 1265), 'action.case_one_subscription_rebill_cancel.case_one_fourth_rebill', 'case_one_fourth_rebill', ([], {}), '()\n', (1263, 1265), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n'), ((1356, 1369), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (1366, 1369), False, 'import time\n'), ((30948, 30961), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (30958, 30961), False, 'import time\n'), ((30878, 30895), 'action.case_one_subscription_rebill_cancel.case_one_cancel', 'case_one_cancel', ([], {}), '()\n', (30893, 30895), False, 'from action.case_one_subscription_rebill_cancel import case_one_subscription, case_one_first_rebill, case_one_second_rebill, case_one_third_rebill, case_one_fourth_rebill, case_one_cancel\n')]
|
from history import save_history, get_browser_history
from search import search
from rich import print
def main():
print("Seja bem-vindo ao py-google-search")
while True:
try:
search_term = input("Pesquisa: ")
save_history(search_term)
if search_term == '--history':
print(get_browser_history())
continue
s_result = search(search_term)
for i in s_result['results']:
name = i['name']
link = i['link']
print("========================================================================")
print(f'Title: {name}')
print(f'Link: {link}')
except KeyboardInterrupt:
ch = input("Deseja sair?(s/n)").lower()
if ch == 's':
exit()
elif ch == 'n':
continue
else:
print("Escolha inválida...")
continue
except Exception as e:
print(e)
break
if __name__ == '__main__':
main()
|
[
"rich.print",
"history.save_history",
"history.get_browser_history",
"search.search"
] |
[((120, 163), 'rich.print', 'print', (['"""Seja bem-vindo ao py-google-search"""'], {}), "('Seja bem-vindo ao py-google-search')\n", (125, 163), False, 'from rich import print\n'), ((251, 276), 'history.save_history', 'save_history', (['search_term'], {}), '(search_term)\n', (263, 276), False, 'from history import save_history, get_browser_history\n'), ((414, 433), 'search.search', 'search', (['search_term'], {}), '(search_term)\n', (420, 433), False, 'from search import search\n'), ((559, 645), 'rich.print', 'print', (['"""========================================================================"""'], {}), "(\n '========================================================================')\n", (564, 645), False, 'from rich import print\n'), ((657, 680), 'rich.print', 'print', (['f"""Title: {name}"""'], {}), "(f'Title: {name}')\n", (662, 680), False, 'from rich import print\n'), ((697, 719), 'rich.print', 'print', (['f"""Link: {link}"""'], {}), "(f'Link: {link}')\n", (702, 719), False, 'from rich import print\n'), ((1039, 1047), 'rich.print', 'print', (['e'], {}), '(e)\n', (1044, 1047), False, 'from rich import print\n'), ((342, 363), 'history.get_browser_history', 'get_browser_history', ([], {}), '()\n', (361, 363), False, 'from history import save_history, get_browser_history\n'), ((942, 970), 'rich.print', 'print', (['"""Escolha inválida..."""'], {}), "('Escolha inválida...')\n", (947, 970), False, 'from rich import print\n')]
|
import os
import sys
import unittest
import tempfile
import shutil
from cStringIO import StringIO
try:
# 'import as' required to protect nosetests
import catkin.test_results as catkin_test_results
except ImportError as impe:
raise ImportError(
'Please adjust your pythonpath before running this test: %s' % str(impe))
class TestResultsTest(unittest.TestCase):
def test_read_junit(self):
try:
rootdir = tempfile.mkdtemp()
result_file = os.path.join(rootdir, 'test1.xml')
with open(result_file, 'w') as fhand:
fhand.write('<testsuites tests="5" failures="3" errors="1" time="35" name="AllTests"></testsuites>')
(num_tests, num_errors, num_failures) = catkin_test_results.read_junit(result_file)
self.assertEqual((5, 1, 3), (num_tests, num_errors, num_failures))
finally:
shutil.rmtree(rootdir)
def test_test_results(self):
try:
rootdir = tempfile.mkdtemp()
for filename in ['test1.xml', 'test2.xml', 'foo.bar']:
result_file = os.path.join(rootdir, filename)
with open(result_file, 'w') as fhand:
fhand.write('<testsuites tests="5" failures="3" errors="1" time="35" name="AllTests"></testsuites>')
results = catkin_test_results.test_results(rootdir)
self.assertEqual({'test1.xml': (5, 1, 3), 'test2.xml': (5, 1, 3)}, results)
finally:
shutil.rmtree(rootdir)
def test_print_summary(self):
results = {'test1.xml': (5, 1, 3), 'test2.xml': (7, 2, 4)}
try:
oldstdout = sys.stdout
sys.stdout = StringIO()
catkin_test_results.print_summary(results)
summary = sys.stdout.getvalue()
self.assertTrue('5 tests, 1 errors, 3 failures' in summary, summary)
self.assertTrue('7 tests, 2 errors, 4 failures' in summary, summary)
self.assertTrue('12 tests, 3 errors, 7 failures' in summary, summary)
finally:
sys.stdout = oldstdout
|
[
"catkin.test_results.read_junit",
"cStringIO.StringIO",
"tempfile.mkdtemp",
"catkin.test_results.test_results",
"shutil.rmtree",
"sys.stdout.getvalue",
"os.path.join",
"catkin.test_results.print_summary"
] |
[((451, 469), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (467, 469), False, 'import tempfile\n'), ((497, 531), 'os.path.join', 'os.path.join', (['rootdir', '"""test1.xml"""'], {}), "(rootdir, 'test1.xml')\n", (509, 531), False, 'import os\n'), ((751, 794), 'catkin.test_results.read_junit', 'catkin_test_results.read_junit', (['result_file'], {}), '(result_file)\n', (781, 794), True, 'import catkin.test_results as catkin_test_results\n'), ((903, 925), 'shutil.rmtree', 'shutil.rmtree', (['rootdir'], {}), '(rootdir)\n', (916, 925), False, 'import shutil\n'), ((995, 1013), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (1011, 1013), False, 'import tempfile\n'), ((1341, 1382), 'catkin.test_results.test_results', 'catkin_test_results.test_results', (['rootdir'], {}), '(rootdir)\n', (1373, 1382), True, 'import catkin.test_results as catkin_test_results\n'), ((1500, 1522), 'shutil.rmtree', 'shutil.rmtree', (['rootdir'], {}), '(rootdir)\n', (1513, 1522), False, 'import shutil\n'), ((1698, 1708), 'cStringIO.StringIO', 'StringIO', ([], {}), '()\n', (1706, 1708), False, 'from cStringIO import StringIO\n'), ((1721, 1763), 'catkin.test_results.print_summary', 'catkin_test_results.print_summary', (['results'], {}), '(results)\n', (1754, 1763), True, 'import catkin.test_results as catkin_test_results\n'), ((1786, 1807), 'sys.stdout.getvalue', 'sys.stdout.getvalue', ([], {}), '()\n', (1805, 1807), False, 'import sys\n'), ((1112, 1143), 'os.path.join', 'os.path.join', (['rootdir', 'filename'], {}), '(rootdir, filename)\n', (1124, 1143), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: cosmos/auth/v1beta1/genesis.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from evmosproto.google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from evmosproto.gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
from evmosproto.cosmos.auth.v1beta1 import auth_pb2 as cosmos_dot_auth_dot_v1beta1_dot_auth__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='cosmos/auth/v1beta1/genesis.proto',
package='cosmos.auth.v1beta1',
syntax='proto3',
serialized_options=b'Z)github.com/cosmos/cosmos-sdk/x/auth/types',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n!cosmos/auth/v1beta1/genesis.proto\x12\x13\x63osmos.auth.v1beta1\x1a\x19google/protobuf/any.proto\x1a\x14gogoproto/gogo.proto\x1a\x1e\x63osmos/auth/v1beta1/auth.proto\"i\n\x0cGenesisState\x12\x31\n\x06params\x18\x01 \x01(\x0b\x32\x1b.cosmos.auth.v1beta1.ParamsB\x04\xc8\xde\x1f\x00\x12&\n\x08\x61\x63\x63ounts\x18\x02 \x03(\x0b\x32\x14.google.protobuf.AnyB+Z)github.com/cosmos/cosmos-sdk/x/auth/typesb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,gogoproto_dot_gogo__pb2.DESCRIPTOR,cosmos_dot_auth_dot_v1beta1_dot_auth__pb2.DESCRIPTOR,])
_GENESISSTATE = _descriptor.Descriptor(
name='GenesisState',
full_name='cosmos.auth.v1beta1.GenesisState',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='params', full_name='cosmos.auth.v1beta1.GenesisState.params', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\310\336\037\000', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='accounts', full_name='cosmos.auth.v1beta1.GenesisState.accounts', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=139,
serialized_end=244,
)
_GENESISSTATE.fields_by_name['params'].message_type = cosmos_dot_auth_dot_v1beta1_dot_auth__pb2._PARAMS
_GENESISSTATE.fields_by_name['accounts'].message_type = google_dot_protobuf_dot_any__pb2._ANY
DESCRIPTOR.message_types_by_name['GenesisState'] = _GENESISSTATE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GenesisState = _reflection.GeneratedProtocolMessageType('GenesisState', (_message.Message,), {
'DESCRIPTOR' : _GENESISSTATE,
'__module__' : 'cosmos.auth.v1beta1.genesis_pb2'
# @@protoc_insertion_point(class_scope:cosmos.auth.v1beta1.GenesisState)
})
_sym_db.RegisterMessage(GenesisState)
DESCRIPTOR._options = None
_GENESISSTATE.fields_by_name['params']._options = None
# @@protoc_insertion_point(module_scope)
|
[
"google.protobuf.symbol_database.Default",
"google.protobuf.descriptor.FieldDescriptor",
"google.protobuf.reflection.GeneratedProtocolMessageType",
"google.protobuf.descriptor.FileDescriptor"
] |
[((432, 458), 'google.protobuf.symbol_database.Default', '_symbol_database.Default', ([], {}), '()\n', (456, 458), True, 'from google.protobuf import symbol_database as _symbol_database\n'), ((725, 1542), 'google.protobuf.descriptor.FileDescriptor', '_descriptor.FileDescriptor', ([], {'name': '"""cosmos/auth/v1beta1/genesis.proto"""', 'package': '"""cosmos.auth.v1beta1"""', 'syntax': '"""proto3"""', 'serialized_options': "b'Z)github.com/cosmos/cosmos-sdk/x/auth/types'", 'create_key': '_descriptor._internal_create_key', 'serialized_pb': 'b\'\\n!cosmos/auth/v1beta1/genesis.proto\\x12\\x13cosmos.auth.v1beta1\\x1a\\x19google/protobuf/any.proto\\x1a\\x14gogoproto/gogo.proto\\x1a\\x1ecosmos/auth/v1beta1/auth.proto"i\\n\\x0cGenesisState\\x121\\n\\x06params\\x18\\x01 \\x01(\\x0b2\\x1b.cosmos.auth.v1beta1.ParamsB\\x04\\xc8\\xde\\x1f\\x00\\x12&\\n\\x08accounts\\x18\\x02 \\x03(\\x0b2\\x14.google.protobuf.AnyB+Z)github.com/cosmos/cosmos-sdk/x/auth/typesb\\x06proto3\'', 'dependencies': '[google_dot_protobuf_dot_any__pb2.DESCRIPTOR, gogoproto_dot_gogo__pb2.\n DESCRIPTOR, cosmos_dot_auth_dot_v1beta1_dot_auth__pb2.DESCRIPTOR]'}), '(name=\'cosmos/auth/v1beta1/genesis.proto\',\n package=\'cosmos.auth.v1beta1\', syntax=\'proto3\', serialized_options=\n b\'Z)github.com/cosmos/cosmos-sdk/x/auth/types\', create_key=_descriptor.\n _internal_create_key, serialized_pb=\n b\'\\n!cosmos/auth/v1beta1/genesis.proto\\x12\\x13cosmos.auth.v1beta1\\x1a\\x19google/protobuf/any.proto\\x1a\\x14gogoproto/gogo.proto\\x1a\\x1ecosmos/auth/v1beta1/auth.proto"i\\n\\x0cGenesisState\\x121\\n\\x06params\\x18\\x01 \\x01(\\x0b2\\x1b.cosmos.auth.v1beta1.ParamsB\\x04\\xc8\\xde\\x1f\\x00\\x12&\\n\\x08accounts\\x18\\x02 \\x03(\\x0b2\\x14.google.protobuf.AnyB+Z)github.com/cosmos/cosmos-sdk/x/auth/typesb\\x06proto3\'\n , dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,\n gogoproto_dot_gogo__pb2.DESCRIPTOR,\n cosmos_dot_auth_dot_v1beta1_dot_auth__pb2.DESCRIPTOR])\n', (751, 1542), True, 'from google.protobuf import descriptor as _descriptor\n'), ((3186, 3352), 'google.protobuf.reflection.GeneratedProtocolMessageType', '_reflection.GeneratedProtocolMessageType', (['"""GenesisState"""', '(_message.Message,)', "{'DESCRIPTOR': _GENESISSTATE, '__module__': 'cosmos.auth.v1beta1.genesis_pb2'}"], {}), "('GenesisState', (_message.Message,\n ), {'DESCRIPTOR': _GENESISSTATE, '__module__':\n 'cosmos.auth.v1beta1.genesis_pb2'})\n", (3226, 3352), True, 'from google.protobuf import reflection as _reflection\n'), ((1790, 2203), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""params"""', 'full_name': '"""cosmos.auth.v1beta1.GenesisState.params"""', 'index': '(0)', 'number': '(1)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(1)', 'has_default_value': '(False)', 'default_value': 'None', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': "b'\\xc8\\xde\\x1f\\x00'", 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='params', full_name=\n 'cosmos.auth.v1beta1.GenesisState.params', index=0, number=1, type=11,\n cpp_type=10, label=1, has_default_value=False, default_value=None,\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=b'\\xc8\\xde\\x1f\\x00',\n file=DESCRIPTOR, create_key=_descriptor._internal_create_key)\n", (1817, 2203), True, 'from google.protobuf import descriptor as _descriptor\n'), ((2225, 2625), 'google.protobuf.descriptor.FieldDescriptor', '_descriptor.FieldDescriptor', ([], {'name': '"""accounts"""', 'full_name': '"""cosmos.auth.v1beta1.GenesisState.accounts"""', 'index': '(1)', 'number': '(2)', 'type': '(11)', 'cpp_type': '(10)', 'label': '(3)', 'has_default_value': '(False)', 'default_value': '[]', 'message_type': 'None', 'enum_type': 'None', 'containing_type': 'None', 'is_extension': '(False)', 'extension_scope': 'None', 'serialized_options': 'None', 'file': 'DESCRIPTOR', 'create_key': '_descriptor._internal_create_key'}), "(name='accounts', full_name=\n 'cosmos.auth.v1beta1.GenesisState.accounts', index=1, number=2, type=11,\n cpp_type=10, label=3, has_default_value=False, default_value=[],\n message_type=None, enum_type=None, containing_type=None, is_extension=\n False, extension_scope=None, serialized_options=None, file=DESCRIPTOR,\n create_key=_descriptor._internal_create_key)\n", (2252, 2625), True, 'from google.protobuf import descriptor as _descriptor\n')]
|
import pytest
from yarl import URL
from pyapp.conf import loaders
from pyapp.conf.loaders import Loader
from pyapp.exceptions import InvalidConfiguration
class TestModuleLoader:
def test__module_exists(self):
target = loaders.ModuleLoader("tests.settings")
actual = dict(target)
assert str(target) == "python:tests.settings"
assert all(key.isupper() for key in actual)
def test__module_not_found(self):
target = loaders.ModuleLoader("tests.unknown.settings")
with pytest.raises(InvalidConfiguration):
dict(target)
assert str(target) == "python:tests.unknown.settings"
class TestObjectLoader:
def test_from_url(self):
with pytest.raises(NotImplementedError):
loaders.ObjectLoader.from_url(URL(""))
def test_extracts_attributes(self):
class MyObject:
FOO = "abc"
BAR = 2
eek = "def"
target = loaders.ObjectLoader(MyObject)
actual = dict(target)
assert actual == {"FOO": "abc", "BAR": 2}
class TestSettingsLoaderRegistry:
def test_register__as_decorator(self):
target = loaders.SettingsLoaderRegistry()
@target.register
class SimpleSettings(Loader):
scheme = "eek"
@classmethod
def from_url(cls, settings_url):
return cls(settings_url)
def __init__(self, settings_url):
self.settings_url = settings_url
def __iter__(self):
return {"SIMPLE": self.settings_url}.items()
assert "eek" in target
assert isinstance(target.factory("eek:sample"), SimpleSettings)
def test_register__as_method(self):
target = loaders.SettingsLoaderRegistry()
class SimpleSettings(Loader):
scheme = ("eek", "ook")
@classmethod
def from_url(cls, settings_url):
return cls(settings_url)
def __init__(self, settings_url):
self.settings_url = settings_url
def __iter__(self):
return {"SIMPLE": self.settings_url}.items()
target.register(SimpleSettings)
assert "eek" in target
assert "ook" in target
assert isinstance(target.factory("eek:sample"), SimpleSettings)
assert isinstance(target.factory("ook:sample"), SimpleSettings)
@pytest.mark.parametrize(
("settings_uri", "expected", "str_value"),
(
("sample.settings", loaders.ModuleLoader, "python:sample.settings"),
("python:sample.settings", loaders.ModuleLoader, "python:sample.settings"),
(
"file:///path/to/sample.json",
loaders.FileLoader,
"file:///path/to/sample.json?type=application/json",
),
),
)
def test_factory__loaders_correctly_resolved(
self, settings_uri, expected, str_value
):
target = loaders.registry
actual = target.factory(settings_uri)
assert isinstance(actual, expected)
assert str(actual) == str_value
@pytest.mark.parametrize(
("settings_uri", "expected"),
(("py:sample.settings", "Unknown scheme `py` in settings URI:"),),
)
def test_factory__invalid_settings_uri(self, settings_uri, expected):
target = loaders.registry
with pytest.raises(InvalidConfiguration) as e:
target.factory(settings_uri)
assert str(e.value).startswith(expected)
|
[
"pyapp.conf.loaders.ModuleLoader",
"pyapp.conf.loaders.SettingsLoaderRegistry",
"pyapp.conf.loaders.ObjectLoader",
"pytest.raises",
"pytest.mark.parametrize",
"yarl.URL"
] |
[((2419, 2757), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('settings_uri', 'expected', 'str_value')", "(('sample.settings', loaders.ModuleLoader, 'python:sample.settings'), (\n 'python:sample.settings', loaders.ModuleLoader,\n 'python:sample.settings'), ('file:///path/to/sample.json', loaders.\n FileLoader, 'file:///path/to/sample.json?type=application/json'))"], {}), "(('settings_uri', 'expected', 'str_value'), ((\n 'sample.settings', loaders.ModuleLoader, 'python:sample.settings'), (\n 'python:sample.settings', loaders.ModuleLoader,\n 'python:sample.settings'), ('file:///path/to/sample.json', loaders.\n FileLoader, 'file:///path/to/sample.json?type=application/json')))\n", (2442, 2757), False, 'import pytest\n'), ((3149, 3274), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (["('settings_uri', 'expected')", "(('py:sample.settings', 'Unknown scheme `py` in settings URI:'),)"], {}), "(('settings_uri', 'expected'), ((\n 'py:sample.settings', 'Unknown scheme `py` in settings URI:'),))\n", (3172, 3274), False, 'import pytest\n'), ((233, 271), 'pyapp.conf.loaders.ModuleLoader', 'loaders.ModuleLoader', (['"""tests.settings"""'], {}), "('tests.settings')\n", (253, 271), False, 'from pyapp.conf import loaders\n'), ((466, 512), 'pyapp.conf.loaders.ModuleLoader', 'loaders.ModuleLoader', (['"""tests.unknown.settings"""'], {}), "('tests.unknown.settings')\n", (486, 512), False, 'from pyapp.conf import loaders\n'), ((958, 988), 'pyapp.conf.loaders.ObjectLoader', 'loaders.ObjectLoader', (['MyObject'], {}), '(MyObject)\n', (978, 988), False, 'from pyapp.conf import loaders\n'), ((1167, 1199), 'pyapp.conf.loaders.SettingsLoaderRegistry', 'loaders.SettingsLoaderRegistry', ([], {}), '()\n', (1197, 1199), False, 'from pyapp.conf import loaders\n'), ((1755, 1787), 'pyapp.conf.loaders.SettingsLoaderRegistry', 'loaders.SettingsLoaderRegistry', ([], {}), '()\n', (1785, 1787), False, 'from pyapp.conf import loaders\n'), ((527, 562), 'pytest.raises', 'pytest.raises', (['InvalidConfiguration'], {}), '(InvalidConfiguration)\n', (540, 562), False, 'import pytest\n'), ((720, 754), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (733, 754), False, 'import pytest\n'), ((3415, 3450), 'pytest.raises', 'pytest.raises', (['InvalidConfiguration'], {}), '(InvalidConfiguration)\n', (3428, 3450), False, 'import pytest\n'), ((798, 805), 'yarl.URL', 'URL', (['""""""'], {}), "('')\n", (801, 805), False, 'from yarl import URL\n')]
|
# PyAlgoTrade
#
# Copyright 2011-2015 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import pyalgotrade.logger
from pyalgotrade.optimizer import base
from pyalgotrade.optimizer import xmlrpcserver
logger = pyalgotrade.logger.getLogger(__name__)
class Results(object):
"""The results of the strategy executions."""
def __init__(self, parameters, result):
self.__parameters = parameters
self.__result = result
def getParameters(self):
"""Returns a sequence of parameter values."""
return self.__parameters
def getResult(self):
"""Returns the result for a given set of parameters."""
return self.__result
def serve(barFeed, strategyParameters, address, port):
"""Executes a server that will provide bars and strategy parameters for workers to use.
:param barFeed: The bar feed that each worker will use to backtest the strategy.
:type barFeed: :class:`pyalgotrade.barfeed.BarFeed`.
:param strategyParameters: The set of parameters to use for backtesting. An iterable object where **each element is a tuple that holds parameter values**.
:param address: The address to listen for incoming worker connections.
:type address: string.
:param port: The port to listen for incoming worker connections.
:type port: int.
:rtype: A :class:`Results` instance with the best results found or None if no results were obtained.
"""
paramSource = base.ParameterSource(strategyParameters)
resultSinc = base.ResultSinc()
s = xmlrpcserver.Server(paramSource, resultSinc, barFeed, address, port)
logger.info("Starting server")
s.serve()
logger.info("Server finished")
ret = None
bestResult, bestParameters = resultSinc.getBest()
if bestResult is not None:
logger.info("Best final result %s with parameters %s" % (bestResult, bestParameters.args))
ret = Results(bestParameters.args, bestResult)
else:
logger.error("No results. All jobs failed or no jobs were processed.")
return ret
|
[
"pyalgotrade.optimizer.xmlrpcserver.Server",
"pyalgotrade.optimizer.base.ParameterSource",
"pyalgotrade.optimizer.base.ResultSinc"
] |
[((1995, 2035), 'pyalgotrade.optimizer.base.ParameterSource', 'base.ParameterSource', (['strategyParameters'], {}), '(strategyParameters)\n', (2015, 2035), False, 'from pyalgotrade.optimizer import base\n'), ((2053, 2070), 'pyalgotrade.optimizer.base.ResultSinc', 'base.ResultSinc', ([], {}), '()\n', (2068, 2070), False, 'from pyalgotrade.optimizer import base\n'), ((2079, 2147), 'pyalgotrade.optimizer.xmlrpcserver.Server', 'xmlrpcserver.Server', (['paramSource', 'resultSinc', 'barFeed', 'address', 'port'], {}), '(paramSource, resultSinc, barFeed, address, port)\n', (2098, 2147), False, 'from pyalgotrade.optimizer import xmlrpcserver\n')]
|
# -*- coding: utf-8 -*-
from scrapy.spiders import CrawlSpider, Rule
from scrapy.linkextractors import LinkExtractor
SEARCH_QUERY = (
'https://www.imdb.com/search/title?'
'title_type=feature&'
'user_rating=1.0,10.0&'
'countries=us&'
'languages=en&'
'count=250&'
'view=simple'
)
class MovieSpider(CrawlSpider):
name = 'movie'
allowed_domains = ['imdb.com']
start_urls = [SEARCH_QUERY]
rules = (Rule(
LinkExtractor(restrict_css=('div.desc a')),
follow=True,
callback='parse_query_page',
),)
def parse_query_page(self, response):
links = response.css('span.lister-item-header a::attr(href)').extract()
for link in links:
yield response.follow(link, callback=self.parse_movie_detail_page)
def parse_movie_detail_page(self, response):
data = {}
data['title'] = response.css('h1::text').extract_first().strip()
data['rating'] = response.css(
'.subtext::text').extract_first().strip() or None
data['year'] = response.css('#titleYear a::text').extract_first()
data['users_rating'] = response.xpath(
'//span[contains(@itemprop, "ratingValue")]/text()').extract_first()
data['votes'] = response.xpath(
'//span[contains(@itemprop, "ratingCount")]/text()').extract_first()
data['metascore'] = response.xpath(
'//div[contains(@class, "metacriticScore")]/span/text()').extract_first()
data['img_url'] = response.xpath(
'//div[contains(@class, "poster")]/a/img/@src').extract_first()
countries = response.xpath(
'//div[contains(@class, "txt-block") and contains(.//h4, "Country")]/a/text()').extract()
data['countries'] = [country.strip() for country in countries]
languages = response.xpath(
'//div[contains(@class, "txt-block") and contains(.//h4, "Language")]/a/text()').extract()
data['languages'] = [language.strip() for language in languages]
actors = response.xpath('//td[not(@class)]/a/text()').extract()
data['actors'] = [actor.strip() for actor in actors]
genres = response.xpath(
"//div[contains(.//h4, 'Genres')]/a/text()").extract()
data['genre'] = [genre.strip() for genre in genres]
tagline = response.xpath(
'//div[contains(string(), "Tagline")]/text()').extract()
data['tagline'] = ''.join(tagline).strip() or None
data['description'] = response.xpath(
'//div[contains(@class, "summary_text")]/text()').extract_first().strip() or None
directors = response.xpath(
"//div[contains(@class, 'credit_summary_item') and contains(.//h4, 'Director')]/a/text()").extract() or None
if directors:
data['directors'] = [director.strip() for director in directors]
data['runtime'] = response.xpath(
"//div[contains(@class, 'txt-block') and contains(.//h4, 'Runtime')]/time/text()").extract_first() or None
data['imdb_url'] = response.url.replace('?ref_=adv_li_tt', '')
yield data
|
[
"scrapy.linkextractors.LinkExtractor"
] |
[((455, 495), 'scrapy.linkextractors.LinkExtractor', 'LinkExtractor', ([], {'restrict_css': '"""div.desc a"""'}), "(restrict_css='div.desc a')\n", (468, 495), False, 'from scrapy.linkextractors import LinkExtractor\n')]
|
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Email: <EMAIL>
# @Date: 2019-08-18 21:14:43
# @Last Modified by: <NAME>
# @Last Modified time: 2021-06-14 11:33:09
import matplotlib.pyplot as plt
from PySONIC.parsers import *
from .plt import SectionGroupedTimeSeries, SectionCompTimeSeries
from .models import models_dict
from .constants import *
class SpatiallyExtendedParser(Parser):
def __init__(self):
super().__init__()
self.addSection()
def addResistivity(self):
self.add_argument(
'--rs', nargs='+', type=float, help='Intracellular resistivity (Ohm.cm)')
def addSection(self):
self.add_argument(
'--section', nargs='+', type=str, help='Section of interest for plot')
def addSectionID(self):
self.add_argument(
'--secid', nargs='+', type=str, help='Section ID')
def parse(self, args=None):
if args is None:
args = super().parse()
return args
@staticmethod
def parseSimInputs(args):
return [args[k] for k in ['rs']]
@staticmethod
def parsePlot(args, output):
render_args = {}
if 'spikes' in args:
render_args['spikes'] = args['spikes']
if args['section'] == ['all']:
raise ValueError('sections names must be explicitly specified')
if args['compare']:
if args['plot'] == ['all']:
logger.error('Specific variables must be specified for comparative plots')
return
for key in ['cmap', 'cscale']:
render_args[key] = args[key]
for pltvar in args['plot']:
comp_plot = SectionCompTimeSeries(output, pltvar, args['section'])
if render_args['cmap'] is None:
del render_args['cmap']
comp_plot.render(**render_args)
else:
for key in args['section']:
scheme_plot = SectionGroupedTimeSeries(key, output, pltscheme=args['pltscheme'])
scheme_plot.render(**render_args)
plt.show()
class FiberParser(SpatiallyExtendedParser):
def __init__(self):
super().__init__()
self.defaults.update({'type': 'senn', 'fiberD': 20., 'nnodes': 21})
self.factors.update({'fiberD': 1 / M_TO_UM})
self.addType()
self.addFiberDiameter()
self.addNnodes()
def addResistivity(self):
pass
def addType(self):
self.add_argument(
'--type', nargs='+', type=str, help='Fiber model type')
def addFiberDiameter(self):
self.add_argument(
'-d', '--fiberD', nargs='+', type=float, help='Fiber diameter (um)')
def addNnodes(self):
self.add_argument(
'--nnodes', nargs='+', type=int, help='Number of nodes of Ranvier')
def parsePlot(self, args, output):
if args['section'] == ['all']:
args['section'] = [f'node{i}' for i in range(args['nnodes'][0])]
return SpatiallyExtendedParser.parsePlot(args, output)
@staticmethod
def parseSimInputs(args):
return SpatiallyExtendedParser.parseSimInputs(args)
def parse(self, args=None):
args = super().parse(args=args)
args['type'] = [models_dict[model_key] for model_key in args['type']]
for key in ['fiberD']:
if len(args[key]) > 1 or args[key][0] is not None:
args[key] = self.parse2array(args, key, factor=self.factors[key])
return args
class EStimFiberParser(FiberParser, PWSimParser):
def __init__(self):
PWSimParser.__init__(self)
FiberParser.__init__(self)
self.defaults.update({'tstim': 0.1, 'toffset': 3.})
self.allowed.update({'mode': ['cathode', 'anode']})
self.addElectrodeMode()
self.addAstim()
def addElectrodeMode(self):
self.add_argument(
'--mode', type=str, help='Electrode polarity mode ("cathode" or "anode")')
def addAstim(self):
self.add_argument(
'-A', '--amp', nargs='+', type=float,
help=f'Point-source current amplitude ({self.amp_unit})')
self.add_argument(
'--Arange', type=str, nargs='+',
help=f'Point-source current amplitude range {self.dist_str} ({self.amp_unit})')
self.to_parse['amp'] = self.parseAmplitude
def parseAmplitude(self, args):
return EStimParser.parseAmplitude(self, args)
def parse(self):
args = FiberParser.parse(self, args=PWSimParser.parse(self))
if isIterable(args['mode']):
args['mode'] = args['mode'][0]
return args
@staticmethod
def parseSimInputs(args):
return PWSimParser.parseSimInputs(args) + SpatiallyExtendedParser.parseSimInputs(args)
def parsePlot(self, *args):
return FiberParser.parsePlot(self, *args)
class IextraFiberParser(EStimFiberParser):
amp_unit = 'mA'
def __init__(self):
super().__init__()
self.defaults.update({'xps': 0., 'zps': None, 'mode': 'cathode', 'amp': -0.7})
self.factors.update({'amp': MA_TO_A, 'xps': 1 / M_TO_MM, 'zps': 1 / M_TO_MM})
self.addPointSourcePosition()
def addPointSourcePosition(self):
self.add_argument(
'--xps', nargs='+', type=float, help='Point source x-position (mm)')
self.add_argument(
'--zps', nargs='+', type=float, help='Point source z-position (mm)')
def parse(self):
args = super().parse()
for key in ['xps', 'zps']:
if len(args[key]) > 1 or args[key][0] is not None:
args[key] = self.parse2array(args, key, factor=self.factors[key])
return args
class IintraFiberParser(EStimFiberParser):
amp_unit = 'nA'
def __init__(self):
super().__init__()
self.defaults.update({'secid': None, 'mode': 'anode', 'amp': 2.0})
self.factors.update({'amp': 1 / A_TO_NA})
self.addSectionID()
class AStimFiberParser(FiberParser, AStimParser):
def __init__(self):
AStimParser.__init__(self)
FiberParser.__init__(self)
for x in [self.defaults, self.allowed, self.to_parse]:
x.pop('method')
self.defaults.update({'tstim': 0.1, 'toffset': 3.})
@staticmethod
def parseSimInputs(args):
return AStimParser.parseSimInputs(args) + SpatiallyExtendedParser.parseSimInputs(args)
def parsePlot(self, *args):
return FiberParser.parsePlot(self, *args)
class SectionAStimFiberParser(AStimFiberParser):
amp_unit = 'kPa'
def __init__(self):
super().__init__()
self.defaults.update({'sec_id': None})
self.addSectionID()
def parseAmplitude(self, args):
return AStimParser.parseAmplitude(self, args)
def parse(self):
args = super().parse()
args['secid'] = [args['secid']]
return args
class SpatiallyExtendedTimeSeriesParser(TimeSeriesParser):
def __init__(self):
super().__init__()
self.addSection()
def addSection(self):
SpatiallyExtendedParser.addSection(self)
class TestNodeNetworkParser(TestParser):
def __init__(self, valid_subsets):
super().__init__(valid_subsets)
self.addConnect()
def addConnect(self):
self.add_argument(
'--connect', default=False, action='store_true', help='Connect nodes')
|
[
"matplotlib.pyplot.show"
] |
[((2086, 2096), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2094, 2096), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-27 03:32
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('crm', '0003_auto_20170421_0932'),
]
operations = [
migrations.CreateModel(
name='ContractTemplate',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('content', models.TextField()),
('date', models.DateField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='PaymentRecord',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('payment_type', models.SmallIntegerField(choices=[(0, '报名费'), (1, '学费'), (2, '退费')], default=0)),
('amount', models.IntegerField(default=500, verbose_name='费用')),
('date', models.DateTimeField(auto_now_add=True)),
('consultant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.UserProfile')),
],
),
migrations.CreateModel(
name='StudentEnrollment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('contract_agreed', models.BooleanField(default=False)),
('contract_signed_date', models.DateTimeField(blank=True, null=True)),
('contract_approved', models.BooleanField(default=False)),
('contract_approved_date', models.DateTimeField(blank=True, null=True, verbose_name='合同审核时间')),
('class_grade', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.ClassList')),
('consultant', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.UserProfile')),
],
),
migrations.AlterModelOptions(
name='customerinfo',
options={'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'},
),
migrations.AlterField(
model_name='student',
name='customer',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='crm.CustomerInfo'),
),
migrations.AddField(
model_name='studentenrollment',
name='customer',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.CustomerInfo'),
),
migrations.AddField(
model_name='paymentrecord',
name='enrollment',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='crm.StudentEnrollment'),
),
migrations.AddField(
model_name='classlist',
name='contract_template',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='crm.ContractTemplate'),
),
]
|
[
"django.db.models.OneToOneField",
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.SmallIntegerField",
"django.db.models.IntegerField",
"django.db.migrations.AlterModelOptions",
"django.db.models.DateField",
"django.db.models.DateTimeField"
] |
[((2154, 2272), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""customerinfo"""', 'options': "{'verbose_name': '客户信息', 'verbose_name_plural': '客户信息'}"}), "(name='customerinfo', options={'verbose_name':\n '客户信息', 'verbose_name_plural': '客户信息'})\n", (2182, 2272), False, 'from django.db import migrations, models\n'), ((2417, 2510), 'django.db.models.OneToOneField', 'models.OneToOneField', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.CustomerInfo"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.CustomerInfo')\n", (2437, 2510), False, 'from django.db import migrations, models\n'), ((2638, 2728), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.CustomerInfo"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.CustomerInfo')\n", (2655, 2728), False, 'from django.db import migrations, models\n'), ((2854, 2949), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.StudentEnrollment"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.StudentEnrollment')\n", (2871, 2949), False, 'from django.db import migrations, models\n'), ((3078, 3195), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.ContractTemplate"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.CASCADE, to='crm.ContractTemplate')\n", (3095, 3195), False, 'from django.db import migrations, models\n'), ((434, 527), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (450, 527), False, 'from django.db import migrations, models\n'), ((551, 582), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (567, 582), False, 'from django.db import migrations, models\n'), ((613, 631), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (629, 631), False, 'from django.db import migrations, models\n'), ((659, 694), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (675, 694), False, 'from django.db import migrations, models\n'), ((833, 926), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (849, 926), False, 'from django.db import migrations, models\n'), ((958, 1037), 'django.db.models.SmallIntegerField', 'models.SmallIntegerField', ([], {'choices': "[(0, '报名费'), (1, '学费'), (2, '退费')]", 'default': '(0)'}), "(choices=[(0, '报名费'), (1, '学费'), (2, '退费')], default=0)\n", (982, 1037), False, 'from django.db import migrations, models\n'), ((1067, 1118), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(500)', 'verbose_name': '"""费用"""'}), "(default=500, verbose_name='费用')\n", (1086, 1118), False, 'from django.db import migrations, models\n'), ((1146, 1185), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1166, 1185), False, 'from django.db import migrations, models\n'), ((1219, 1308), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.UserProfile"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.UserProfile')\n", (1236, 1308), False, 'from django.db import migrations, models\n'), ((1446, 1539), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1462, 1539), False, 'from django.db import migrations, models\n'), ((1574, 1608), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1593, 1608), False, 'from django.db import migrations, models\n'), ((1652, 1695), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1672, 1695), False, 'from django.db import migrations, models\n'), ((1736, 1770), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1755, 1770), False, 'from django.db import migrations, models\n'), ((1816, 1882), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""合同审核时间"""'}), "(blank=True, null=True, verbose_name='合同审核时间')\n", (1836, 1882), False, 'from django.db import migrations, models\n'), ((1917, 2004), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.ClassList"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.ClassList')\n", (1934, 2004), False, 'from django.db import migrations, models\n'), ((2033, 2122), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""crm.UserProfile"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'crm.UserProfile')\n", (2050, 2122), False, 'from django.db import migrations, models\n')]
|
from dataclasses import dataclass
from typing import Optional, Union
import numpy as np
import torch
from transformers.modeling_utils import PreTrainedModel
from transformers.tokenization_utils_base import PreTrainedTokenizerBase, PaddingStrategy, BatchEncoding
DEPTH_SPECIAL_TOKENS = {
-1: 48900,
0: 48613,
1: 48983,
2: 48936,
3: 48712,
4: 49130,
5: 49216
}
ACTION_SPECIAL_TOKENS = {
"UP": 49908,
"HOLD": 49859,
"DOWN": 49452
}
@dataclass
class DataCollatorForLinearTitle:
"""
Data collator that will dynamically pad the inputs received, as well as the labels.
Args:
tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):
The tokenizer used for encoding the data.
model (:class:`~transformers.PreTrainedModel`):
The model that is being trained. If set and has the `prepare_decoder_input_ids_from_labels`, use it to
prepare the `decoder_input_ids`
This is useful when using `label_smoothing` to avoid calculating loss twice.
padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):
Select a strategy to pad the returned sequences (according to the model's padding side and padding index)
among:
* :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single
sequence is provided).
* :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the
maximum acceptable input length for the model if that argument is not provided.
* :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of
different lengths).
max_length (:obj:`int`, `optional`):
Maximum length of the returned list and optionally padding length (see above).
pad_to_multiple_of (:obj:`int`, `optional`):
If set will pad the sequence to a multiple of the provided value.
This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=
7.5 (Volta).
label_pad_token_id (:obj:`int`, `optional`, defaults to -100):
The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).
"""
tokenizer: PreTrainedTokenizerBase
model: Optional[PreTrainedModel] = None
padding: Union[bool, str, PaddingStrategy] = True
max_length: Optional[int] = None
pad_to_multiple_of: Optional[int] = None
label_pad_token_id: int = -100
def __call__(self, features):
labels = [feature["labels"] for feature in features] if "labels" in features[0].keys() else None
# We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the
# same length to return tensors.
if labels is not None:
max_label_length = max(len(l) for l in labels)
padding_side = self.tokenizer.padding_side
for feature in features:
remainder = [self.label_pad_token_id] * (max_label_length - len(feature["labels"]))
feature["labels"] = (
feature["labels"] + remainder if padding_side == "right" else remainder + feature["labels"]
)
labels = torch.tensor([feature['labels'] for feature in features], dtype=torch.long)
input_ids = []
attention_masks = []
global_attention_masks = []
for feature in features:
input_id = feature["tokenized_inputs"]
attention_mask = [1] * len(input_id)
global_attention_mask = [0] * len(input_id)
global_attention_mask[0] = 1
input_id = input_id[:self.max_length][:-1] + [2]
attention_mask = attention_mask[:self.max_length]
global_attention_mask = global_attention_mask[:self.max_length]
input_ids.append(input_id)
attention_masks.append(attention_mask)
global_attention_masks.append(global_attention_mask)
max_input_length = max(len(input_id) for input_id in input_ids)
if self.pad_to_multiple_of is not None and (max_input_length % self.pad_to_multiple_of != 0):
max_input_length = ((max_input_length // self.pad_to_multiple_of) + 1) * self.pad_to_multiple_of
# padding
input_ids = [input_id + [self.tokenizer.pad_token_id] * (max_input_length - len(input_id)) for input_id in input_ids]
attention_masks = [attention_mask + [0] * (max_input_length - len(attention_mask)) for attention_mask in attention_masks]
global_attention_masks = [global_attention_mask + [0] * (max_input_length - len(global_attention_mask)) for global_attention_mask in global_attention_masks]
input_ids = torch.tensor(input_ids, dtype=torch.long)
attention_masks = torch.tensor(attention_masks, dtype=torch.long)
global_attention_masks = torch.tensor(global_attention_masks, dtype=torch.long)
all_features = {
"input_ids": input_ids,
"attention_mask": attention_masks,
"global_attention_mask": global_attention_masks,
"labels": labels
}
# prepare decoder_input_ids
if self.model is not None and hasattr(self.model, "prepare_decoder_input_ids_from_labels"):
decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(labels=labels)
all_features["decoder_input_ids"] = decoder_input_ids
return all_features
|
[
"torch.tensor"
] |
[((3515, 3590), 'torch.tensor', 'torch.tensor', (["[feature['labels'] for feature in features]"], {'dtype': 'torch.long'}), "([feature['labels'] for feature in features], dtype=torch.long)\n", (3527, 3590), False, 'import torch\n'), ((5011, 5052), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.long'}), '(input_ids, dtype=torch.long)\n', (5023, 5052), False, 'import torch\n'), ((5079, 5126), 'torch.tensor', 'torch.tensor', (['attention_masks'], {'dtype': 'torch.long'}), '(attention_masks, dtype=torch.long)\n', (5091, 5126), False, 'import torch\n'), ((5160, 5214), 'torch.tensor', 'torch.tensor', (['global_attention_masks'], {'dtype': 'torch.long'}), '(global_attention_masks, dtype=torch.long)\n', (5172, 5214), False, 'import torch\n')]
|
import argparse
import json
from multiprocessing.util import Finalize
from typing import Dict, List, Tuple
from multiprocessing import Pool as ProcessPool
import itertools
import pickle
import numpy as np
import os
from os.path import join
from tqdm import tqdm
from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, \
IterativeQuestionAndParagraphs
from hotpot.data_handling.dataset import QuestionAndParagraphsSpec
from hotpot.encoding.paragraph_encoder import SentenceEncoderSingleContext, SentenceEncoderIterativeModel
from hotpot.tfidf_retriever.doc_db import DocDB
from hotpot.tokenizers import CoreNLPTokenizer
from hotpot.utils import ResourceLoader
PROCESS_TOK = None
PROCESS_DB = None
def init():
global PROCESS_TOK, PROCESS_DB
PROCESS_TOK = CoreNLPTokenizer()
Finalize(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)
PROCESS_DB = DocDB()
Finalize(PROCESS_DB, PROCESS_DB.close, exitpriority=100)
def fetch_sentences(doc_title):
global PROCESS_DB
return PROCESS_DB.get_doc_sentences(doc_title)
def tokenize(text):
global PROCESS_TOK
return PROCESS_TOK.tokenize(text)
def tokenize_document(doc: Tuple[str, List[str]]) -> Dict[str, List[List[str]]]:
return {doc[0]: [tokenize(x).words() for x in doc[1]]}
def tokenize_from_db(title: str) -> Dict[str, List[List[str]]]:
return {title: [tokenize(' '.join(fetch_sentences(title))).words()]}
# class DocumentsEncodingSaver(object):
# def __init__(self, encodings_path: str):
# self.encodings_path = encodings_path
# self.encodings = None
# self.title2idx2par_name = None
#
# def _load_encodings(self):
# self.encodings = np.load(self.encodings_path)
#
# def get_document_encodings(self, title: str):
# if self.encodings is None:
# self._load_encodings()
# return self.encodings[title]
#
# def build_document_encodings_from_paragraphs(self, par_name2enc: Dict[str, np.ndarray]):
# par_names = list(par_name2enc.keys())
# title2par_names = {title: list(par_names)
# for title, par_names in
# itertools.groupby(sorted(par_names, key=par_name_to_title), key=par_name_to_title)}
# title2encs = {}
# self.title2idx2par_name = {}
# for title, p_names in tqdm(title2par_names.items()):
# par2ids = {}
# reps = []
# total_sentences = 0
# for p_name in p_names:
# rep = par_name2enc[p_name]
# par2ids[p_name] = list(range(total_sentences, total_sentences + len(rep)))
# reps.append(rep)
# total_sentences += len(rep)
# id2par = {i: p for p, ids in par2ids.items() for i in ids}
# reps = np.concatenate(reps, axis=0)
# title2encs[title] = reps
# self.title2idx2par_name[title] = id2par
# np.savez_compressed(self.encodings_path, **title2encs)
class DocumentEncodingHandler(object):
def __init__(self, encodings_dir: str):
self.encodings_dir = os.path.abspath(encodings_dir)
self.titles2filenames = self._get_titles_to_filenames()
def _title_to_filename_json(self):
return join(self.encodings_dir, "title_to_filenames.json")
def _get_titles_to_filenames(self):
titles2files = {}
if not os.path.exists(self._title_to_filename_json()):
with open(self._title_to_filename_json(), 'w') as f:
pass
return {}
with open(self._title_to_filename_json(), 'r') as f:
for line in f:
titles2files.update(json.loads(line))
return titles2files
def _title_to_npy(self, title: str):
return join(self.encodings_dir, f"{self.titles2filenames[title]}.npy")
def _title_to_idx2parname(self, title: str):
return join(self.encodings_dir, f"{self.titles2filenames[title]}_idx2pname.pkl")
def get_document_encodings(self, title: str) -> np.ndarray:
return np.load(self._title_to_npy(title))
def get_document_idx2pname(self, title: str) -> Dict[int, str]:
with open(self._title_to_idx2parname(title), 'rb') as f:
return pickle.load(f)
def save_document_encoding(self, par_name2enc: Dict[str, np.ndarray], overwrite=False):
title = par_name_to_title(next(iter(par_name2enc)))
if title in self.titles2filenames and not overwrite:
raise ValueError(f"Overwrite enabled, {title} encodings already exist")
par2ids = {}
reps = []
total_sentences = 0
for p_name in par_name2enc:
if par_name_to_title(p_name) != title:
raise ValueError("All paragraphs must belong to the same title")
rep = par_name2enc[p_name]
par2ids[p_name] = list(range(total_sentences, total_sentences + len(rep)))
reps.append(rep)
total_sentences += len(rep)
id2par = {i: p for p, ids in par2ids.items() for i in ids}
reps = np.concatenate(reps, axis=0)
if title not in self.titles2filenames:
self.titles2filenames[title] = str(len(self.titles2filenames))
with open(self._title_to_filename_json(), 'a') as f:
json.dump({title: self.titles2filenames[title]}, f)
f.write(os.linesep)
with open(self._title_to_idx2parname(title), 'wb') as f:
pickle.dump(id2par, f)
np.save(self._title_to_npy(title), reps)
def save_multiple_documents(self, par_name2enc: Dict[str, np.ndarray], overwrite=False):
par_names = list(par_name2enc.keys())
title2par_names = {title: list(par_names)
for title, par_names in
itertools.groupby(sorted(par_names, key=par_name_to_title), key=par_name_to_title)}
for title, p_names in tqdm(title2par_names.items()):
self.save_document_encoding({p_name: par_name2enc[p_name] for p_name in p_names}, overwrite=overwrite)
# def convert_single_file_to_current_format(self, old_saver: DocumentsEncodingSaver):
# for title in tqdm(old_saver.title2idx2par_name.keys()):
# encs = old_saver.get_document_encodings(title)
# idx2par_names = old_saver.title2idx2par_name[title]
# self.titles2filenames[title] = str(len(self.titles2filenames))
# with open(self._title_to_filename_json(), 'a') as f:
# json.dump({title: self.titles2filenames[title]}, f)
# f.write(os.linesep)
# with open(self._title_to_idx2parname(title), 'wb') as f:
# pickle.dump(idx2par_names, f)
# np.save(self._title_to_npy(title), encs)
def par_name_to_title(par_name):
return '_'.join(par_name.split('_')[:-1])
def encode_from_file(docs_file, questions_file, encodings_dir, encoder_model, num_workers, hotpot: bool,
long_batch: int, short_batch: int, use_chars: bool, use_ema: bool, checkpoint: str,
document_chunk_size=1000, samples=None, encode_all_db=False):
"""
:param out_file: .npz file to dump the encodings
:param docs_file: path to json file whose structure is [{title: list of paragraphs}, ...]
:return:
"""
doc_encs_handler = DocumentEncodingHandler(encodings_dir)
# Setup worker pool
workers = ProcessPool(
num_workers,
initializer=init,
initargs=[]
)
if docs_file is not None:
with open(docs_file, 'r') as f:
documents = json.load(f)
documents = {k: v for k, v in documents.items() if k not in doc_encs_handler.titles2filenames}
tokenized_documents = {}
tupled_doc_list = [(title, pars) for title, pars in documents.items()]
if samples is not None:
print(f"sampling {samples} samples")
tupled_doc_list = tupled_doc_list[:samples]
print("Tokenizing from file...")
with tqdm(total=len(tupled_doc_list), ncols=80) as pbar:
for tok_doc in tqdm(workers.imap_unordered(tokenize_document, tupled_doc_list)):
tokenized_documents.update(tok_doc)
pbar.update()
else:
if questions_file is not None:
with open(questions_file, 'r') as f:
questions = json.load(f)
all_titles = list(set([title for q in questions for title in q['top_titles']]))
else:
print("encoding all DB!")
all_titles = DocDB().get_doc_titles()
if samples is not None:
print(f"sampling {samples} samples")
all_titles = all_titles[:samples]
all_titles = [t for t in all_titles if t not in doc_encs_handler.titles2filenames]
tokenized_documents = {}
print("Tokenizing from DB...")
with tqdm(total=len(all_titles), ncols=80) as pbar:
for tok_doc in tqdm(workers.imap_unordered(tokenize_from_db, all_titles)):
tokenized_documents.update(tok_doc)
pbar.update()
workers.close()
workers.join()
voc = set()
for paragraphs in tokenized_documents.values():
for par in paragraphs:
voc.update(par)
if not hotpot:
spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=1,
max_num_question_words=None, max_num_context_words=None)
encoder = SentenceEncoderSingleContext(model_dir_path=encoder_model, vocabulary=voc, spec=spec,
loader=ResourceLoader(), use_char_inputs=use_chars,
use_ema=use_ema, checkpoint=checkpoint)
else:
spec = QuestionAndParagraphsSpec(batch_size=None, max_num_contexts=2,
max_num_question_words=None, max_num_context_words=None)
encoder = SentenceEncoderIterativeModel(model_dir_path=encoder_model, vocabulary=voc, spec=spec,
loader=ResourceLoader(), use_char_inputs=use_chars,
use_ema=use_ema, checkpoint=checkpoint)
tokenized_documents_items = list(tokenized_documents.items())
for tokenized_doc_chunk in tqdm([tokenized_documents_items[i:i + document_chunk_size]
for i in range(0, len(tokenized_documents_items), document_chunk_size)],
ncols=80):
flattened_pars_with_names = [(f"{title}_{i}", par)
for title, pars in tokenized_doc_chunk for i, par in enumerate(pars)]
# filtering out empty paragraphs (probably had some short string the tokenization removed)
# important to notice that the filtered paragraphs will have no representation,
# but they still exist in the numbering of paragraphs for consistency with the docs.
flattened_pars_with_names = [(name, par) for name, par in flattened_pars_with_names if len(par) > 0]
# sort such that longer paragraphs are first to identify OOMs early on
flattened_pars_with_names = sorted(flattened_pars_with_names, key=lambda x: len(x[1]), reverse=True)
long_paragraphs_ids = [i for i, name_par in enumerate(flattened_pars_with_names) if len(name_par[1]) >= 900]
short_paragraphs_ids = [i for i, name_par in enumerate(flattened_pars_with_names) if len(name_par[1]) < 900]
# print(f"Encoding {len(flattened_pars_with_names)} paragraphs...")
name2enc = {}
dummy_question = "Hello Hello".split()
if not hotpot:
model_paragraphs = [BinaryQuestionAndParagraphs(question=dummy_question,
paragraphs=[x], label=1,
num_distractors=0, question_id='dummy')
for _, x in flattened_pars_with_names]
else:
# todo allow precomputed sentence segments
model_paragraphs = [IterativeQuestionAndParagraphs(question=dummy_question,
paragraphs=[x, dummy_question],
first_label=1, second_label=1,
question_id='dummy', sentence_segments=None)
for _, x in flattened_pars_with_names]
# print("Encoding long paragraphs...")
long_pars = [model_paragraphs[i] for i in long_paragraphs_ids]
name2enc.update({flattened_pars_with_names[long_paragraphs_ids[i]][0]: enc
for i, enc in
enumerate(encoder.encode_paragraphs(long_pars, batch_size=long_batch, show_progress=True)
if not hotpot
else encoder.encode_first_paragraphs(long_pars, batch_size=long_batch,
show_progress=True))})
# print("Encoding short paragraphs...")
short_pars = [model_paragraphs[i] for i in short_paragraphs_ids]
name2enc.update({flattened_pars_with_names[short_paragraphs_ids[i]][0]: enc
for i, enc in enumerate(encoder.encode_paragraphs(short_pars, batch_size=short_batch,
show_progress=True)
if not hotpot
else encoder.encode_first_paragraphs(short_pars,
batch_size=short_batch,
show_progress=True)
)})
doc_encs_handler.save_multiple_documents(name2enc)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Encode a dataset')
parser.add_argument('encodings_dir', help="directory to dump the encodings")
parser.add_argument('encoder_model', help="model to encode with")
parser.add_argument('--docs_file', default=None, help="a document json filename from which to load the top-k dataset")
parser.add_argument('--questions_file', default=None,
help="a questions json filename from which to load the top-k dataset."
" For hotpot, loads docs from DB")
parser.add_argument('--encode-all-db', action='store_true')
parser.add_argument('--checkpoint', type=str, default='best', choices=['best', 'latest'])
parser.add_argument('--ema', action='store_true')
parser.add_argument('--num-workers', type=int, default=16)
parser.add_argument('--hotpot', action='store_true')
parser.add_argument('--long-batch', type=int, default=8)
parser.add_argument('--short-batch', type=int, default=128)
parser.add_argument('--use-chars', action='store_true')
parser.add_argument('--doc-chunk', type=int, default=1000)
parser.add_argument('--samples', type=int, default=None)
args = parser.parse_args()
if (args.docs_file and args.questions_file) or (not args.docs_file and not args.questions_file):
if not args.encode_all_db or (args.encode_all_db and (args.docs_file or args.questions_file)):
raise ValueError("please, questions file or docs file")
if not args.hotpot and not args.docs_file:
raise ValueError("only hotpot supports retrieving from db")
encode_from_file(args.docs_file, args.questions_file, args.encodings_dir,
args.encoder_model, args.num_workers, hotpot=args.hotpot,
long_batch=args.long_batch, short_batch=args.short_batch, use_chars=args.use_chars,
document_chunk_size=args.doc_chunk, use_ema=args.ema, checkpoint=args.checkpoint,
samples=args.samples, encode_all_db=args.encode_all_db)
|
[
"json.dump",
"os.path.abspath",
"multiprocessing.util.Finalize",
"pickle.dump",
"argparse.ArgumentParser",
"json.load",
"hotpot.data_handling.dataset.QuestionAndParagraphsSpec",
"json.loads",
"hotpot.tokenizers.CoreNLPTokenizer",
"pickle.load",
"hotpot.utils.ResourceLoader",
"hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs",
"hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs",
"multiprocessing.Pool",
"hotpot.tfidf_retriever.doc_db.DocDB",
"os.path.join",
"numpy.concatenate"
] |
[((800, 818), 'hotpot.tokenizers.CoreNLPTokenizer', 'CoreNLPTokenizer', ([], {}), '()\n', (816, 818), False, 'from hotpot.tokenizers import CoreNLPTokenizer\n'), ((823, 884), 'multiprocessing.util.Finalize', 'Finalize', (['PROCESS_TOK', 'PROCESS_TOK.shutdown'], {'exitpriority': '(100)'}), '(PROCESS_TOK, PROCESS_TOK.shutdown, exitpriority=100)\n', (831, 884), False, 'from multiprocessing.util import Finalize\n'), ((902, 909), 'hotpot.tfidf_retriever.doc_db.DocDB', 'DocDB', ([], {}), '()\n', (907, 909), False, 'from hotpot.tfidf_retriever.doc_db import DocDB\n'), ((914, 970), 'multiprocessing.util.Finalize', 'Finalize', (['PROCESS_DB', 'PROCESS_DB.close'], {'exitpriority': '(100)'}), '(PROCESS_DB, PROCESS_DB.close, exitpriority=100)\n', (922, 970), False, 'from multiprocessing.util import Finalize\n'), ((7459, 7514), 'multiprocessing.Pool', 'ProcessPool', (['num_workers'], {'initializer': 'init', 'initargs': '[]'}), '(num_workers, initializer=init, initargs=[])\n', (7470, 7514), True, 'from multiprocessing import Pool as ProcessPool\n'), ((14155, 14210), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Encode a dataset"""'}), "(description='Encode a dataset')\n", (14178, 14210), False, 'import argparse\n'), ((3136, 3166), 'os.path.abspath', 'os.path.abspath', (['encodings_dir'], {}), '(encodings_dir)\n', (3151, 3166), False, 'import os\n'), ((3286, 3337), 'os.path.join', 'join', (['self.encodings_dir', '"""title_to_filenames.json"""'], {}), "(self.encodings_dir, 'title_to_filenames.json')\n", (3290, 3337), False, 'from os.path import join\n'), ((3803, 3866), 'os.path.join', 'join', (['self.encodings_dir', 'f"""{self.titles2filenames[title]}.npy"""'], {}), "(self.encodings_dir, f'{self.titles2filenames[title]}.npy')\n", (3807, 3866), False, 'from os.path import join\n'), ((3932, 4005), 'os.path.join', 'join', (['self.encodings_dir', 'f"""{self.titles2filenames[title]}_idx2pname.pkl"""'], {}), "(self.encodings_dir, f'{self.titles2filenames[title]}_idx2pname.pkl')\n", (3936, 4005), False, 'from os.path import join\n'), ((5099, 5127), 'numpy.concatenate', 'np.concatenate', (['reps'], {'axis': '(0)'}), '(reps, axis=0)\n', (5113, 5127), True, 'import numpy as np\n'), ((9347, 9470), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'None', 'max_num_contexts': '(1)', 'max_num_question_words': 'None', 'max_num_context_words': 'None'}), '(batch_size=None, max_num_contexts=1,\n max_num_question_words=None, max_num_context_words=None)\n', (9372, 9470), False, 'from hotpot.data_handling.dataset import QuestionAndParagraphsSpec\n'), ((9823, 9946), 'hotpot.data_handling.dataset.QuestionAndParagraphsSpec', 'QuestionAndParagraphsSpec', ([], {'batch_size': 'None', 'max_num_contexts': '(2)', 'max_num_question_words': 'None', 'max_num_context_words': 'None'}), '(batch_size=None, max_num_contexts=2,\n max_num_question_words=None, max_num_context_words=None)\n', (9848, 9946), False, 'from hotpot.data_handling.dataset import QuestionAndParagraphsSpec\n'), ((4274, 4288), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4285, 4288), False, 'import pickle\n'), ((5496, 5518), 'pickle.dump', 'pickle.dump', (['id2par', 'f'], {}), '(id2par, f)\n', (5507, 5518), False, 'import pickle\n'), ((7640, 7652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7649, 7652), False, 'import json\n'), ((5331, 5382), 'json.dump', 'json.dump', (['{title: self.titles2filenames[title]}', 'f'], {}), '({title: self.titles2filenames[title]}, f)\n', (5340, 5382), False, 'import json\n'), ((8415, 8427), 'json.load', 'json.load', (['f'], {}), '(f)\n', (8424, 8427), False, 'import json\n'), ((9666, 9682), 'hotpot.utils.ResourceLoader', 'ResourceLoader', ([], {}), '()\n', (9680, 9682), False, 'from hotpot.utils import ResourceLoader\n'), ((10144, 10160), 'hotpot.utils.ResourceLoader', 'ResourceLoader', ([], {}), '()\n', (10158, 10160), False, 'from hotpot.utils import ResourceLoader\n'), ((11771, 11893), 'hotpot.data_handling.relevance_training_data.BinaryQuestionAndParagraphs', 'BinaryQuestionAndParagraphs', ([], {'question': 'dummy_question', 'paragraphs': '[x]', 'label': '(1)', 'num_distractors': '(0)', 'question_id': '"""dummy"""'}), "(question=dummy_question, paragraphs=[x], label=\n 1, num_distractors=0, question_id='dummy')\n", (11798, 11893), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((12181, 12352), 'hotpot.data_handling.relevance_training_data.IterativeQuestionAndParagraphs', 'IterativeQuestionAndParagraphs', ([], {'question': 'dummy_question', 'paragraphs': '[x, dummy_question]', 'first_label': '(1)', 'second_label': '(1)', 'question_id': '"""dummy"""', 'sentence_segments': 'None'}), "(question=dummy_question, paragraphs=[x,\n dummy_question], first_label=1, second_label=1, question_id='dummy',\n sentence_segments=None)\n", (12211, 12352), False, 'from hotpot.data_handling.relevance_training_data import BinaryQuestionAndParagraphs, IterativeQuestionAndParagraphs\n'), ((3700, 3716), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (3710, 3716), False, 'import json\n'), ((8597, 8604), 'hotpot.tfidf_retriever.doc_db.DocDB', 'DocDB', ([], {}), '()\n', (8602, 8604), False, 'from hotpot.tfidf_retriever.doc_db import DocDB\n')]
|
"""Prepare the ImageNet dataset"""
import os
import argparse
import tarfile
import pickle
import gzip
import subprocess
from tqdm import tqdm
import subprocess
from encoding.utils import check_sha1, download, mkdir
_TARGET_DIR = os.path.expanduser('~/.encoding/data/ILSVRC2012')
_TRAIN_TAR = 'ILSVRC2012_img_train.tar'
_TRAIN_TAR_SHA1 = '43eda4fe35c1705d6606a6a7a633bc965d194284'
_VAL_TAR = 'ILSVRC2012_img_val.tar'
_VAL_TAR_SHA1 = '5f3f73da3395154b60528b2b2a2caf2374f5f178'
def parse_args():
parser = argparse.ArgumentParser(
description='Setup the ImageNet dataset.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--download-dir', required=True,
help="The directory that contains downloaded tar files")
parser.add_argument('--target-dir', default=_TARGET_DIR,
help="The directory to store extracted images")
parser.add_argument('--checksum', action='store_true',
help="If check integrity before extracting.")
parser.add_argument('--with-rec', action='store_true',
help="If build image record files.")
parser.add_argument('--num-thread', type=int, default=1,
help="Number of threads to use when building image record file.")
args = parser.parse_args()
return args
def check_file(filename, checksum, sha1):
if not os.path.exists(filename):
raise ValueError('File not found: '+filename)
if checksum and not check_sha1(filename, sha1):
raise ValueError('Corrupted file: '+filename)
def extract_train(tar_fname, target_dir, with_rec=False, num_thread=1):
mkdir(target_dir)
with tarfile.open(tar_fname) as tar:
print("Extracting "+tar_fname+"...")
# extract each class one-by-one
pbar = tqdm(total=len(tar.getnames()))
for class_tar in tar:
pbar.set_description('Extract '+class_tar.name)
tar.extract(class_tar, target_dir)
class_fname = os.path.join(target_dir, class_tar.name)
class_dir = os.path.splitext(class_fname)[0]
os.mkdir(class_dir)
with tarfile.open(class_fname) as f:
f.extractall(class_dir)
os.remove(class_fname)
pbar.update(1)
pbar.close()
def extract_val(tar_fname, target_dir, with_rec=False, num_thread=1):
mkdir(target_dir)
print('Extracting ' + tar_fname)
with tarfile.open(tar_fname) as tar:
tar.extractall(target_dir)
# build rec file before images are moved into subfolders
# move images to proper subfolders
subprocess.call(["wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash"],
cwd=target_dir, shell=True)
def main():
args = parse_args()
target_dir = os.path.expanduser(args.target_dir)
#if os.path.exists(target_dir):
# raise ValueError('Target dir ['+target_dir+'] exists. Remove it first')
download_dir = os.path.expanduser(args.download_dir)
train_tar_fname = os.path.join(download_dir, _TRAIN_TAR)
check_file(train_tar_fname, args.checksum, _TRAIN_TAR_SHA1)
val_tar_fname = os.path.join(download_dir, _VAL_TAR)
check_file(val_tar_fname, args.checksum, _VAL_TAR_SHA1)
build_rec = args.with_rec
if build_rec:
os.makedirs(os.path.join(target_dir, 'rec'))
extract_train(train_tar_fname, os.path.join(target_dir, 'train'), build_rec, args.num_thread)
extract_val(val_tar_fname, os.path.join(target_dir, 'val'), build_rec, args.num_thread)
if __name__ == '__main__':
main()
|
[
"os.path.expanduser",
"os.mkdir",
"os.remove",
"argparse.ArgumentParser",
"os.path.exists",
"encoding.utils.check_sha1",
"encoding.utils.mkdir",
"subprocess.call",
"os.path.splitext",
"tarfile.open",
"os.path.join"
] |
[((230, 279), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.encoding/data/ILSVRC2012"""'], {}), "('~/.encoding/data/ILSVRC2012')\n", (248, 279), False, 'import os\n'), ((508, 634), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Setup the ImageNet dataset."""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Setup the ImageNet dataset.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (531, 634), False, 'import argparse\n'), ((1683, 1700), 'encoding.utils.mkdir', 'mkdir', (['target_dir'], {}), '(target_dir)\n', (1688, 1700), False, 'from encoding.utils import check_sha1, download, mkdir\n'), ((2414, 2431), 'encoding.utils.mkdir', 'mkdir', (['target_dir'], {}), '(target_dir)\n', (2419, 2431), False, 'from encoding.utils import check_sha1, download, mkdir\n'), ((2649, 2805), 'subprocess.call', 'subprocess.call', (["['wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash'\n ]"], {'cwd': 'target_dir', 'shell': '(True)'}), "([\n 'wget -qO- https://raw.githubusercontent.com/soumith/imagenetloader.torch/master/valprep.sh | bash'\n ], cwd=target_dir, shell=True)\n", (2664, 2805), False, 'import subprocess\n'), ((2876, 2911), 'os.path.expanduser', 'os.path.expanduser', (['args.target_dir'], {}), '(args.target_dir)\n', (2894, 2911), False, 'import os\n'), ((3049, 3086), 'os.path.expanduser', 'os.path.expanduser', (['args.download_dir'], {}), '(args.download_dir)\n', (3067, 3086), False, 'import os\n'), ((3109, 3147), 'os.path.join', 'os.path.join', (['download_dir', '_TRAIN_TAR'], {}), '(download_dir, _TRAIN_TAR)\n', (3121, 3147), False, 'import os\n'), ((3232, 3268), 'os.path.join', 'os.path.join', (['download_dir', '_VAL_TAR'], {}), '(download_dir, _VAL_TAR)\n', (3244, 3268), False, 'import os\n'), ((1420, 1444), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1434, 1444), False, 'import os\n'), ((1710, 1733), 'tarfile.open', 'tarfile.open', (['tar_fname'], {}), '(tar_fname)\n', (1722, 1733), False, 'import tarfile\n'), ((2478, 2501), 'tarfile.open', 'tarfile.open', (['tar_fname'], {}), '(tar_fname)\n', (2490, 2501), False, 'import tarfile\n'), ((3466, 3499), 'os.path.join', 'os.path.join', (['target_dir', '"""train"""'], {}), "(target_dir, 'train')\n", (3478, 3499), False, 'import os\n'), ((3560, 3591), 'os.path.join', 'os.path.join', (['target_dir', '"""val"""'], {}), "(target_dir, 'val')\n", (3572, 3591), False, 'import os\n'), ((1524, 1550), 'encoding.utils.check_sha1', 'check_sha1', (['filename', 'sha1'], {}), '(filename, sha1)\n', (1534, 1550), False, 'from encoding.utils import check_sha1, download, mkdir\n'), ((2037, 2077), 'os.path.join', 'os.path.join', (['target_dir', 'class_tar.name'], {}), '(target_dir, class_tar.name)\n', (2049, 2077), False, 'import os\n'), ((2147, 2166), 'os.mkdir', 'os.mkdir', (['class_dir'], {}), '(class_dir)\n', (2155, 2166), False, 'import os\n'), ((2268, 2290), 'os.remove', 'os.remove', (['class_fname'], {}), '(class_fname)\n', (2277, 2290), False, 'import os\n'), ((3398, 3429), 'os.path.join', 'os.path.join', (['target_dir', '"""rec"""'], {}), "(target_dir, 'rec')\n", (3410, 3429), False, 'import os\n'), ((2102, 2131), 'os.path.splitext', 'os.path.splitext', (['class_fname'], {}), '(class_fname)\n', (2118, 2131), False, 'import os\n'), ((2184, 2209), 'tarfile.open', 'tarfile.open', (['class_fname'], {}), '(class_fname)\n', (2196, 2209), False, 'import tarfile\n')]
|
from setuptools import setup, find_packages
from cana import __package__, __title__, __description__, __version__
def readme():
with open('README.md') as f:
return f.read()
setup(
name=__package__,
version=__version__,
description=__description__,
long_description=__description__,
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Information Analysis',
],
keywords="boolean networks canalization redundancy dynamical systems computational biology",
url="http://github.com/rionbr/CANA",
author="<NAME> & <NAME>",
author_email="<EMAIL>",
license="MIT",
packages=find_packages(),
package_data={
'datasets': [
'cana.datasets/*.txt',
'cana.datasets/bns/*.cnet',
'cana.datasets/cell_collective/*.txt'
],
},
install_requires=[
'numpy',
'scipy',
'networkx',
'pandas'
],
include_package_data=True,
zip_safe=False,
)
|
[
"setuptools.find_packages"
] |
[((748, 763), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (761, 763), False, 'from setuptools import setup, find_packages\n')]
|
# -*- coding: utf-8 -*-
"""load_map contains several shortcut functions to quickly load maps.
Custom map-loading routines will probably be desired, but load_map
can be useful for testing new heuristics, pathfinding algorithms, etc."""
import nodes
import algorithms
import metrics
START = '0'
BLANK = ' '
WALL = '#'
TARGET = '@'
def read_tiles(f):
"""Read file `f` and yield (position, char) tuples"""
# currently outputs in cartesian coordinates-- bad idea?
lines = f.read().splitlines()
for y, line in enumerate(reversed(lines)):
for x, char in enumerate(line):
yield (x, y), char
def file_to_tile(f,
start=START, blank=BLANK, wall=WALL, target=TARGET,
heuristic=metrics.manhattan):
"""Take an input file `f` and return RectNodes start_node, target_node
(if a target is not found, start_node, None will be returned instead """
walkable = {}
start_pos = target_pos = None
for pos, char in read_tiles(f):
if char == wall:
walkable[pos] = False
elif char == target:
target_pos = pos
elif char == start:
start_pos = pos
elif char == blank:
pass
else:
raise ValueError("Unknown tile type: '%s'" % char)
if start_pos is None:
raise ValueError("No starting position in map")
start_node = nodes.RectNode(start_pos,
walkable=walkable, heuristic=heuristic)
if target_pos is None:
# target position is optional
target_node = None
else:
target_node = nodes.RectNode(target_pos,
walkable=walkable, heuristic=heuristic)
return start_node, target_node
|
[
"nodes.RectNode"
] |
[((1377, 1442), 'nodes.RectNode', 'nodes.RectNode', (['start_pos'], {'walkable': 'walkable', 'heuristic': 'heuristic'}), '(start_pos, walkable=walkable, heuristic=heuristic)\n', (1391, 1442), False, 'import nodes\n'), ((1576, 1642), 'nodes.RectNode', 'nodes.RectNode', (['target_pos'], {'walkable': 'walkable', 'heuristic': 'heuristic'}), '(target_pos, walkable=walkable, heuristic=heuristic)\n', (1590, 1642), False, 'import nodes\n')]
|
import cv2
import uuid
import os
COLORS = {
'thief': (255, 0, 0),
'policeman1': (0, 255, 0),
'policeman2': (0, 0, 255)
}
FONT = cv2.FONT_HERSHEY_SIMPLEX
FONT_SCALE = 1
LINE_TYPE = 2
class Camera:
@staticmethod
def get_fake_gaming_board():
frame = cv2.imread('../resources/gaming_board.jpg')
frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
return frame
def __init__(self, camera_id=0, draw=True, save=True, save_path='../data/pics', num_skip=10, window_name='main'):
self.draw = draw
self.save = save
self.save_path = save_path
self.cap = cv2.VideoCapture(camera_id)
self.num_skip = num_skip
self.window_name = window_name
if self.draw:
cv2.namedWindow(window_name)
def __del__(self):
self.cap.release()
if self.draw:
cv2.destroyWindow(self.window_name)
def _skip_frames(self):
for i in range(self.num_skip):
self.cap.read()
def get_image(self):
self._skip_frames()
flag, frame = self.cap.read()
if flag:
if self.save:
cv2.imwrite(os.path.join(self.save_path, '{}.jpg'.format(uuid.uuid1())), frame)
if self.draw:
self.display(frame)
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
else:
image = None
return image
@staticmethod
def draw_boxes(image, object_list):
if len(object_list) > 0:
for key, value in object_list.items():
height, width = image.shape[0], image.shape[1]
x = int(value['center'][0] * width)
y = int(value['center'][1] * height)
size_width = value['size'][0] * width
size_height = value['size'][1] * height
x1 = int(x - size_width / 2)
y1 = int(y - size_height / 2)
x2 = int(x + size_width / 2)
y2 = int(y + size_height / 2)
color = COLORS.get(key, (255, 255, 255))
cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
cv2.putText(image, key,
(x1 - 10, y1 - 10),
FONT,
FONT_SCALE,
color,
LINE_TYPE)
return image
def display(self, image):
cv2.imshow(self.window_name, image)
cv2.waitKey(1)
@staticmethod
def bgr_to_rgb(frame):
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
return image
@staticmethod
def rgb_to_bgr(frame):
image = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
return image
if __name__ == '__main__':
camera = Camera(0, save=False, num_skip=0)
while True:
image = camera.get_image()
image = camera.rgb_to_bgr(image)
camera.display(image)
|
[
"cv2.putText",
"cv2.cvtColor",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.imread",
"uuid.uuid1",
"cv2.destroyWindow",
"cv2.rectangle",
"cv2.imshow",
"cv2.namedWindow"
] |
[((278, 321), 'cv2.imread', 'cv2.imread', (['"""../resources/gaming_board.jpg"""'], {}), "('../resources/gaming_board.jpg')\n", (288, 321), False, 'import cv2\n'), ((338, 376), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (350, 376), False, 'import cv2\n'), ((621, 648), 'cv2.VideoCapture', 'cv2.VideoCapture', (['camera_id'], {}), '(camera_id)\n', (637, 648), False, 'import cv2\n'), ((2436, 2471), 'cv2.imshow', 'cv2.imshow', (['self.window_name', 'image'], {}), '(self.window_name, image)\n', (2446, 2471), False, 'import cv2\n'), ((2480, 2494), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2491, 2494), False, 'import cv2\n'), ((2557, 2595), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (2569, 2595), False, 'import cv2\n'), ((2679, 2717), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_RGB2BGR'], {}), '(frame, cv2.COLOR_RGB2BGR)\n', (2691, 2717), False, 'import cv2\n'), ((755, 783), 'cv2.namedWindow', 'cv2.namedWindow', (['window_name'], {}), '(window_name)\n', (770, 783), False, 'import cv2\n'), ((869, 904), 'cv2.destroyWindow', 'cv2.destroyWindow', (['self.window_name'], {}), '(self.window_name)\n', (886, 904), False, 'import cv2\n'), ((1314, 1352), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2RGB'], {}), '(frame, cv2.COLOR_BGR2RGB)\n', (1326, 1352), False, 'import cv2\n'), ((2089, 2139), 'cv2.rectangle', 'cv2.rectangle', (['image', '(x1, y1)', '(x2, y2)', 'color', '(2)'], {}), '(image, (x1, y1), (x2, y2), color, 2)\n', (2102, 2139), False, 'import cv2\n'), ((2156, 2235), 'cv2.putText', 'cv2.putText', (['image', 'key', '(x1 - 10, y1 - 10)', 'FONT', 'FONT_SCALE', 'color', 'LINE_TYPE'], {}), '(image, key, (x1 - 10, y1 - 10), FONT, FONT_SCALE, color, LINE_TYPE)\n', (2167, 2235), False, 'import cv2\n'), ((1209, 1221), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (1219, 1221), False, 'import uuid\n')]
|
import pytest
from numpy import allclose, array, asarray, add, ndarray, generic
from lightning import series, image
pytestmark = pytest.mark.usefixtures("eng")
def test_first(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
assert allclose(data.first(), [1, 2, 3])
data = image.fromlist([array([[1, 2], [3, 4]]), array([[5, 6], [7, 8]])], engine=eng)
assert allclose(data.first(), [[1, 2], [3, 4]])
def test_asarray(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
converted = asarray(data)
assert allclose(converted, [[1, 2, 3], [4, 5, 6]])
def test_casting(eng):
data = series.fromlist([array([1, 2, 3], 'int16')], engine=eng)
assert data.astype('int64').toarray().dtype == 'int64'
assert data.astype('float32').toarray().dtype == 'float32'
assert data.astype('float64').toarray().dtype == 'float64'
assert data.astype('float16', casting='unsafe').toarray().dtype == 'float16'
def test_slicing(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
assert data.toarray().shape == (2, 3)
assert data[:, :].shape == (2, 3)
assert data[:, :].toarray().shape == (2, 3)
assert data[0, :].shape == (1, 3)
assert data[0, :].toarray().shape == (3,)
def test_toarray(eng):
original = [array([1, 2, 3]), array([4, 5, 6])]
data = series.fromlist(original, engine=eng)
assert allclose(data.toarray(), original)
original = [array([[1, 2], [3, 4]]), array([[5, 6], [7, 8]])]
data = image.fromlist(original, engine=eng)
assert allclose(data.toarray(), original)
def test_elementwise(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2raw = asarray([[7, 8, 9], [10, 11, 12]])
mat1 = series.fromlist(mat1raw, engine=eng)
mat2 = series.fromlist(mat2raw, engine=eng)
result = mat1.element_wise(mat2, add)
truth = mat1raw + mat2raw
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_elementwise_scalar(eng):
matraw = asarray([[1, 2, 3], [4, 5, 6]])
mat = series.fromlist(matraw, engine=eng)
result = mat.element_wise(2, add)
truth = matraw + 2
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_elementwise_plus(eng):
mat1raw = asarray([[1, 2, 3], [4, 5, 6]])
mat2raw = asarray([[7, 8, 9], [10, 11, 12]])
mat1 = series.fromlist(mat1raw, engine=eng)
mat2 = series.fromlist(mat2raw, engine=eng)
result = mat1.plus(mat2)
truth = mat1raw + mat2raw
assert allclose(result.toarray(), truth)
assert allclose(result.index, range(3))
def test_reduce(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
reduced = data.reduce(lambda x, y: x + y)
assert allclose(reduced.shape, [1, 3])
assert allclose(reduced.toarray(), [5, 7, 9])
def test_map(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
mapped = data.map(lambda x: x.sum())
assert allclose(mapped.shape, [2, 1])
assert allclose(mapped.toarray(), [6, 15])
mapped = data.map(lambda x: x + 1)
assert allclose(mapped.shape, [2, 3])
assert allclose(mapped.toarray(), [[2, 3, 4], [5, 6, 7]])
def test_map_with_keys(eng):
data = series.fromlist([array([1, 2, 3]), array([4, 5, 6])], engine=eng)
mapped = data.map(lambda kv: kv[0] + kv[1], with_keys=True)
assert allclose(mapped.shape, [2, 3])
assert allclose(mapped.toarray(), [[1, 2, 3], [5, 6, 7]])
data = image.fromlist([array([[1, 1], [1, 1]]), array([[2, 2], [2, 2]])], engine=eng)
mapped = data.map(lambda kv: kv[0] + kv[1], with_keys=True)
assert allclose(mapped.shape, [2, 2, 2])
assert allclose(mapped.toarray(), [[[1, 1], [1, 1]], [[3, 3], [3, 3]]])
def test_repartition(eng):
if eng is not None:
data = image.fromlist([array([1, 1]), array([2, 2]), array([3, 3]), array([4, 4]),
array([5, 5]), array([6, 6]), array([7, 7]), array([8, 8]),
array([9, 9]), array([10, 10]), array([11, 11]), array([12, 12])],
engine=eng, npartitions=10)
assert allclose(data.first(), array([1, 1]))
assert isinstance(data.first(), (ndarray, generic))
data = data.repartition(3)
assert allclose(data.first(), array([1, 1]))
data = series.fromlist([array([1, 1]), array([2, 2]), array([3, 3]), array([4, 4]),
array([5, 5]), array([6, 6]), array([7, 7]), array([8, 8]),
array([9, 9]), array([10, 10]), array([11, 11]), array([12, 12])],
engine=eng, npartitions=10)
assert allclose(data.first(), array([1, 1]))
data = data.repartition(3)
assert allclose(data.first(), array([1, 1]))
assert isinstance(data.first(), (ndarray, generic))
|
[
"numpy.asarray",
"numpy.allclose",
"numpy.array",
"lightning.image.fromlist",
"lightning.series.fromlist",
"pytest.mark.usefixtures"
] |
[((131, 161), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""eng"""'], {}), "('eng')\n", (154, 161), False, 'import pytest\n'), ((569, 582), 'numpy.asarray', 'asarray', (['data'], {}), '(data)\n', (576, 582), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((595, 638), 'numpy.allclose', 'allclose', (['converted', '[[1, 2, 3], [4, 5, 6]]'], {}), '(converted, [[1, 2, 3], [4, 5, 6]])\n', (603, 638), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1400, 1437), 'lightning.series.fromlist', 'series.fromlist', (['original'], {'engine': 'eng'}), '(original, engine=eng)\n', (1415, 1437), False, 'from lightning import series, image\n'), ((1561, 1597), 'lightning.image.fromlist', 'image.fromlist', (['original'], {'engine': 'eng'}), '(original, engine=eng)\n', (1575, 1597), False, 'from lightning import series, image\n'), ((1687, 1718), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (1694, 1718), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1733, 1767), 'numpy.asarray', 'asarray', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (1740, 1767), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1779, 1815), 'lightning.series.fromlist', 'series.fromlist', (['mat1raw'], {'engine': 'eng'}), '(mat1raw, engine=eng)\n', (1794, 1815), False, 'from lightning import series, image\n'), ((1827, 1863), 'lightning.series.fromlist', 'series.fromlist', (['mat2raw'], {'engine': 'eng'}), '(mat2raw, engine=eng)\n', (1842, 1863), False, 'from lightning import series, image\n'), ((2074, 2105), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2081, 2105), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2116, 2151), 'lightning.series.fromlist', 'series.fromlist', (['matraw'], {'engine': 'eng'}), '(matraw, engine=eng)\n', (2131, 2151), False, 'from lightning import series, image\n'), ((2350, 2381), 'numpy.asarray', 'asarray', (['[[1, 2, 3], [4, 5, 6]]'], {}), '([[1, 2, 3], [4, 5, 6]])\n', (2357, 2381), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2396, 2430), 'numpy.asarray', 'asarray', (['[[7, 8, 9], [10, 11, 12]]'], {}), '([[7, 8, 9], [10, 11, 12]])\n', (2403, 2430), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2442, 2478), 'lightning.series.fromlist', 'series.fromlist', (['mat1raw'], {'engine': 'eng'}), '(mat1raw, engine=eng)\n', (2457, 2478), False, 'from lightning import series, image\n'), ((2490, 2526), 'lightning.series.fromlist', 'series.fromlist', (['mat2raw'], {'engine': 'eng'}), '(mat2raw, engine=eng)\n', (2505, 2526), False, 'from lightning import series, image\n'), ((2833, 2864), 'numpy.allclose', 'allclose', (['reduced.shape', '[1, 3]'], {}), '(reduced.shape, [1, 3])\n', (2841, 2864), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3065, 3095), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 1]'], {}), '(mapped.shape, [2, 1])\n', (3073, 3095), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3193, 3223), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 3]'], {}), '(mapped.shape, [2, 3])\n', (3201, 3223), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3469, 3499), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 3]'], {}), '(mapped.shape, [2, 3])\n', (3477, 3499), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3727, 3760), 'numpy.allclose', 'allclose', (['mapped.shape', '[2, 2, 2]'], {}), '(mapped.shape, [2, 2, 2])\n', (3735, 3760), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1353, 1369), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1358, 1369), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1371, 1387), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1376, 1387), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1500, 1523), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1505, 1523), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1525, 1548), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (1530, 1548), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((213, 229), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (218, 229), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((231, 247), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (236, 247), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((334, 357), 'numpy.array', 'array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (339, 357), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((359, 382), 'numpy.array', 'array', (['[[5, 6], [7, 8]]'], {}), '([[5, 6], [7, 8]])\n', (364, 382), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((503, 519), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (508, 519), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((521, 537), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (526, 537), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((692, 717), 'numpy.array', 'array', (['[1, 2, 3]', '"""int16"""'], {}), "([1, 2, 3], 'int16')\n", (697, 717), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1051, 1067), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1056, 1067), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((1069, 1085), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (1074, 1085), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2727, 2743), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2732, 2743), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2745, 2761), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (2750, 2761), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2964, 2980), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (2969, 2980), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((2982, 2998), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (2987, 2998), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3345, 3361), 'numpy.array', 'array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (3350, 3361), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3363, 3379), 'numpy.array', 'array', (['[4, 5, 6]'], {}), '([4, 5, 6])\n', (3368, 3379), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3589, 3612), 'numpy.array', 'array', (['[[1, 1], [1, 1]]'], {}), '([[1, 1], [1, 1]])\n', (3594, 3612), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3614, 3637), 'numpy.array', 'array', (['[[2, 2], [2, 2]]'], {}), '([[2, 2], [2, 2]])\n', (3619, 3637), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4269, 4282), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4274, 4282), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4417, 4430), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4422, 4430), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4813, 4826), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4818, 4826), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4901, 4914), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4906, 4914), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3921, 3934), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (3926, 3934), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3936, 3949), 'numpy.array', 'array', (['[2, 2]'], {}), '([2, 2])\n', (3941, 3949), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3951, 3964), 'numpy.array', 'array', (['[3, 3]'], {}), '([3, 3])\n', (3956, 3964), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((3966, 3979), 'numpy.array', 'array', (['[4, 4]'], {}), '([4, 4])\n', (3971, 3979), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4013, 4026), 'numpy.array', 'array', (['[5, 5]'], {}), '([5, 5])\n', (4018, 4026), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4028, 4041), 'numpy.array', 'array', (['[6, 6]'], {}), '([6, 6])\n', (4033, 4041), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4043, 4056), 'numpy.array', 'array', (['[7, 7]'], {}), '([7, 7])\n', (4048, 4056), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4058, 4071), 'numpy.array', 'array', (['[8, 8]'], {}), '([8, 8])\n', (4063, 4071), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4105, 4118), 'numpy.array', 'array', (['[9, 9]'], {}), '([9, 9])\n', (4110, 4118), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4120, 4135), 'numpy.array', 'array', (['[10, 10]'], {}), '([10, 10])\n', (4125, 4135), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4137, 4152), 'numpy.array', 'array', (['[11, 11]'], {}), '([11, 11])\n', (4142, 4152), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4154, 4169), 'numpy.array', 'array', (['[12, 12]'], {}), '([12, 12])\n', (4159, 4169), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4465, 4478), 'numpy.array', 'array', (['[1, 1]'], {}), '([1, 1])\n', (4470, 4478), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4480, 4493), 'numpy.array', 'array', (['[2, 2]'], {}), '([2, 2])\n', (4485, 4493), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4495, 4508), 'numpy.array', 'array', (['[3, 3]'], {}), '([3, 3])\n', (4500, 4508), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4510, 4523), 'numpy.array', 'array', (['[4, 4]'], {}), '([4, 4])\n', (4515, 4523), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4557, 4570), 'numpy.array', 'array', (['[5, 5]'], {}), '([5, 5])\n', (4562, 4570), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4572, 4585), 'numpy.array', 'array', (['[6, 6]'], {}), '([6, 6])\n', (4577, 4585), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4587, 4600), 'numpy.array', 'array', (['[7, 7]'], {}), '([7, 7])\n', (4592, 4600), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4602, 4615), 'numpy.array', 'array', (['[8, 8]'], {}), '([8, 8])\n', (4607, 4615), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4649, 4662), 'numpy.array', 'array', (['[9, 9]'], {}), '([9, 9])\n', (4654, 4662), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4664, 4679), 'numpy.array', 'array', (['[10, 10]'], {}), '([10, 10])\n', (4669, 4679), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4681, 4696), 'numpy.array', 'array', (['[11, 11]'], {}), '([11, 11])\n', (4686, 4696), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n'), ((4698, 4713), 'numpy.array', 'array', (['[12, 12]'], {}), '([12, 12])\n', (4703, 4713), False, 'from numpy import allclose, array, asarray, add, ndarray, generic\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 5 08:18:05 2022
https://thatascience.com/learn-machine-learning/pipeline-in-scikit-learn/
@author: qian.cao
"""
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
import os
import sys
sys.path.append("../bonebox/metrics/")
from FeaturesRadiomics import *
import matplotlib.pyplot as plt
if __name__ == "__main__":
outDir = "/gpfs_projects/qian.cao/BoneBox-out/test_20220422_bin_cross_parallel_biomarker_C/"
os.makedirs(outDir,exist_ok = True)
featuresDir = "/gpfs_projects/qian.cao/BoneBox-out/test_20220422_bin_cross_parallel/"
# Copied from bin_cross_parallel
nScales = np.linspace(1.2, 0.2, 60) # change noise only # sweeps across noise and resolution settings
rScales = np.linspace(1, 0.3, 40)
# Size of the test split
num_bones_test = 7 # number of bones reserved for testing
test_split_size = num_bones_test/16
num_test = int(num_bones_test*13)
featureNames = getRadiomicFeatureNames() # TODO: save and read from file
features = np.load(featuresDir+"featuresArray.npy")
fem_dir = "../data/"
roi_vm_mean = np.load(fem_dir+"roi_vm_mean.npy")
# Training and testing scores
y_preds = np.zeros((num_test,features.shape[2],features.shape[3],features.shape[4]))
r2Test = np.zeros((features.shape[2],features.shape[3],features.shape[4]))
importances = np.zeros((features.shape[1],features.shape[2],features.shape[3],features.shape[4]))
# remember to save y_test as well, this is constant throughout the script
# for cind in range(features.shape[2]): # imaging condition
#%% Reference Configuration
ref_config = (30,15,1)
indNoise, indResolution, sind = ref_config # TODO: think about just having random sind
feat = features[:,:,indNoise,indResolution,sind]
X = feat
y = roi_vm_mean
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_size, random_state = 3, shuffle=False)
rf_pipe = Pipeline([('scl', StandardScaler()),
('reg',RandomForestRegressor(n_estimators=100, min_samples_split=10, random_state=0, n_jobs=-1))])
rf_pipe.fit(X_train, y_train)
#%% Run through all scenarios
for indNoise, nscale in enumerate(nScales):
for indResolution, rscale in enumerate(rScales):
print(f"noise: {indNoise}, resolution: {indResolution}")
for sind in range(features.shape[4]): # seed, instance
# # feature
feat = features[:,:,indNoise,indResolution,sind]
# # data and target
X = feat
y = roi_vm_mean
# # Splitting data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_split_size, random_state = 3, shuffle=False)
# # Random forest Tree Regression Pipeline
# rf_pipe = Pipeline([('scl', StandardScaler()),
# ('reg',RandomForestRegressor(n_estimators=100, min_samples_split=10, random_state=0, n_jobs=-1))])
# rf_pipe.fit(X_train, y_train)
y_pred = rf_pipe.predict(X_test)
# scoreTest[cind,sind] = rf_pipe.score(y_pred, y_test)
# Save output
y_preds[:,indNoise,indResolution,sind] = y_pred
r2Test[indNoise,indResolution,sind] = np.corrcoef(y_pred, y_test)[0,1]**2
importances[:,indNoise,indResolution,sind] = rf_pipe['reg'].feature_importances_
# Correlation plot
plt.ioff()
plt.figure()
plt.plot(y_test, y_pred,'b.')
plt.plot(*(np.linspace(0,np.max(y)),)*2,'k--')
plt.xlabel("True")
plt.ylabel("Predicted")
plt.xlim([0,np.max(y)])
plt.ylim([0,np.max(y)])
plt.title(f"r2: {r2Test[indNoise,indResolution,sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}")
plt.savefig(outDir+f"correlation_{indNoise}_{indResolution}_{sind}.png")
plt.close("all")
np.save(outDir+"y_preds",y_preds)
np.save(outDir+"y_test",y_test)
np.save(outDir+"r2Test",r2Test)
np.save(outDir+"importances",importances)
#%% Figures and Analysis
y_preds = np.load(outDir+"y_preds.npy")
y_test = np.load(outDir+"y_test.npy")
r2Test = np.load(outDir+"r2Test.npy")
importances = np.load(outDir+"importances.npy")
plt.ion()
fig = plt.figure(figsize=(7,8))
cax = fig.axes
im = plt.imshow(np.mean(r2Test,axis=2))
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title("r2 mean")
plt.colorbar()
plt.savefig(outDir+"fig-r2-mean.png")
plt.figure(figsize=(7,8))
plt.imshow(np.std(r2Test,axis=2),cmap="inferno")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title("r2 std")
plt.colorbar()
plt.savefig(outDir+"fig-r2-std.png")
#%% feature importances
for ind in range(importances.shape[0]):
print(ind)
img = importances[ind,:,:,:]
fn = featureNames[ind]
plt.ioff()
fig = plt.figure(figsize=(7,8))
cax = fig.axes
im = plt.imshow(np.mean(img,axis=2),cmap="YlGn")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title(f"Importance Mean: {fn}")
plt.colorbar()
plt.savefig(outDir+f"fig-imp-{fn}-mean.png")
plt.figure(figsize=(7,8))
plt.imshow(np.std(img,axis=2),cmap="BuPu")
plt.xlabel("Resolution")
plt.ylabel("Noise Level")
plt.title(f"Importance Std: {fn}")
plt.colorbar()
plt.savefig(outDir+f"fig-imp-{fn}-std.png")
plt.close("all")
#%%
|
[
"matplotlib.pyplot.title",
"numpy.load",
"sklearn.preprocessing.StandardScaler",
"sklearn.model_selection.train_test_split",
"matplotlib.pyplot.figure",
"numpy.mean",
"sys.path.append",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.linspace",
"numpy.save",
"numpy.corrcoef",
"sklearn.ensemble.RandomForestRegressor",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylabel",
"os.makedirs",
"matplotlib.pyplot.ioff",
"matplotlib.pyplot.plot",
"numpy.zeros",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] |
[((528, 566), 'sys.path.append', 'sys.path.append', (['"""../bonebox/metrics/"""'], {}), "('../bonebox/metrics/')\n", (543, 566), False, 'import sys\n'), ((762, 796), 'os.makedirs', 'os.makedirs', (['outDir'], {'exist_ok': '(True)'}), '(outDir, exist_ok=True)\n', (773, 796), False, 'import os\n'), ((949, 974), 'numpy.linspace', 'np.linspace', (['(1.2)', '(0.2)', '(60)'], {}), '(1.2, 0.2, 60)\n', (960, 974), True, 'import numpy as np\n'), ((1055, 1078), 'numpy.linspace', 'np.linspace', (['(1)', '(0.3)', '(40)'], {}), '(1, 0.3, 40)\n', (1066, 1078), True, 'import numpy as np\n'), ((1350, 1392), 'numpy.load', 'np.load', (["(featuresDir + 'featuresArray.npy')"], {}), "(featuresDir + 'featuresArray.npy')\n", (1357, 1392), True, 'import numpy as np\n'), ((1439, 1475), 'numpy.load', 'np.load', (["(fem_dir + 'roi_vm_mean.npy')"], {}), "(fem_dir + 'roi_vm_mean.npy')\n", (1446, 1475), True, 'import numpy as np\n'), ((1527, 1604), 'numpy.zeros', 'np.zeros', (['(num_test, features.shape[2], features.shape[3], features.shape[4])'], {}), '((num_test, features.shape[2], features.shape[3], features.shape[4]))\n', (1535, 1604), True, 'import numpy as np\n'), ((1615, 1682), 'numpy.zeros', 'np.zeros', (['(features.shape[2], features.shape[3], features.shape[4])'], {}), '((features.shape[2], features.shape[3], features.shape[4]))\n', (1623, 1682), True, 'import numpy as np\n'), ((1699, 1790), 'numpy.zeros', 'np.zeros', (['(features.shape[1], features.shape[2], features.shape[3], features.shape[4])'], {}), '((features.shape[1], features.shape[2], features.shape[3], features\n .shape[4]))\n', (1707, 1790), True, 'import numpy as np\n'), ((2220, 2305), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_split_size', 'random_state': '(3)', 'shuffle': '(False)'}), '(X, y, test_size=test_split_size, random_state=3, shuffle=False\n )\n', (2236, 2305), False, 'from sklearn.model_selection import train_test_split\n'), ((4943, 4974), 'numpy.load', 'np.load', (["(outDir + 'y_preds.npy')"], {}), "(outDir + 'y_preds.npy')\n", (4950, 4974), True, 'import numpy as np\n'), ((4986, 5016), 'numpy.load', 'np.load', (["(outDir + 'y_test.npy')"], {}), "(outDir + 'y_test.npy')\n", (4993, 5016), True, 'import numpy as np\n'), ((5028, 5058), 'numpy.load', 'np.load', (["(outDir + 'r2Test.npy')"], {}), "(outDir + 'r2Test.npy')\n", (5035, 5058), True, 'import numpy as np\n'), ((5075, 5110), 'numpy.load', 'np.load', (["(outDir + 'importances.npy')"], {}), "(outDir + 'importances.npy')\n", (5082, 5110), True, 'import numpy as np\n'), ((5118, 5127), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (5125, 5127), True, 'import matplotlib.pyplot as plt\n'), ((5143, 5169), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5153, 5169), True, 'import matplotlib.pyplot as plt\n'), ((5236, 5260), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5246, 5260), True, 'import matplotlib.pyplot as plt\n'), ((5265, 5290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5275, 5290), True, 'import matplotlib.pyplot as plt\n'), ((5295, 5315), 'matplotlib.pyplot.title', 'plt.title', (['"""r2 mean"""'], {}), "('r2 mean')\n", (5304, 5315), True, 'import matplotlib.pyplot as plt\n'), ((5320, 5334), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5332, 5334), True, 'import matplotlib.pyplot as plt\n'), ((5339, 5378), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + 'fig-r2-mean.png')"], {}), "(outDir + 'fig-r2-mean.png')\n", (5350, 5378), True, 'import matplotlib.pyplot as plt\n'), ((5386, 5412), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5396, 5412), True, 'import matplotlib.pyplot as plt\n'), ((5469, 5493), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5479, 5493), True, 'import matplotlib.pyplot as plt\n'), ((5498, 5523), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5508, 5523), True, 'import matplotlib.pyplot as plt\n'), ((5528, 5547), 'matplotlib.pyplot.title', 'plt.title', (['"""r2 std"""'], {}), "('r2 std')\n", (5537, 5547), True, 'import matplotlib.pyplot as plt\n'), ((5552, 5566), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5564, 5566), True, 'import matplotlib.pyplot as plt\n'), ((5571, 5609), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + 'fig-r2-std.png')"], {}), "(outDir + 'fig-r2-std.png')\n", (5582, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5231), 'numpy.mean', 'np.mean', (['r2Test'], {'axis': '(2)'}), '(r2Test, axis=2)\n', (5215, 5231), True, 'import numpy as np\n'), ((5427, 5449), 'numpy.std', 'np.std', (['r2Test'], {'axis': '(2)'}), '(r2Test, axis=2)\n', (5433, 5449), True, 'import numpy as np\n'), ((5803, 5813), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (5811, 5813), True, 'import matplotlib.pyplot as plt\n'), ((5837, 5863), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (5847, 5863), True, 'import matplotlib.pyplot as plt\n'), ((5951, 5975), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (5961, 5975), True, 'import matplotlib.pyplot as plt\n'), ((5984, 6009), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (5994, 6009), True, 'import matplotlib.pyplot as plt\n'), ((6018, 6053), 'matplotlib.pyplot.title', 'plt.title', (['f"""Importance Mean: {fn}"""'], {}), "(f'Importance Mean: {fn}')\n", (6027, 6053), True, 'import matplotlib.pyplot as plt\n'), ((6062, 6076), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6074, 6076), True, 'import matplotlib.pyplot as plt\n'), ((6085, 6131), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'fig-imp-{fn}-mean.png')"], {}), "(outDir + f'fig-imp-{fn}-mean.png')\n", (6096, 6131), True, 'import matplotlib.pyplot as plt\n'), ((6147, 6173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 8)'}), '(figsize=(7, 8))\n', (6157, 6173), True, 'import matplotlib.pyplot as plt\n'), ((6232, 6256), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Resolution"""'], {}), "('Resolution')\n", (6242, 6256), True, 'import matplotlib.pyplot as plt\n'), ((6265, 6290), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Noise Level"""'], {}), "('Noise Level')\n", (6275, 6290), True, 'import matplotlib.pyplot as plt\n'), ((6299, 6333), 'matplotlib.pyplot.title', 'plt.title', (['f"""Importance Std: {fn}"""'], {}), "(f'Importance Std: {fn}')\n", (6308, 6333), True, 'import matplotlib.pyplot as plt\n'), ((6342, 6356), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6354, 6356), True, 'import matplotlib.pyplot as plt\n'), ((6365, 6410), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'fig-imp-{fn}-std.png')"], {}), "(outDir + f'fig-imp-{fn}-std.png')\n", (6376, 6410), True, 'import matplotlib.pyplot as plt\n'), ((6426, 6442), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (6435, 6442), True, 'import matplotlib.pyplot as plt\n'), ((4706, 4742), 'numpy.save', 'np.save', (["(outDir + 'y_preds')", 'y_preds'], {}), "(outDir + 'y_preds', y_preds)\n", (4713, 4742), True, 'import numpy as np\n'), ((4752, 4786), 'numpy.save', 'np.save', (["(outDir + 'y_test')", 'y_test'], {}), "(outDir + 'y_test', y_test)\n", (4759, 4786), True, 'import numpy as np\n'), ((4796, 4830), 'numpy.save', 'np.save', (["(outDir + 'r2Test')", 'r2Test'], {}), "(outDir + 'r2Test', r2Test)\n", (4803, 4830), True, 'import numpy as np\n'), ((4840, 4884), 'numpy.save', 'np.save', (["(outDir + 'importances')", 'importances'], {}), "(outDir + 'importances', importances)\n", (4847, 4884), True, 'import numpy as np\n'), ((5910, 5930), 'numpy.mean', 'np.mean', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (5917, 5930), True, 'import numpy as np\n'), ((6192, 6211), 'numpy.std', 'np.std', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (6198, 6211), True, 'import numpy as np\n'), ((2335, 2351), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2349, 2351), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2385, 2478), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(100)', 'min_samples_split': '(10)', 'random_state': '(0)', 'n_jobs': '(-1)'}), '(n_estimators=100, min_samples_split=10, random_state=\n 0, n_jobs=-1)\n', (2406, 2478), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((3168, 3253), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': 'test_split_size', 'random_state': '(3)', 'shuffle': '(False)'}), '(X, y, test_size=test_split_size, random_state=3, shuffle=False\n )\n', (3184, 3253), False, 'from sklearn.model_selection import train_test_split\n'), ((4114, 4124), 'matplotlib.pyplot.ioff', 'plt.ioff', ([], {}), '()\n', (4122, 4124), True, 'import matplotlib.pyplot as plt\n'), ((4141, 4153), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4151, 4153), True, 'import matplotlib.pyplot as plt\n'), ((4170, 4200), 'matplotlib.pyplot.plot', 'plt.plot', (['y_test', 'y_pred', '"""b."""'], {}), "(y_test, y_pred, 'b.')\n", (4178, 4200), True, 'import matplotlib.pyplot as plt\n'), ((4279, 4297), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""True"""'], {}), "('True')\n", (4289, 4297), True, 'import matplotlib.pyplot as plt\n'), ((4314, 4337), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Predicted"""'], {}), "('Predicted')\n", (4324, 4337), True, 'import matplotlib.pyplot as plt\n'), ((4434, 4566), 'matplotlib.pyplot.title', 'plt.title', (['f"""r2: {r2Test[indNoise, indResolution, sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}"""'], {}), "(\n f'r2: {r2Test[indNoise, indResolution, sind]} Noise: {indNoise}, Resolution: {indResolution}, Instance: {sind}'\n )\n", (4443, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4645), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(outDir + f'correlation_{indNoise}_{indResolution}_{sind}.png')"], {}), "(outDir + f'correlation_{indNoise}_{indResolution}_{sind}.png')\n", (4582, 4645), True, 'import matplotlib.pyplot as plt\n'), ((4660, 4676), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (4669, 4676), True, 'import matplotlib.pyplot as plt\n'), ((3913, 3940), 'numpy.corrcoef', 'np.corrcoef', (['y_pred', 'y_test'], {}), '(y_pred, y_test)\n', (3924, 3940), True, 'import numpy as np\n'), ((4366, 4375), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4372, 4375), True, 'import numpy as np\n'), ((4406, 4415), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4412, 4415), True, 'import numpy as np\n'), ((4241, 4250), 'numpy.max', 'np.max', (['y'], {}), '(y)\n', (4247, 4250), True, 'import numpy as np\n')]
|
#!/usr/bin/env python
# Python Standard Library
pass
# Third-Party Libraries
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import to_rgb
# Local Library
import mivp
# ------------------------------------------------------------------------------
grey_4 = to_rgb("#ced4da")
# ------------------------------------------------------------------------------
def Q(f, xs, ys):
X, Y = np.meshgrid(xs, ys)
v = np.vectorize
fx = v(lambda x, y: f([x, y])[0])
fy = v(lambda x, y: f([x, y])[1])
return X, Y, fx(X, Y), fy(X, Y)
# ------------------------------------------------------------------------------
# Vector field
def fun(t, xy):
x, y = xy
dx = - y + 0.5*np.cos(0.5*t)
dy = x - np.sin(0.5*t)
return [dx, dy]
# Time span & frame rate
t_span = (0.0, 20.0)
df = 60.0
dt = 1.0 / df
t = np.arange(t_span[0], t_span[1], dt)
t = np.r_[t, t_span[1]]
# Initial set boundary
y0 = [0.0, 0.0]
radius = 0.5
n = 10
xc, yc = y0
def vectorize(fun):
return np.vectorize(fun, signature="()->(n)")
@vectorize
def boundary(s):
if 0 <= s < 0.25:
return np.array([-0.5 + 4 * s, 0.5])
elif 0.25 <= s < 0.5:
return np.array([0.5, 0.5 - 4 * (s - 0.25)])
elif 0.5 <= s < 0.75:
return np.array([0.5 - 4 * (s - 0.5), -0.5])
else:
return np.array([-0.5, -0.5 + 4 * (s - 0.75)])
# Precision
rtol = 1e-9 # default: 1e-3
atol = 1e-12 # default: 1e-6
# ------------------------------------------------------------------------------
fig = plt.figure()
x = y = np.linspace(-1.0, 1.0, 1000)
#plt.streamplot(*Q(lambda xy: fun(0, xy), x, y), color=grey_4, zorder=-100)
c = cx, cy = np.array([0.0, 0.0])
plt.plot([cx], [cy], lw=3.0, marker="o", ms=10.0, markevery=[-1],
markeredgecolor="white", color="black")
plt.axis("square")
plt.axis("off")
data = mivp.solve_alt(
fun=fun,
t_eval=t,
boundary=lambda s: 0.5*boundary(s),
boundary_rtol=0.0,
boundary_atol=0.05,
rtol=rtol,
atol=atol,
method="LSODA",
)
circle = None
def display_radius(i, axes):
global circle
if circle:
circle.remove()
x, y = data[i]
r = max(np.sqrt((x - cx)**2 + (y - cy)**2))
theta = np.linspace(0, 2*np.pi, 1000)
circle = axes.plot(
cx+r*np.cos(theta), cy+r*np.sin(theta),
linestyle='dashed', color="k", linewidth=1.0,
)[0]
plt.axis([-4/3, 4/3, -1, 1])
mivp.generate_movie(data, filename="hausdorff.mp4", axes=fig.axes[0], fps=df, hook=display_radius)
|
[
"numpy.meshgrid",
"numpy.vectorize",
"matplotlib.pyplot.plot",
"mivp.generate_movie",
"matplotlib.pyplot.axis",
"matplotlib.colors.to_rgb",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.arange",
"numpy.linspace",
"numpy.cos",
"numpy.sqrt"
] |
[((289, 306), 'matplotlib.colors.to_rgb', 'to_rgb', (['"""#ced4da"""'], {}), "('#ced4da')\n", (295, 306), False, 'from matplotlib.colors import to_rgb\n'), ((858, 893), 'numpy.arange', 'np.arange', (['t_span[0]', 't_span[1]', 'dt'], {}), '(t_span[0], t_span[1], dt)\n', (867, 893), True, 'import numpy as np\n'), ((1544, 1556), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1554, 1556), True, 'import matplotlib.pyplot as plt\n'), ((1565, 1593), 'numpy.linspace', 'np.linspace', (['(-1.0)', '(1.0)', '(1000)'], {}), '(-1.0, 1.0, 1000)\n', (1576, 1593), True, 'import numpy as np\n'), ((1683, 1703), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1691, 1703), True, 'import numpy as np\n'), ((1704, 1813), 'matplotlib.pyplot.plot', 'plt.plot', (['[cx]', '[cy]'], {'lw': '(3.0)', 'marker': '"""o"""', 'ms': '(10.0)', 'markevery': '[-1]', 'markeredgecolor': '"""white"""', 'color': '"""black"""'}), "([cx], [cy], lw=3.0, marker='o', ms=10.0, markevery=[-1],\n markeredgecolor='white', color='black')\n", (1712, 1813), True, 'import matplotlib.pyplot as plt\n'), ((1818, 1836), 'matplotlib.pyplot.axis', 'plt.axis', (['"""square"""'], {}), "('square')\n", (1826, 1836), True, 'import matplotlib.pyplot as plt\n'), ((1837, 1852), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1845, 1852), True, 'import matplotlib.pyplot as plt\n'), ((2430, 2533), 'mivp.generate_movie', 'mivp.generate_movie', (['data'], {'filename': '"""hausdorff.mp4"""', 'axes': 'fig.axes[0]', 'fps': 'df', 'hook': 'display_radius'}), "(data, filename='hausdorff.mp4', axes=fig.axes[0], fps=\n df, hook=display_radius)\n", (2449, 2533), False, 'import mivp\n'), ((419, 438), 'numpy.meshgrid', 'np.meshgrid', (['xs', 'ys'], {}), '(xs, ys)\n', (430, 438), True, 'import numpy as np\n'), ((1023, 1061), 'numpy.vectorize', 'np.vectorize', (['fun'], {'signature': '"""()->(n)"""'}), "(fun, signature='()->(n)')\n", (1035, 1061), True, 'import numpy as np\n'), ((2224, 2255), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * np.pi)', '(1000)'], {}), '(0, 2 * np.pi, 1000)\n', (2235, 2255), True, 'import numpy as np\n'), ((2398, 2430), 'matplotlib.pyplot.axis', 'plt.axis', (['[-4 / 3, 4 / 3, -1, 1]'], {}), '([-4 / 3, 4 / 3, -1, 1])\n', (2406, 2430), True, 'import matplotlib.pyplot as plt\n'), ((746, 761), 'numpy.sin', 'np.sin', (['(0.5 * t)'], {}), '(0.5 * t)\n', (752, 761), True, 'import numpy as np\n'), ((1129, 1158), 'numpy.array', 'np.array', (['[-0.5 + 4 * s, 0.5]'], {}), '([-0.5 + 4 * s, 0.5])\n', (1137, 1158), True, 'import numpy as np\n'), ((2176, 2214), 'numpy.sqrt', 'np.sqrt', (['((x - cx) ** 2 + (y - cy) ** 2)'], {}), '((x - cx) ** 2 + (y - cy) ** 2)\n', (2183, 2214), True, 'import numpy as np\n'), ((719, 734), 'numpy.cos', 'np.cos', (['(0.5 * t)'], {}), '(0.5 * t)\n', (725, 734), True, 'import numpy as np\n'), ((1200, 1237), 'numpy.array', 'np.array', (['[0.5, 0.5 - 4 * (s - 0.25)]'], {}), '([0.5, 0.5 - 4 * (s - 0.25)])\n', (1208, 1237), True, 'import numpy as np\n'), ((1279, 1316), 'numpy.array', 'np.array', (['[0.5 - 4 * (s - 0.5), -0.5]'], {}), '([0.5 - 4 * (s - 0.5), -0.5])\n', (1287, 1316), True, 'import numpy as np\n'), ((1342, 1381), 'numpy.array', 'np.array', (['[-0.5, -0.5 + 4 * (s - 0.75)]'], {}), '([-0.5, -0.5 + 4 * (s - 0.75)])\n', (1350, 1381), True, 'import numpy as np\n'), ((2291, 2304), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2297, 2304), True, 'import numpy as np\n'), ((2311, 2324), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2317, 2324), True, 'import numpy as np\n')]
|
from flask.ext.login import LoginManager
from flask.ext.micropub import MicropubClient
from flask.ext.sqlalchemy import SQLAlchemy
from flask_debugtoolbar import DebugToolbarExtension
db = SQLAlchemy()
micropub = MicropubClient(client_id='https://woodwind.xyz/')
login_mgr = LoginManager()
login_mgr.login_view = 'views.index'
#toolbar = DebugToolbarExtension()
def init_app(app):
db.init_app(app)
micropub.init_app(app)
login_mgr.init_app(app)
# toolbar.init_app(app)
|
[
"flask.ext.sqlalchemy.SQLAlchemy",
"flask.ext.login.LoginManager",
"flask.ext.micropub.MicropubClient"
] |
[((191, 203), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', ([], {}), '()\n', (201, 203), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((215, 264), 'flask.ext.micropub.MicropubClient', 'MicropubClient', ([], {'client_id': '"""https://woodwind.xyz/"""'}), "(client_id='https://woodwind.xyz/')\n", (229, 264), False, 'from flask.ext.micropub import MicropubClient\n'), ((277, 291), 'flask.ext.login.LoginManager', 'LoginManager', ([], {}), '()\n', (289, 291), False, 'from flask.ext.login import LoginManager\n')]
|
#!/usr/bin/env python3
"""
--- Day 2: Dive! ---
https://adventofcode.com/2021/day/2
"""
from abc import ABC, abstractmethod
import argparse
from enum import Enum
import sys
from typing import List, NamedTuple
class Direction(Enum):
FORWARD = 1
DOWN = 2
UP = 3
class Step(NamedTuple):
direction: Direction
distance: int
def parse_course(raw_course: str) -> List[Step]:
"""Parses a planned course (the puzzle input.)"""
course: List[Step] = []
raw_direction: str
raw_distance: str
for line in raw_course.strip().splitlines():
raw_direction, raw_distance = line.split()
course.append(
Step(Direction.__members__[raw_direction.upper()], int(raw_distance)))
return course
class Submarine(ABC):
"""A submarine."""
def __init__(self):
self.horizontal: int = 0
self.depth: int = 0
@abstractmethod
def move(self, step: Step):
pass
def follow(self, course: List[Step]) -> int:
"""Follows a course and returns the submarine's position.
:param course: a course
:return: horizontal position * depth
"""
for step in course:
self.move(step)
return self.horizontal * self.depth
class Part1Submarine(Submarine):
"""Submarine interpreting a course as described in Part 1."""
def move(self, step: Step):
"""Move a submarine by `step`.
- `forward X` increases the horizontal position by `X` units.
- `down X` *increases* the depth by `X` units.
- `up X` *decreases* the depth by `X` units.
"""
if step.direction == Direction.FORWARD:
self.horizontal += step.distance
elif step.direction == Direction.DOWN:
self.depth += step.distance
elif step.direction == Direction.UP:
self.depth -= step.distance
class Part2Submarine(Submarine):
"""Submarine interpreting a course as described in Part 2."""
def __init__(self):
super().__init__()
self.aim: int = 0
def move(self, step: Step):
"""Move a submarine by `step`.
- `down X` *increases* your aim by `X` units.
- `up X` *decreases* your aim by X units.
- `forward X` does two things:
- It increases your horizontal position by `X` units.
- It increases your depth by your aim *multiplied by* `X`.
"""
if step.direction == Direction.FORWARD:
self.horizontal += step.distance
self.depth += self.aim * step.distance
elif step.direction == Direction.DOWN:
self.aim += step.distance
elif step.direction == Direction.UP:
self.aim -= step.distance
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Day 2: Dive!")
parser.add_argument('infile', nargs='?', type=argparse.FileType('r'),
default=sys.stdin)
args = parser.parse_args()
course = parse_course(args.infile.read())
print('[Part 1] position:', Part1Submarine().follow(course))
print('[Part 2] position:', Part2Submarine().follow(course))
|
[
"argparse.ArgumentParser",
"argparse.FileType"
] |
[((2559, 2610), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Day 2: Dive!"""'}), "(description='Day 2: Dive!')\n", (2582, 2610), False, 'import argparse\n'), ((2659, 2681), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (2676, 2681), False, 'import argparse\n')]
|
import sys
import policy_api_requests
import json
protocol = "https"
nbmaster = ""
username = ""
password = ""
domainName = ""
domainType = ""
port = 1556
def print_disclaimer():
print("-------------------------------------------------------------------------------------------------")
print("-- This script requires Python3.5 or higher. --")
print("-- If your current system does not have Python3.5 or higher installed, this will not work. --")
print("-------------------------------------------------------------------------------------------------\n")
print("Executing this library requires some additional python3.5 libraries like \n\t'requests'.\n\n")
print("You will, however, require 'requests' library to make the API calls.\n")
print("You can install the dependent libraries using the following commands: ")
print("pip install requests")
print("-------------------------------------------------------------------------------------------------\n\n\n")
print("You can specify the 'nbmaster', 'username', 'password', 'domainName' and 'domainType' as command-line parameters\n")
print_usage()
def print_usage():
print("Example:")
print("python -W ignore create_policy_in_one_step.py -nbmaster <masterServer> -username <username> -password <password> [-domainName <domainName>] [-domainType <domainType>]\n\n\n")
def read_command_line_arguments():
if len(sys.argv)%2 == 0:
print_usage()
exit()
global nbmaster
global username
global password
global domainName
global domainType
for i in range(1, len(sys.argv), 2):
if sys.argv[i] == "-nbmaster":
nbmaster = sys.argv[i + 1]
elif sys.argv[i] == "-username":
username = sys.argv[i + 1]
elif sys.argv[i] == "-password":
password = sys.argv[i + 1]
elif sys.argv[i] == "-domainName":
domainName = sys.argv[i + 1]
elif sys.argv[i] == "-domainType":
domainType = sys.argv[i + 1]
else:
print_usage()
exit()
if nbmaster == "":
print("Please provide the value for 'nbmaster'")
exit()
elif username == "":
print("Please provide the value for 'username'")
exit()
elif password == "":
print("Please provide the value for 'password'")
exit()
elif domainName == "":
print("Please provide the value for 'domainName'")
exit()
elif domainType == "":
print("Please provide the value for 'domainType'")
exit()
print_disclaimer()
read_command_line_arguments()
base_url = protocol + "://" + nbmaster + ":" + str(port) + "/netbackup"
jwt = policy_api_requests.perform_login(username, password, domainName, domainType, base_url)
policy_api_requests.post_netbackup_VMwarePolicy(jwt, base_url)
policy_api_requests.get_netbackup_policies(jwt, base_url)
policy_api_requests.get_netbackup_policy(jwt, base_url)
policy_api_requests.put_netbackup_policy(jwt, base_url)
policy_api_requests.get_netbackup_policy(jwt, base_url)
policy_api_requests.delete_VMware_netbackup_policy(jwt, base_url)
policy_api_requests.get_netbackup_policies(jwt, base_url)
|
[
"policy_api_requests.post_netbackup_VMwarePolicy",
"policy_api_requests.perform_login",
"policy_api_requests.get_netbackup_policies",
"policy_api_requests.delete_VMware_netbackup_policy",
"policy_api_requests.put_netbackup_policy",
"policy_api_requests.get_netbackup_policy"
] |
[((2603, 2694), 'policy_api_requests.perform_login', 'policy_api_requests.perform_login', (['username', 'password', 'domainName', 'domainType', 'base_url'], {}), '(username, password, domainName,\n domainType, base_url)\n', (2636, 2694), False, 'import policy_api_requests\n'), ((2694, 2756), 'policy_api_requests.post_netbackup_VMwarePolicy', 'policy_api_requests.post_netbackup_VMwarePolicy', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (2741, 2756), False, 'import policy_api_requests\n'), ((2760, 2817), 'policy_api_requests.get_netbackup_policies', 'policy_api_requests.get_netbackup_policies', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (2802, 2817), False, 'import policy_api_requests\n'), ((2821, 2876), 'policy_api_requests.get_netbackup_policy', 'policy_api_requests.get_netbackup_policy', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (2861, 2876), False, 'import policy_api_requests\n'), ((2880, 2935), 'policy_api_requests.put_netbackup_policy', 'policy_api_requests.put_netbackup_policy', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (2920, 2935), False, 'import policy_api_requests\n'), ((2939, 2994), 'policy_api_requests.get_netbackup_policy', 'policy_api_requests.get_netbackup_policy', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (2979, 2994), False, 'import policy_api_requests\n'), ((2998, 3063), 'policy_api_requests.delete_VMware_netbackup_policy', 'policy_api_requests.delete_VMware_netbackup_policy', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (3048, 3063), False, 'import policy_api_requests\n'), ((3067, 3124), 'policy_api_requests.get_netbackup_policies', 'policy_api_requests.get_netbackup_policies', (['jwt', 'base_url'], {}), '(jwt, base_url)\n', (3109, 3124), False, 'import policy_api_requests\n')]
|
from __future__ import unicode_literals
from django.contrib.auth.models import User
# from django.core.validators import MaxValueValidator
from django.db import models
from django.db.models.signals import post_save
from django.dispatch import receiver
class Usuario(models.Model):
""" usuario """
usuario = models.OneToOneField(User, null=True)
nombre = models.CharField(max_length=30, blank=True)
mail = models.EmailField(max_length=70, default='email')
def __string__(self):
return str(self.nombre)
@receiver(post_save, sender=User)
def crear_perfil_para_usuario_nuevo(sender, created, instance, **kwargs):
if created:
perfil = Usuario(usuario=instance)
perfil.save()
|
[
"django.db.models.CharField",
"django.db.models.OneToOneField",
"django.dispatch.receiver",
"django.db.models.EmailField"
] |
[((537, 569), 'django.dispatch.receiver', 'receiver', (['post_save'], {'sender': 'User'}), '(post_save, sender=User)\n', (545, 569), False, 'from django.dispatch import receiver\n'), ((319, 356), 'django.db.models.OneToOneField', 'models.OneToOneField', (['User'], {'null': '(True)'}), '(User, null=True)\n', (339, 356), False, 'from django.db import models\n'), ((370, 413), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)'}), '(max_length=30, blank=True)\n', (386, 413), False, 'from django.db import models\n'), ((425, 474), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(70)', 'default': '"""email"""'}), "(max_length=70, default='email')\n", (442, 474), False, 'from django.db import models\n')]
|
import json
from os.path import basename
from typing import Dict, List, Any, Union
from ui.backend import BackendClient
_RESERVED_NAMES = {"list", "validate", "create"}
class BackendController:
def __init__(self, backend_url: str, launcher_url: str):
self._backend = BackendClient(backend_url=backend_url, launcher_url=launcher_url)
@property
def backend_url(self) -> str:
return self._backend.backend_url
@property
def launcher_url(self) -> str:
return self._backend.launcher_url
def login(self, user: str, password: str) -> Union[str, None]:
try:
result = self._backend.post("api/auth", data={"user": user, "password": password}, auth_token=None)
return result.get("token", None)
except:
return None
def user_info(self, auth_token: Union[str, None]) -> Dict:
return self._backend.get("api/user/info", auth_token=auth_token)
def list_workers(self, auth_token: Union[str, None]) -> List:
return self._backend.get("api/workers", auth_token=auth_token) or []
def list_tasks(self, auth_token: Union[str, None]) -> List:
return self._backend.get("api/workers/queue", auth_token=auth_token) or []
def get_worker_types(self, auth_token: Union[str, None]) -> List:
return list({w['type']: {
'type': w['type'],
'extensions': w['extensions'],
'description': w['description'],
'parameters': json.dumps(w['parameters']),
'docs': w.get('docs', '')
} for w in self.list_workers(auth_token=auth_token)}.values())
def edit_worker(self, action: str, name: str, auth_token: Union[str, None]) -> None:
if action == "reset":
self._backend.post("api/workers/status", data={"name": name}, auth_token=auth_token)
elif action == "delete":
self._backend.delete("api/workers", data={"name": name}, auth_token=auth_token)
def edit_task(self, action: str, task_id: str, auth_token: Union[str, None]) -> None:
if action == "reset":
self._backend.patch("api/workers/queue", data={"task_id": task_id}, auth_token=auth_token)
elif action == "cancel":
self._backend.delete("api/workers/queue", data={"task_id": task_id}, auth_token=auth_token)
def list_stores(self, auth_token: Union[str, None]) -> List:
return self._backend.get("api/list", auth_token=auth_token) or []
def list_projects(self, store_id: str, auth_token: Union[str, None]) -> List:
return self._backend.get("api/{}/list".format(store_id), params={"metadata": True}, auth_token=auth_token)
def create_project(self, store_id: str, project_id: str, project_name: str, groups: List[str], auth_token: Union[str, None]) -> None:
self._backend.post("api/{}/create".format(store_id), data={"id": project_id, "name": project_name, "groups": groups}, auth_token=auth_token)
def get_project(self, store_id: str, project_id: str, auth_token: Union[str, None]) -> Dict:
return self._backend.get("/api/{}/{}/projectMeta".format(store_id, project_id), auth_token=auth_token)
def edit_project(self, store_id: str, project_id: str, project_data: Dict, auth_token: Union[str, None]) -> Dict:
return self._backend.post("/api/{}/{}/projectMeta".format(store_id, project_id), data=project_data, auth_token=auth_token)
def get_auth_groups(self, auth_token: Union[str, None]) -> List:
return self._backend.get("/api/user/groups", auth_token=auth_token)
def get_access_meta(self, store_id: str, project_id: str, auth_token: Union[str, None]) -> Dict:
return self._backend.get("/api/{}/{}/projectAccessMeta".format(store_id, project_id), auth_token=auth_token)
def edit_access_meta(self, store_id: str, project_id: str, meta: Dict, auth_token: Union[str, None]) -> Dict:
return self._backend.post("/api/{}/{}/projectAccessMeta".format(store_id, project_id), data=meta, auth_token=auth_token)
def list_assets(self, store_id: str, project_id: str, auth_token: Union[str, None]) -> List:
return [_mutate(d, "short_index", basename(d.get("index_file", ""))) for d in self._backend.get("api/{}/{}/list".format(store_id, project_id), auth_token=auth_token)]
def list_files(self, store_id: str, project_id: str, asset_id: str, auth_token: Union[str, None], hierarchical: bool = False, version: str = None) -> List[Dict]:
params = {}
if version:
params["version"] = version
files = self._backend.get("api/{}/{}/files/{}".format(store_id, project_id, asset_id), params=params, auth_token=auth_token)
if hierarchical:
file_tree = []
if files:
for file in files:
_add_item(item=file["name"], url=file["url"], node_type="leaf", results=file_tree)
return file_tree
else:
return files
def get_asset(self, store_id: str, project_id: str, asset_id: str, auth_token: Union[str, None]) -> Dict:
return self._backend.get("api/{}/{}/meta/{}".format(store_id, project_id, asset_id), auth_token=auth_token)
def create_asset(self, store_id: str, project_id: str, asset: Dict, auth_token: Union[str, None]) -> Dict:
self._backend.post("api/{}/{}/create".format(store_id, project_id), data=asset, auth_token=auth_token)
return self.get_asset(store_id=store_id, project_id=project_id, asset_id=asset.get("id", ""), auth_token=auth_token)
def edit_asset(self, store_id: str, project_id: str, asset: Dict, auth_token: Union[str, None]) -> Dict:
return self._backend.post("/api/{}/{}/meta/{}".format(store_id, project_id, asset.get("id", "")), data=asset, auth_token=auth_token)
def upload_asset(self, store_id: str, project_id: str, asset_id: str, filename: str, stream: Any, auth_token: Union[str, None],
update: bool = False, create: bool = False) -> None:
self._backend.upload(api_url="api/{}/{}/upload/{}".format(store_id, project_id, asset_id), stream=stream,
params={"filename": filename, "update": update, "create": create}, auth_token=auth_token)
def schedule_worker(self, store_id: str, project_id: str, asset_id: str, worker_type: str, parameters: Dict, auth_token: Union[str, None]):
data = {"store_id": store_id, "project_id": project_id, "asset_id": asset_id, "worker_type": worker_type, "parameters": parameters}
self._backend.post("api/workers/queue", data=data, auth_token=auth_token)
def check_objects(self, store_id: str, project_id: str, object_ids: List[str], auth_token: Union[str, None]) -> List[Dict]:
return [self.get_object_info(store_id=store_id, project_id=project_id, object_id=item, auth_token=auth_token)
for item in object_ids if self.has_object(store_id=store_id, project_id=project_id, object_id=item, auth_token=auth_token)]
def has_object(self, store_id: str, project_id: str, object_id: str, auth_token: Union[str, None]) -> bool:
return self._backend.head("api/{}/{}/object/{}".format(store_id, project_id, object_id), auth_token=auth_token)
def get_object(self, store_id: str, project_id: str, object_id: str, auth_token: Union[str, None]) -> Union[None, Dict]:
return self._backend.get("api/{}/{}/object/{}".format(store_id, project_id, object_id), auth_token=auth_token)
def get_object_info(self, store_id: str, project_id: str, object_id: str, auth_token: Union[str, None]) -> Union[None, Dict]:
return self._backend.get("api/{}/{}/object/{}/info".format(store_id, project_id, object_id), auth_token=auth_token)
def set_object(self, store_id: str, project_id: str, object_id: str, object_data: Dict, auth_token: Union[str, None]) -> None:
return self._backend.put("api/{}/{}/object/{}".format(store_id, project_id, object_id), data=object_data, auth_token=auth_token)
def create_project_version(self, store_id: str, project_id: str, version_name: str, version_description: str):
self._backend.post("api/{}/{}/version".format(store_id, project_id),
data={"version_name": version_name, "version_description": version_description})
def _mutate(d: Dict, field: str, value: Any) -> Dict:
d[field] = value
return d
def _add_item(item: str, node_type: str, results: List[Dict], url: str = None):
if len(item) > 0:
path = item.rsplit("/", maxsplit=1)
if len(path) == 1:
if not _has_item(key=item, results=results):
results.append({"id": item, "type": node_type, "url": url, "parent": "#", "text": item})
else:
if not _has_item(key=item, results=results):
results.append({"id": item, "type": node_type, "url": url, "parent": path[0], "text": path[1]})
_add_item(item=path[0], node_type="parent", results=results)
def _has_item(key: str, results: List[Dict]) -> bool:
return any([x.get("id", "") == key for x in results])
|
[
"ui.backend.BackendClient",
"json.dumps"
] |
[((283, 348), 'ui.backend.BackendClient', 'BackendClient', ([], {'backend_url': 'backend_url', 'launcher_url': 'launcher_url'}), '(backend_url=backend_url, launcher_url=launcher_url)\n', (296, 348), False, 'from ui.backend import BackendClient\n'), ((1488, 1515), 'json.dumps', 'json.dumps', (["w['parameters']"], {}), "(w['parameters'])\n", (1498, 1515), False, 'import json\n')]
|
"""
Unit tests for the density class
"""
from unittest import TestCase
import sys
sys.path.append('../src')
import numpy as np
import unittest
import suftware as sw
import os
class Density1d(TestCase):
def setUp(self):
self.N = 5
self.data = sw.simulate_density_data(distribution_type='uniform', N=self.N,seed=1)
def tearDown(self):
pass
# method that checks the main calculation of deft_1d by calling _run and ensuring that we get the correct Q_star
def test_density(self):
actual_Q_star = Q = sw.DensityEstimator(self.data)
expected_Q_star = np.array([.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, 0.60883489, 0.34458301])
self.assertEqual(actual_Q_star.Q_star.evaluate(actual_Q_star.grid).all(),expected_Q_star.all())
# helper method for test_get_data_file_hand()
def raiseFileNotFoundError(self):
return FileNotFoundError
suite = unittest.TestLoader().loadTestsFromTestCase(Density1d)
unittest.TextTestRunner(verbosity=2).run(suite)
|
[
"sys.path.append",
"unittest.TextTestRunner",
"suftware.simulate_density_data",
"numpy.array",
"unittest.TestLoader",
"suftware.DensityEstimator"
] |
[((84, 109), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (99, 109), False, 'import sys\n'), ((268, 339), 'suftware.simulate_density_data', 'sw.simulate_density_data', ([], {'distribution_type': '"""uniform"""', 'N': 'self.N', 'seed': '(1)'}), "(distribution_type='uniform', N=self.N, seed=1)\n", (292, 339), True, 'import suftware as sw\n'), ((551, 581), 'suftware.DensityEstimator', 'sw.DensityEstimator', (['self.data'], {}), '(self.data)\n', (570, 581), True, 'import suftware as sw\n'), ((608, 707), 'numpy.array', 'np.array', (['[0.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, 0.60883489, \n 0.34458301]'], {}), '([0.56458204, 1.66943372, 1.56915093, 1.29922676, 0.94761056, \n 0.60883489, 0.34458301])\n', (616, 707), True, 'import numpy as np\n'), ((942, 963), 'unittest.TestLoader', 'unittest.TestLoader', ([], {}), '()\n', (961, 963), False, 'import unittest\n'), ((997, 1033), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (1020, 1033), False, 'import unittest\n')]
|
"""
@author waziz
"""
import chisel.mteval as mteval
import logging
from _bleu import BLEU, DecodingBLEU, TrainingBLEU
class WrappedBLEU(mteval.LossFunction):
def __init__(self, alias):
self.alias_ = alias
self.bleu_config_ = {}
self.decoding_bleu_wrapper_ = None
self.training_bleu_wrapper_ = None
@property
def alias(self):
return self.alias_
def configure(self, config):
# copies configuration
self.bleu_config_ = dict(config)
# sets default values if necessary
if 'max_order' not in self.bleu_config_:
logging.info('BLEU using default max_order=%d', BLEU.DEFAULT_MAX_ORDER)
self.bleu_config_['max_order'] = BLEU.DEFAULT_MAX_ORDER
if 'smoothing' not in self.bleu_config_:
logging.info('BLEU using default smoothing=%s', BLEU.DEFAULT_SMOOTHING)
self.bleu_config_['smoothing'] = BLEU.DEFAULT_SMOOTHING
def prepare_decoding(self, src, evidence, hypotheses):
"""
Compute sufficient statistics for BLEU in decoding mode
:param src:
:param EmpiricalDistribution evidence:
:param EmpiricalDistribution hypotheses:
"""
assert evidence is hypotheses, 'For now BLEU decoding is supported with Yh == Ye'
self.decoding_bleu_wrapper_ = DecodingBLEU(evidence, evidence.copy_posterior(), **self.bleu_config_)
def prepare_training(self, source, references, hypotheses):
"""
Compute sufficient statistic for BLEU in training mode
:param source:
:param references:
:param EmpiricalDistribution hypotheses:
:return:
"""
self.training_bleu_wrapper_ = TrainingBLEU(references, hypotheses, **self.bleu_config_)
def training_loss(self, c):
return 1 - self.training_bleu_wrapper_.bleu(c)
def loss(self, c, r):
return 1 - self.decoding_bleu_wrapper_.bleu(c, r)
def coloss(self, c):
return 1 - self.decoding_bleu_wrapper_.cobleu(c)
def cleanup(self):
self.decoding_bleu_wrapper_ = None
def reset(self):
pass
def construct(alias):
return WrappedBLEU(alias)
|
[
"logging.info",
"_bleu.TrainingBLEU"
] |
[((1721, 1778), '_bleu.TrainingBLEU', 'TrainingBLEU', (['references', 'hypotheses'], {}), '(references, hypotheses, **self.bleu_config_)\n', (1733, 1778), False, 'from _bleu import BLEU, DecodingBLEU, TrainingBLEU\n'), ((611, 682), 'logging.info', 'logging.info', (['"""BLEU using default max_order=%d"""', 'BLEU.DEFAULT_MAX_ORDER'], {}), "('BLEU using default max_order=%d', BLEU.DEFAULT_MAX_ORDER)\n", (623, 682), False, 'import logging\n'), ((812, 883), 'logging.info', 'logging.info', (['"""BLEU using default smoothing=%s"""', 'BLEU.DEFAULT_SMOOTHING'], {}), "('BLEU using default smoothing=%s', BLEU.DEFAULT_SMOOTHING)\n", (824, 883), False, 'import logging\n')]
|
from django.db import models
import MySQLdb as mysql
import pytest
from pyquery import PyQuery as pq
from olympia.addons.models import Addon
from olympia.amo.tests import reverse_ns
@pytest.yield_fixture
def read_only_mode(client, settings, db):
def _db_error(*args, **kwargs):
raise mysql.OperationalError("You can't do this in read-only mode.")
settings.SLAVE_DATABASES = ['default']
models.signals.pre_save.connect(_db_error)
models.signals.pre_delete.connect(_db_error)
from olympia.lib.settings_base import read_only_mode
env = {key: getattr(settings, key) for key in settings._explicit_settings}
read_only_mode(env)
for key, value in env.items():
setattr(settings, key, value)
client.handler.load_middleware()
yield
models.signals.pre_save.disconnect(_db_error)
models.signals.pre_delete.disconnect(_db_error)
def test_db_error(read_only_mode):
with pytest.raises(mysql.OperationalError):
Addon.objects.create(id=12)
def test_bail_on_post(read_only_mode, client):
response = client.post('/en-US/firefox/')
assert response.status_code == 503
title = pq(response.content)('title').text()
assert title.startswith('Maintenance in progress'), title
@pytest.mark.parametrize('method', ('post', 'put', 'delete', 'patch'))
def test_api_bail_on_write_method(read_only_mode, client, method):
response = getattr(client, method)(reverse_ns('abusereportuser-list'))
assert response.status_code == 503
assert 'website maintenance' in response.json()['error']
|
[
"django.db.models.signals.pre_save.connect",
"pyquery.PyQuery",
"olympia.addons.models.Addon.objects.create",
"olympia.amo.tests.reverse_ns",
"django.db.models.signals.pre_delete.disconnect",
"django.db.models.signals.pre_delete.connect",
"pytest.raises",
"django.db.models.signals.pre_save.disconnect",
"olympia.lib.settings_base.read_only_mode",
"pytest.mark.parametrize",
"MySQLdb.OperationalError"
] |
[((1262, 1331), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""method"""', "('post', 'put', 'delete', 'patch')"], {}), "('method', ('post', 'put', 'delete', 'patch'))\n", (1285, 1331), False, 'import pytest\n'), ((412, 454), 'django.db.models.signals.pre_save.connect', 'models.signals.pre_save.connect', (['_db_error'], {}), '(_db_error)\n', (443, 454), False, 'from django.db import models\n'), ((459, 503), 'django.db.models.signals.pre_delete.connect', 'models.signals.pre_delete.connect', (['_db_error'], {}), '(_db_error)\n', (492, 503), False, 'from django.db import models\n'), ((647, 666), 'olympia.lib.settings_base.read_only_mode', 'read_only_mode', (['env'], {}), '(env)\n', (661, 666), False, 'from olympia.lib.settings_base import read_only_mode\n'), ((795, 840), 'django.db.models.signals.pre_save.disconnect', 'models.signals.pre_save.disconnect', (['_db_error'], {}), '(_db_error)\n', (829, 840), False, 'from django.db import models\n'), ((845, 892), 'django.db.models.signals.pre_delete.disconnect', 'models.signals.pre_delete.disconnect', (['_db_error'], {}), '(_db_error)\n', (881, 892), False, 'from django.db import models\n'), ((301, 363), 'MySQLdb.OperationalError', 'mysql.OperationalError', (['"""You can\'t do this in read-only mode."""'], {}), '("You can\'t do this in read-only mode.")\n', (323, 363), True, 'import MySQLdb as mysql\n'), ((939, 976), 'pytest.raises', 'pytest.raises', (['mysql.OperationalError'], {}), '(mysql.OperationalError)\n', (952, 976), False, 'import pytest\n'), ((986, 1013), 'olympia.addons.models.Addon.objects.create', 'Addon.objects.create', ([], {'id': '(12)'}), '(id=12)\n', (1006, 1013), False, 'from olympia.addons.models import Addon\n'), ((1438, 1472), 'olympia.amo.tests.reverse_ns', 'reverse_ns', (['"""abusereportuser-list"""'], {}), "('abusereportuser-list')\n", (1448, 1472), False, 'from olympia.amo.tests import reverse_ns\n'), ((1160, 1180), 'pyquery.PyQuery', 'pq', (['response.content'], {}), '(response.content)\n', (1162, 1180), True, 'from pyquery import PyQuery as pq\n')]
|
##############################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the "license" file accompanying this file. This file is #
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #
# KIND, express or implied. See the License for the specific language #
# governing permissions and limitations under the License. #
##############################################################################
from moto import mock_ssm
from utils.logger import Logger
from manifest.cfn_params_handler import CFNParamsHandler
from aws.services.ssm import SSM
log_level = 'info'
logger = Logger(loglevel=log_level)
cph = CFNParamsHandler(logger)
ssm = SSM(logger)
def test_update_alfred_ssm():
keyword_ssm = 'alfred_ssm_not_exist_alfred_ssm'
value_ssm = 'parameter_store_value'
value_ssm, param_flag = cph._update_alfred_ssm(
keyword_ssm, value_ssm, False)
assert param_flag is True
@mock_ssm
def test_update_alfred_genkeypair():
ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String')
param = {
"ssm_parameters": [
{
"name": "keymaterial",
"value": "$[keymaterial]"
},
{
"name": "keyfingerprint",
"value": "$[keyfingerprint]"
},
{
"name": "testkeyname",
"value": "$[keyname]"
}
]
}
account = 1234567890
region = 'us-east-1'
value = cph._update_alfred_genkeypair(param, account, region)
assert value == 'testvalue'
@mock_ssm
def test_update_alfred_genpass():
ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String')
param = {
"ssm_parameters": [
{
"name": "testkeyname",
"value": "$[password]"
}
]
}
keyword = 'alfred_genpass_10'
value = ''
value = cph._update_alfred_genpass(keyword, param)
assert value == '_get_ssm_secure_string_testkeyname'
@mock_ssm
def test_update_alfred_genaz():
ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String')
param = {
"ssm_parameters": [
{
"name": "testkeyname",
"value": "$[az]"
}
]
}
keyword = 'alfred_genaz_1'
account = 1234567890
region = 'us-east-1'
value = ''
value = cph._update_alfred_genaz(keyword, param, account, region)
assert value == 'testvalue'
@mock_ssm
def test_random_password():
ssm.put_parameter('testkeyname', 'testvalue', 'A test parameter', 'String')
length = 10
key_password = '<PASSWORD>'
alphanum = False
value = cph.random_password(length, key_password, alphanum)
assert value == '_get_ssm_secure_string_testkeyname'
|
[
"manifest.cfn_params_handler.CFNParamsHandler",
"utils.logger.Logger",
"aws.services.ssm.SSM"
] |
[((1283, 1309), 'utils.logger.Logger', 'Logger', ([], {'loglevel': 'log_level'}), '(loglevel=log_level)\n', (1289, 1309), False, 'from utils.logger import Logger\n'), ((1317, 1341), 'manifest.cfn_params_handler.CFNParamsHandler', 'CFNParamsHandler', (['logger'], {}), '(logger)\n', (1333, 1341), False, 'from manifest.cfn_params_handler import CFNParamsHandler\n'), ((1348, 1359), 'aws.services.ssm.SSM', 'SSM', (['logger'], {}), '(logger)\n', (1351, 1359), False, 'from aws.services.ssm import SSM\n')]
|
import datetime
import json
import requests
def send_message(
webhook_url: str,
content_msg="",
title="",
title_url="",
color=00000000,
timestamp=datetime.datetime.now().isoformat(),
footer_icon="",
footer="",
thumbnail_url="",
author="",
author_url="",
author_icon_url="",
text_name="",
text="",
):
payload = {
"content": content_msg,
"embeds": [
{
"title": title,
"url": title_url,
"color": color,
"timestamp": timestamp,
"footer": {
"icon_url": footer_icon,
"text": footer,
},
"thumbnail": {"url": thumbnail_url},
"author": {
"name": author,
"url": author_url,
"icon_url": author_icon_url,
},
"fields": [
{
"name": text_name,
"value": text,
}
],
}
],
}
print(">> Sending To WebHook...")
payload = json.dumps(payload)
headers = {"Content-Type": "application/json"}
response = requests.post(webhook_url, headers=headers, data=payload)
return response
def example_calling():
webhook_url = "your_webhook_url"
response = send_message(
webhook_url,
content_msg="Some random text",
title="Discord Embed example",
title_url="https://discordjs.guide/popular-topics/embeds.html#embed-preview",
color=15335679,
footer_icon="https://github.githubassets.com/favicons/favicon-dark.png",
footer="May the Force be with you",
thumbnail_url="https://avatars.githubusercontent.com/u/55619686",
author="OjusWiZard",
author_url="https://github.com/OjusWiZard/",
author_icon_url="https://avatars.githubusercontent.com/u/55619686",
text_name=":point_down: :point_down: :point_down:",
text="This is a test message",
)
print("Status: ", response.status_code)
if __name__ == "__main__":
example_calling()
|
[
"requests.post",
"datetime.datetime.now",
"json.dumps"
] |
[((1186, 1205), 'json.dumps', 'json.dumps', (['payload'], {}), '(payload)\n', (1196, 1205), False, 'import json\n'), ((1272, 1329), 'requests.post', 'requests.post', (['webhook_url'], {'headers': 'headers', 'data': 'payload'}), '(webhook_url, headers=headers, data=payload)\n', (1285, 1329), False, 'import requests\n'), ((172, 195), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (193, 195), False, 'import datetime\n')]
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from collections import OrderedDict, namedtuple
from inspect import Signature, signature
import logging
import sys
from threading import Lock
from django.http import Http404
from django.utils import six
from django.conf import urls as django_urls
import wrapt
from django.utils.module_loading import import_string
logger = logging.getLogger(__name__)
class TypeError404(Http404):
pass
def to_int(value):
try:
return int(value)
except ValueError:
raise TypeError404("Could not convert {v!r} to an integer".format(v=value))
def noop(value):
return value
class apply_named_typecasts(object):
__slots__ = (
# 'unnamed_typecasts',
# 'named_typecasts',
'converters',
'signature',
)
def __init__(self, signature, unnamed_args, named_args, ignores):
params = signature.parameters
bindables = [[k, None] for k, v in params.items()
if v.kind == v.POSITIONAL_OR_KEYWORD]
# remove `request`
request = ['request', None]
varself = ['self', None]
if request in bindables:
bindables.remove(request)
if varself in bindables:
bindables.remove(varself)
import pdb; pdb.set_trace()
# replaces None with actual transformers
# for index, unnamed_arg in enumerate(unnamed_args):
# assert callable(unnamed_arg), "Not a callable"
# bindables[index][1] = unnamed_arg
final_args = OrderedDict(bindables)
# Apply named argument handlers
for named_arg, named_arg_caster in named_args.items():
if named_arg in final_args and final_args[named_arg] is None:
final_args[named_arg] = named_arg_caster
self.converters = final_args
self.signature = signature
@wrapt.decorator
def __call__(self, wrapped, instance, args, kwargs):
import pdb; pdb.set_trace()
params = self.signature.bind(*args, **kwargs).arguments
def convert(k, v):
converters = self.converters
if k in converters and callable(converters[k]):
return converters[k](v)
return v
all_args = OrderedDict((k, convert(k,v)) for k, v in params.items())
# defined = wrapped.__code__.co_argcount
# unwrap = namedtuple('unwrap', wrapped.__code__.co_varnames[:5])(*args, **kwargs)
# converters = self.converters
# all_args = {k: converters[k](v) if k in converters and converters[k] is not None else v
# for k, v in unwrap._asdict().items()}
return wrapped(**all_args)
class UrlPower(object):
__slots__ = (
'named_typecasts',
'_lock',
)
def __init__(self, named_typecasts=None):
self.named_typecasts = {}
self._lock = Lock()
if named_typecasts is not None:
self.register(named_typecasts=named_typecasts)
def register(self, named_typecasts):
"""
>>> x = UrlPower()
>>> def somefunc(value): return 2
>>> x.register({'test': int, 'custom': somefunc)})
"""
self._lock.acquire()
try:
for k, v in named_typecasts.items():
if isinstance(v, six.text_type):
v = import_string(dotted_path=v)
self.named_typecasts[k] = v
finally:
self._lock.release()
def include(self, *args, **kwargs):
original_include = django_urls.include(*args, **kwargs)
return original_include
def url(self, regex, view, kwargs=None, name=None, prefix='',
unnamed_args=None, named_args=None, ignores=()):
# pass through to normal handler.
if unnamed_args is None and named_args is None:
return django_urls.url(regex=regex, view=view, kwargs=kwargs,
name=name, prefix=prefix)
sig = signature(view)
view = apply_named_typecasts(signature=sig,
unnamed_args=unnamed_args or (),
named_args=named_args or {},
ignores=ignores)(view)
original_url = django_urls.url(regex=regex, view=view, kwargs=kwargs,
name=name, prefix=prefix)
return original_url
urlpower = UrlPower()
include = urlpower.include
url = urlpower.url
|
[
"django.utils.module_loading.import_string",
"django.conf.urls.include",
"threading.Lock",
"inspect.signature",
"pdb.set_trace",
"django.conf.urls.url",
"collections.OrderedDict",
"logging.getLogger"
] |
[((388, 415), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (405, 415), False, 'import logging\n'), ((1306, 1321), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (1319, 1321), False, 'import pdb\n'), ((1563, 1585), 'collections.OrderedDict', 'OrderedDict', (['bindables'], {}), '(bindables)\n', (1574, 1585), False, 'from collections import OrderedDict, namedtuple\n'), ((1991, 2006), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (2004, 2006), False, 'import pdb\n'), ((2905, 2911), 'threading.Lock', 'Lock', ([], {}), '()\n', (2909, 2911), False, 'from threading import Lock\n'), ((3560, 3596), 'django.conf.urls.include', 'django_urls.include', (['*args'], {}), '(*args, **kwargs)\n', (3579, 3596), True, 'from django.conf import urls as django_urls\n'), ((4004, 4019), 'inspect.signature', 'signature', (['view'], {}), '(view)\n', (4013, 4019), False, 'from inspect import Signature, signature\n'), ((4292, 4377), 'django.conf.urls.url', 'django_urls.url', ([], {'regex': 'regex', 'view': 'view', 'kwargs': 'kwargs', 'name': 'name', 'prefix': 'prefix'}), '(regex=regex, view=view, kwargs=kwargs, name=name, prefix=prefix\n )\n', (4307, 4377), True, 'from django.conf import urls as django_urls\n'), ((3874, 3959), 'django.conf.urls.url', 'django_urls.url', ([], {'regex': 'regex', 'view': 'view', 'kwargs': 'kwargs', 'name': 'name', 'prefix': 'prefix'}), '(regex=regex, view=view, kwargs=kwargs, name=name, prefix=prefix\n )\n', (3889, 3959), True, 'from django.conf import urls as django_urls\n'), ((3369, 3397), 'django.utils.module_loading.import_string', 'import_string', ([], {'dotted_path': 'v'}), '(dotted_path=v)\n', (3382, 3397), False, 'from django.utils.module_loading import import_string\n')]
|
from typing import List, Dict
import matplotlib.pyplot as plt
import numpy as np
from mushroom_rl.algorithms.value.td.q_learning import QLearning
from mushroom_rl.core import Core, Agent, Environment
from mushroom_rl.policy import EpsGreedy
from mushroom_rl.utils.dataset import compute_J
from mushroom_rl.utils.parameters import Parameter
from mdp.algo.model_free.env.deep_sea import DeepSea
from mdp.algo.model_free.g_learning import GLearning
from mdp.algo.model_free.mirl import MIRL
from mdp.algo.model_free.psi_learning import PsiLearning
from mdp.experiment.model_free import Experiment
def experiment_deepsea(agent: Agent, env: Environment, n_episodes: int, k: int) -> List[np.ndarray]:
reward_k = list()
for seed in range(k):
# Set the seed
np.random.seed(seed)
# Reinforcement learning experiment
core = Core(agent, env)
# Train
core.learn(n_episodes=n_episodes, n_steps_per_fit=1, render=False, quiet=True)
# Evaluate results for n_episodes
dataset_q = core.evaluate(n_episodes=1, render=False, quiet=True)
# Compute the average objective value
r = np.mean(compute_J(dataset_q, 1))
reward_k.append(r)
return reward_k
def run():
max_steps = 7
steps = list()
k = 25
n_episodes = 100
agents = dict(
q=QLearning,
psi=PsiLearning,
g=GLearning,
mirl=MIRL
)
q = [10, 50, 90]
labels: map[List[str]] = map(lambda l: [f'{l}_median', f'{l}_10:90'], agents.keys())
markers = ['o', '^', '>', '<']
alphas = [.3, .25, .2, .15]
rewards: Dict[str, List[List[np.ndarray]]] = dict()
for key in agents.keys():
l_q = list()
for _ in q:
l_q.append(list())
rewards[key] = l_q
best_reward = list()
for exponent in range(1, max_steps + 1):
size = np.power(2, exponent)
steps.append(size)
print('Step: {}, size: {}'.format(exponent, size))
# Create the grid environment
env = DeepSea(size, start=(0, 0), goal=(size - 1, size - 1))
# Use an epsilon-greedy policy
epsilon = .1
pi = EpsGreedy(epsilon=epsilon)
learning_rate = Parameter(.1 / 10)
for key, value in agents.items():
agent = value(env.info, pi, learning_rate=learning_rate)
reward_k = experiment_deepsea(agent, env, n_episodes, k)
# q_p10, q_p50, q_p90
q_p = np.percentile(reward_k, q)
reward_list = rewards[key]
for r_i, q_pi in zip(reward_list, q_p):
r_i.append(q_pi)
sum_reward = 0
for j in range(size - 2):
sum_reward -= 1 ** j * (0.01 / size)
best_reward.append(1 + (0.01 / size) + sum_reward)
steps = np.array(steps)
for label, marker, alpha, key in zip(labels, markers, alphas, agents.keys()):
q_p10, q_p50, q_p90 = rewards[key]
plt.plot(steps, np.array(q_p50), marker=marker, label=label[0])
plt.fill_between(steps, q_p10, q_p90, alpha=alpha)
plt.plot(steps, best_reward, label='Best reward')
plt.xlabel('Size of gridworld')
plt.ylabel('Cumulative average reward after 100 episodes')
plt.title('Deep Sea Experiment')
plt.legend()
plt.tight_layout()
plt.grid(True)
plt.show()
if __name__ == '__main__':
result, time = Experiment.benchmark(run)
print(time)
|
[
"matplotlib.pyplot.title",
"mdp.algo.model_free.env.deep_sea.DeepSea",
"numpy.random.seed",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.tight_layout",
"mushroom_rl.utils.dataset.compute_J",
"numpy.power",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"numpy.percentile",
"mushroom_rl.core.Core",
"matplotlib.pyplot.ylabel",
"mushroom_rl.policy.EpsGreedy",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.plot",
"mushroom_rl.utils.parameters.Parameter",
"numpy.array",
"mdp.experiment.model_free.Experiment.benchmark",
"matplotlib.pyplot.xlabel"
] |
[((2804, 2819), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (2812, 2819), True, 'import numpy as np\n'), ((3081, 3130), 'matplotlib.pyplot.plot', 'plt.plot', (['steps', 'best_reward'], {'label': '"""Best reward"""'}), "(steps, best_reward, label='Best reward')\n", (3089, 3130), True, 'import matplotlib.pyplot as plt\n'), ((3135, 3166), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Size of gridworld"""'], {}), "('Size of gridworld')\n", (3145, 3166), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3229), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cumulative average reward after 100 episodes"""'], {}), "('Cumulative average reward after 100 episodes')\n", (3181, 3229), True, 'import matplotlib.pyplot as plt\n'), ((3234, 3266), 'matplotlib.pyplot.title', 'plt.title', (['"""Deep Sea Experiment"""'], {}), "('Deep Sea Experiment')\n", (3243, 3266), True, 'import matplotlib.pyplot as plt\n'), ((3271, 3283), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3281, 3283), True, 'import matplotlib.pyplot as plt\n'), ((3288, 3306), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3304, 3306), True, 'import matplotlib.pyplot as plt\n'), ((3311, 3325), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (3319, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3330, 3340), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3338, 3340), True, 'import matplotlib.pyplot as plt\n'), ((3389, 3414), 'mdp.experiment.model_free.Experiment.benchmark', 'Experiment.benchmark', (['run'], {}), '(run)\n', (3409, 3414), False, 'from mdp.experiment.model_free import Experiment\n'), ((778, 798), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (792, 798), True, 'import numpy as np\n'), ((859, 875), 'mushroom_rl.core.Core', 'Core', (['agent', 'env'], {}), '(agent, env)\n', (863, 875), False, 'from mushroom_rl.core import Core, Agent, Environment\n'), ((1879, 1900), 'numpy.power', 'np.power', (['(2)', 'exponent'], {}), '(2, exponent)\n', (1887, 1900), True, 'import numpy as np\n'), ((2040, 2094), 'mdp.algo.model_free.env.deep_sea.DeepSea', 'DeepSea', (['size'], {'start': '(0, 0)', 'goal': '(size - 1, size - 1)'}), '(size, start=(0, 0), goal=(size - 1, size - 1))\n', (2047, 2094), False, 'from mdp.algo.model_free.env.deep_sea import DeepSea\n'), ((2169, 2195), 'mushroom_rl.policy.EpsGreedy', 'EpsGreedy', ([], {'epsilon': 'epsilon'}), '(epsilon=epsilon)\n', (2178, 2195), False, 'from mushroom_rl.policy import EpsGreedy\n'), ((2221, 2240), 'mushroom_rl.utils.parameters.Parameter', 'Parameter', (['(0.1 / 10)'], {}), '(0.1 / 10)\n', (2230, 2240), False, 'from mushroom_rl.utils.parameters import Parameter\n'), ((3025, 3075), 'matplotlib.pyplot.fill_between', 'plt.fill_between', (['steps', 'q_p10', 'q_p90'], {'alpha': 'alpha'}), '(steps, q_p10, q_p90, alpha=alpha)\n', (3041, 3075), True, 'import matplotlib.pyplot as plt\n'), ((1161, 1184), 'mushroom_rl.utils.dataset.compute_J', 'compute_J', (['dataset_q', '(1)'], {}), '(dataset_q, 1)\n', (1170, 1184), False, 'from mushroom_rl.utils.dataset import compute_J\n'), ((2473, 2499), 'numpy.percentile', 'np.percentile', (['reward_k', 'q'], {}), '(reward_k, q)\n', (2486, 2499), True, 'import numpy as np\n'), ((2969, 2984), 'numpy.array', 'np.array', (['q_p50'], {}), '(q_p50)\n', (2977, 2984), True, 'import numpy as np\n')]
|
"""
Project: python_assessment_3
Author: <NAME>. <<EMAIL>>
Created at: 10/11/2020 7:34 pm
File: client.py
"""
import socket
from colorama import Fore, Style
def request(question: str, host: str, port: int):
"""Creates a client socket and requests an answer from the server based on the provided question.
:param question: Question to be sent to the server
:param host: Server host
:param port: Server port
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect((host, port))
print(f'[CLIENT] Connected...sending this question: {question}')
s.sendall(bytes(question, encoding='utf-8'))
data = s.recv(1024)
print(f'{Fore.GREEN}[CLIENT] Received answer from server: {data.decode(encoding="utf-8")}')
print(Style.RESET_ALL)
|
[
"socket.socket"
] |
[((443, 492), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (456, 492), False, 'import socket\n')]
|
# Copyright 2019 <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import heapq
import os
import random
import sys
import time
import math
import warnings
warnings.simplefilter(action='ignore', category=UserWarning)
import gym
from gym import spaces
from gym.envs.registration import register
from gym.utils import seeding
import numpy as np
from common import sender_obs
from common.utils import pcc_aurora_reward, read_json_file
from simulator.trace import Trace
import pandas as pd
MAX_CWND = 5000
MIN_CWND = 4
MAX_RATE = 20000
MIN_RATE = 5
REWARD_SCALE = 0.001
EVENT_TYPE_SEND = 'S'
EVENT_TYPE_ACK = 'A'
BYTES_PER_PACKET = 1500
LATENCY_PENALTY = 1.0
LOSS_PENALTY = 1.0
USE_LATENCY_NOISE = True
MAX_LATENCY_NOISE = 1.1
# DEBUG = True
DEBUG = False
MI_RTT_PROPORTION = 1.0
# PACKET_LOG_FLAG = False
PACKET_LOG_FLAG = True
def debug_print(msg):
if DEBUG:
print(msg, file=sys.stderr, flush=True)
class EmuReplay:
def __init__(self, ):
df = pd.read_csv('aurora_emulation_log.csv')
self.ts = df['timestamp'].tolist()
self.send_rate = df['send_rate'].tolist()
self.idx = 0
def get_ts(self):
if self.idx > len(self.ts):
self.idx = len(self.ts) -1
ts = self.ts[self.idx]
self.idx += 1
return ts
def get_rate(self):
return self.send_rate[self.idx] / 8 / BYTES_PER_PACKET
def reset(self):
self.idx = 0
class Link():
def __init__(self, trace: Trace):
self.trace = trace
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.queue_size = self.trace.get_queue_size()
self.pkt_in_queue = 0
def get_cur_queue_delay(self, event_time):
self.pkt_in_queue = max(0, self.pkt_in_queue -
(event_time - self.queue_delay_update_time) *
self.get_bandwidth(event_time))
self.queue_delay_update_time = event_time
cur_queue_delay = math.ceil(
self.pkt_in_queue) / self.get_bandwidth(event_time)
return cur_queue_delay
def get_cur_latency(self, event_time):
q_delay = self.get_cur_queue_delay(event_time)
# print('queue delay: ', q_delay)
return self.trace.get_delay(event_time) / 1000.0 + q_delay
def packet_enters_link(self, event_time):
if (random.random() < self.trace.get_loss_rate()):
return False
self.queue_delay = self.get_cur_queue_delay(event_time)
extra_delay = 1.0 / self.get_bandwidth(event_time)
if 1 + math.ceil(self.pkt_in_queue) > self.queue_size:
# print("{}\tDrop!".format(event_time))
return False
self.queue_delay += extra_delay
self.pkt_in_queue += 1
return True
def print_debug(self):
print("Link:")
# TODO: Do not use timestamp 0.
print("Bandwidth: %.3fMbps" % (self.trace.get_bandwidth(0)))
# TODO: Do not use timestamp 0.
print("Delay: %.3fms" % (self.trace.get_delay(0)))
print("Queue Delay: %.3fms" % (self.queue_delay * 1000))
print("One Packet Queue Delay: %.3fms" % (
1000.0 * 1 / (self.trace.get_bandwidth(0) * 1e6 / 8 / BYTES_PER_PACKET)))
print("Queue size: %dpackets" % self.queue_size)
print("Loss: %.4f" % self.trace.get_loss_rate())
def reset(self):
self.queue_delay = 0.0
self.queue_delay_update_time = 0.0
self.pkt_in_queue = 0
def get_bandwidth(self, ts):
return self.trace.get_bandwidth(ts) * 1e6 / 8 / BYTES_PER_PACKET
class Network():
def __init__(self, senders, links, env):
self.event_count = 0
self.q = []
self.cur_time = 0.0
self.senders = senders
self.links = links
self.queue_initial_packets()
self.env = env
self.pkt_log = []
def queue_initial_packets(self):
for sender in self.senders:
sender.register_network(self)
sender.reset_obs()
heapq.heappush(self.q, (0, sender, EVENT_TYPE_SEND,
0, 0.0, False, self.event_count, sender.rto, 0))
self.event_count += 1
def reset(self):
self.pkt_log = []
self.cur_time = 0.0
self.q = []
[link.reset() for link in self.links]
[sender.reset() for sender in self.senders]
self.queue_initial_packets()
def get_cur_time(self):
return self.cur_time
def run_for_dur(self, dur, action=None):
# if self.cur_time > 1.75:
# pass
# else:
# self.senders[0].rate = self.env.replay.get_rate()
# dur = self.env.replay.get_ts() - self.cur_time
end_time = min(self.cur_time + dur, self.env.current_trace.timestamps[-1])
debug_print('MI from {} to {}, dur {}'.format(
self.cur_time, end_time, dur))
for sender in self.senders:
sender.reset_obs()
while True:
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = self.q[0]
if event_time >= end_time:
self.cur_time = end_time
break
event_time, sender, event_type, next_hop, cur_latency, dropped, \
event_id, rto, event_queue_delay = heapq.heappop(self.q)
self.cur_time = event_time
new_event_time = event_time
new_event_type = event_type
new_next_hop = next_hop
new_latency = cur_latency
new_dropped = dropped
new_event_queue_delay = event_queue_delay
push_new_event = False
debug_print("Got %d event %s, to link %d, latency %f at time %f, "
"next_hop %d, dropped %s, event_q length %f, "
"sender rate %f, duration: %f, queue_size: %f, "
"rto: %f, cwnd: %f, ssthresh: %f, sender rto %f, "
"pkt in flight %d, wait time %d" % (
event_id, event_type, next_hop, cur_latency,
event_time, next_hop, dropped, len(self.q),
sender.rate, dur, self.links[0].queue_size,
rto, sender.cwnd, sender.ssthresh, sender.rto,
int(sender.bytes_in_flight/BYTES_PER_PACKET),
sender.pkt_loss_wait_time))
if event_type == EVENT_TYPE_ACK:
if next_hop == len(sender.path):
# if cur_latency > 1.0:
# sender.timeout(cur_latency)
# sender.on_packet_lost(cur_latency)
if rto >= 0 and cur_latency > rto and sender.pkt_loss_wait_time <= 0:
sender.timeout()
dropped = True
new_dropped = True
elif dropped:
sender.on_packet_lost(cur_latency)
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'lost',
BYTES_PER_PACKET])
else:
sender.on_packet_acked(cur_latency)
debug_print('Ack packet at {}'.format(self.cur_time))
# log packet acked
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'acked',
BYTES_PER_PACKET, cur_latency,
event_queue_delay])
else:
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
new_latency += link_latency
new_event_time += link_latency
push_new_event = True
elif event_type == EVENT_TYPE_SEND:
if next_hop == 0:
if sender.can_send_packet():
sender.on_packet_sent()
# print('Send packet at {}'.format(self.cur_time))
if PACKET_LOG_FLAG:
self.pkt_log.append([self.cur_time, event_id, 'sent',
BYTES_PER_PACKET])
push_new_event = True
heapq.heappush(self.q, (self.cur_time + (1.0 / sender.rate),
sender, EVENT_TYPE_SEND, 0, 0.0,
False, self.event_count, sender.rto,
0))
self.event_count += 1
else:
push_new_event = True
if next_hop == sender.dest:
new_event_type = EVENT_TYPE_ACK
new_next_hop = next_hop + 1
new_event_queue_delay += sender.path[next_hop].get_cur_queue_delay(
self.cur_time)
link_latency = sender.path[next_hop].get_cur_latency(
self.cur_time)
# if USE_LATENCY_NOISE:
# link_latency *= random.uniform(1.0, MAX_LATENCY_NOISE)
# link_latency += self.env.current_trace.get_delay_noise(self.cur_time) / 1000
# link_latency *= self.env.current_trace.get_delay_noise_replay(self.cur_time)
new_latency += link_latency
new_event_time += link_latency
new_dropped = not sender.path[next_hop].packet_enters_link(
self.cur_time)
if not new_dropped:
sender.queue_delay_samples.append(new_event_queue_delay)
if push_new_event:
heapq.heappush(self.q, (new_event_time, sender, new_event_type,
new_next_hop, new_latency, new_dropped,
event_id, rto, new_event_queue_delay))
for sender in self.senders:
sender.record_run()
sender_mi = self.senders[0].get_run_data()
throughput = sender_mi.get("recv rate") # bits/sec
latency = sender_mi.get("avg latency") # second
loss = sender_mi.get("loss ratio")
debug_print("thpt %f, delay %f, loss %f, bytes sent %f, bytes acked %f" % (
throughput/1e6, latency, loss, sender_mi.bytes_sent, sender_mi.bytes_acked))
reward = pcc_aurora_reward(
throughput / 8 / BYTES_PER_PACKET, latency, loss,
np.mean(self.env.current_trace.bandwidths) * 1e6 / 8 / BYTES_PER_PACKET)
if latency > 0.0:
self.env.run_dur = MI_RTT_PROPORTION * sender_mi.get("avg latency") + (1 / self.links[0].get_bandwidth(self.cur_time))
# self.env.run_dur = max(MI_RTT_PROPORTION * sender_mi.get("avg latency"), 5 * (1 / self.senders[0].rate))
# print(self.env.run_dur)
return reward * REWARD_SCALE
class Sender():
def __init__(self, rate, path, dest, features, cwnd=25, history_len=10,
delta_scale=1):
self.id = Sender._get_next_id()
self.delta_scale = delta_scale
self.starting_rate = rate
self.rate = rate
self.sent = 0
self.acked = 0
self.lost = 0
self.bytes_in_flight = 0
self.min_latency = None
self.rtt_samples = []
self.queue_delay_samples = []
self.prev_rtt_samples = self.rtt_samples
self.sample_time = []
self.net = None
self.path = path
self.dest = dest
self.history_len = history_len
self.features = features
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.cwnd = cwnd
self.use_cwnd = False
self.rto = -1
self.ssthresh = 0
self.pkt_loss_wait_time = -1
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
_next_id = 1
def _get_next_id():
result = Sender._next_id
Sender._next_id += 1
return result
def apply_rate_delta(self, delta):
# if self.got_data:
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_rate(self.rate * (1.0 + delta))
else:
self.set_rate(self.rate / (1.0 - delta))
def apply_cwnd_delta(self, delta):
delta *= self.delta_scale
#print("Applying delta %f" % delta)
if delta >= 0.0:
self.set_cwnd(self.cwnd * (1.0 + delta))
else:
self.set_cwnd(self.cwnd / (1.0 - delta))
def can_send_packet(self):
if self.use_cwnd:
return int(self.bytes_in_flight) / BYTES_PER_PACKET < self.cwnd
else:
return True
def register_network(self, net):
self.net = net
def on_packet_sent(self):
self.sent += 1
self.bytes_in_flight += BYTES_PER_PACKET
def on_packet_acked(self, rtt):
self.estRTT = (7.0 * self.estRTT + rtt) / 8.0 # RTT of emulation way
self.RTTVar = (self.RTTVar * 7.0 + abs(rtt - self.estRTT) * 1.0) / 8.0
self.acked += 1
self.rtt_samples.append(rtt)
# self.rtt_samples.append(self.estRTT)
if (self.min_latency is None) or (rtt < self.min_latency):
self.min_latency = rtt
self.bytes_in_flight -= BYTES_PER_PACKET
def on_packet_lost(self, rtt):
self.lost += 1
self.bytes_in_flight -= BYTES_PER_PACKET
def set_rate(self, new_rate):
self.rate = new_rate
# print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
if self.rate > MAX_RATE:
self.rate = MAX_RATE
if self.rate < MIN_RATE:
self.rate = MIN_RATE
def set_cwnd(self, new_cwnd):
self.cwnd = int(new_cwnd)
#print("Attempt to set new rate to %f (min %f, max %f)" % (new_rate, MIN_RATE, MAX_RATE))
# if self.cwnd > MAX_CWND:
# self.cwnd = MAX_CWND
# if self.cwnd < MIN_CWND:
# self.cwnd = MIN_CWND
def record_run(self):
smi = self.get_run_data()
# if not self.got_data and smi.rtt_samples:
# self.got_data = True
# self.history.step(smi)
# else:
self.history.step(smi)
def get_obs(self):
return self.history.as_array()
def get_run_data(self):
obs_end_time = self.net.get_cur_time()
#obs_dur = obs_end_time - self.obs_start_time
#print("Got %d acks in %f seconds" % (self.acked, obs_dur))
#print("Sent %d packets in %f seconds" % (self.sent, obs_dur))
#print("self.rate = %f" % self.rate)
# print(self.acked, self.sent)
rtt_samples = self.rtt_samples if self.rtt_samples else self.prev_rtt_samples
# if not self.rtt_samples:
# print(self.obs_start_time, obs_end_time, self.rate)
# rtt_samples is empty when there is no packet acked in MI
# Solution: inherit from previous rtt_samples.
return sender_obs.SenderMonitorInterval(
self.id,
bytes_sent=self.sent * BYTES_PER_PACKET,
bytes_acked=self.acked * BYTES_PER_PACKET,
bytes_lost=self.lost * BYTES_PER_PACKET,
send_start=self.obs_start_time,
send_end=obs_end_time,
recv_start=self.obs_start_time,
recv_end=obs_end_time,
rtt_samples=self.rtt_samples,
queue_delay_samples=self.queue_delay_samples,
packet_size=BYTES_PER_PACKET
)
def reset_obs(self):
self.sent = 0
self.acked = 0
self.lost = 0
if self.rtt_samples:
self.prev_rtt_samples = self.rtt_samples
self.rtt_samples = []
self.queue_delay_samples = []
self.obs_start_time = self.net.get_cur_time()
def print_debug(self):
print("Sender:")
print("Obs: %s" % str(self.get_obs()))
print("Rate: %f" % self.rate)
print("Sent: %d" % self.sent)
print("Acked: %d" % self.acked)
print("Lost: %d" % self.lost)
print("Min Latency: %s" % str(self.min_latency))
def reset(self):
#print("Resetting sender!")
self.rate = self.starting_rate
self.bytes_in_flight = 0
self.min_latency = None
self.reset_obs()
self.history = sender_obs.SenderHistory(self.history_len,
self.features, self.id)
self.estRTT = 1000000 / 1e6 # SynInterval in emulation
self.RTTVar = self.estRTT / 2 # RTT variance
# self.got_data = False
def timeout(self):
# placeholder
pass
class SimulatedNetworkEnv(gym.Env):
def __init__(self, traces, history_len=10,
features="sent latency inflation,latency ratio,send ratio",
congestion_control_type="aurora", train_flag=False,
delta_scale=1.0):
"""Network environment used in simulation.
congestion_control_type: aurora is pcc-rl. cubic is TCPCubic.
"""
assert congestion_control_type in {"aurora", "cubic"}, \
"Unrecognized congestion_control_type {}.".format(
congestion_control_type)
# self.replay = EmuReplay()
self.delta_scale = delta_scale
self.traces = traces
self.current_trace = np.random.choice(self.traces)
self.train_flag = train_flag
self.congestion_control_type = congestion_control_type
if self.congestion_control_type == 'aurora':
self.use_cwnd = False
elif self.congestion_control_type == 'cubic':
self.use_cwnd = True
self.history_len = history_len
# print("History length: %d" % history_len)
self.features = features.split(",")
# print("Features: %s" % str(self.features))
self.links = None
self.senders = None
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.run_dur = None
self.run_period = 0.1
self.steps_taken = 0
self.debug_thpt_changes = False
self.last_thpt = None
self.last_rate = None
if self.use_cwnd:
self.action_space = spaces.Box(
np.array([-1e12, -1e12]), np.array([1e12, 1e12]), dtype=np.float32)
else:
self.action_space = spaces.Box(
np.array([-1e12]), np.array([1e12]), dtype=np.float32)
self.observation_space = None
# use_only_scale_free = True
single_obs_min_vec = sender_obs.get_min_obs_vector(self.features)
single_obs_max_vec = sender_obs.get_max_obs_vector(self.features)
self.observation_space = spaces.Box(np.tile(single_obs_min_vec, self.history_len),
np.tile(single_obs_max_vec,
self.history_len),
dtype=np.float32)
self.reward_sum = 0.0
self.reward_ewma = 0.0
self.episodes_run = -1
def seed(self, seed=None):
self.rand, seed = seeding.np_random(seed)
return [seed]
def _get_all_sender_obs(self):
sender_obs = self.senders[0].get_obs()
sender_obs = np.array(sender_obs).reshape(-1,)
return sender_obs
def step(self, actions):
#print("Actions: %s" % str(actions))
# print(actions)
for i in range(0, 1): # len(actions)):
#print("Updating rate for sender %d" % i)
action = actions
self.senders[i].apply_rate_delta(action[0])
if self.use_cwnd:
self.senders[i].apply_cwnd_delta(action[1])
# print("Running for %fs" % self.run_dur)
reward = self.net.run_for_dur(self.run_dur, action=actions[0])
self.steps_taken += 1
sender_obs = self._get_all_sender_obs()
should_stop = self.current_trace.is_finished(self.net.get_cur_time())
self.reward_sum += reward
# print('env step: {}s'.format(time.time() - t_start))
return sender_obs, reward, should_stop, {}
def print_debug(self):
print("---Link Debug---")
for link in self.links:
link.print_debug()
print("---Sender Debug---")
for sender in self.senders:
sender.print_debug()
def create_new_links_and_senders(self):
# self.replay.reset()
self.links = [Link(self.current_trace), Link(self.current_trace)]
if self.congestion_control_type == "aurora":
if not self.train_flag:
self.senders = [Sender( #self.replay.get_rate(),
# 2500000 / 8 /BYTES_PER_PACKET / 0.048,
# 12000000 / 8 /BYTES_PER_PACKET / 0.048,
# 10 / (self.current_trace.get_delay(0) *2/1000),
100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
else:
# self.senders = [Sender(random.uniform(0.3, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len)]
# self.senders = [Sender(random.uniform(10/bw, 1.5) * bw,
# [self.links[0], self.links[1]], 0,
# self.features,
# history_len=self.history_len,
# delta_scale=self.delta_scale)]
self.senders = [Sender(100,
[self.links[0], self.links[1]], 0,
self.features,
history_len=self.history_len,
delta_scale=self.delta_scale)]
elif self.congestion_control_type == "cubic":
raise NotImplementedError
else:
raise RuntimeError("Unrecognized congestion_control_type {}".format(
self.congestion_control_type))
# self.run_dur = 3 * lat
# self.run_dur = 1 * lat
if not self.senders[0].rtt_samples:
# self.run_dur = 0.473
# self.run_dur = 5 / self.senders[0].rate
self.run_dur = 0.01
# self.run_dur = self.current_trace.get_delay(0) * 2 / 1000
# self.run_dur = self.replay.get_ts() - 0
def reset(self):
self.steps_taken = 0
self.net.reset()
self.current_trace = np.random.choice(self.traces)
self.current_trace.reset()
self.create_new_links_and_senders()
self.net = Network(self.senders, self.links, self)
self.episodes_run += 1
# self.replay.reset()
self.net.run_for_dur(self.run_dur)
self.reward_ewma *= 0.99
self.reward_ewma += 0.01 * self.reward_sum
# print("Reward: %0.2f, Ewma Reward: %0.2f" % (self.reward_sum, self.reward_ewma))
self.reward_sum = 0.0
return self._get_all_sender_obs()
register(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')
|
[
"warnings.simplefilter",
"heapq.heappush",
"math.ceil",
"pandas.read_csv",
"common.sender_obs.SenderMonitorInterval",
"heapq.heappop",
"random.random",
"common.sender_obs.SenderHistory",
"common.sender_obs.get_min_obs_vector",
"numpy.array",
"numpy.tile",
"numpy.mean",
"numpy.random.choice",
"common.sender_obs.get_max_obs_vector",
"gym.envs.registration.register",
"gym.utils.seeding.np_random"
] |
[((680, 740), 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'UserWarning'}), "(action='ignore', category=UserWarning)\n", (701, 740), False, 'import warnings\n'), ((24695, 24771), 'gym.envs.registration.register', 'register', ([], {'id': '"""PccNs-v0"""', 'entry_point': '"""simulator.network:SimulatedNetworkEnv"""'}), "(id='PccNs-v0', entry_point='simulator.network:SimulatedNetworkEnv')\n", (24703, 24771), False, 'from gym.envs.registration import register\n'), ((1506, 1545), 'pandas.read_csv', 'pd.read_csv', (['"""aurora_emulation_log.csv"""'], {}), "('aurora_emulation_log.csv')\n", (1517, 1545), True, 'import pandas as pd\n'), ((12785, 12851), 'common.sender_obs.SenderHistory', 'sender_obs.SenderHistory', (['self.history_len', 'self.features', 'self.id'], {}), '(self.history_len, self.features, self.id)\n', (12809, 12851), False, 'from common import sender_obs\n'), ((16345, 16750), 'common.sender_obs.SenderMonitorInterval', 'sender_obs.SenderMonitorInterval', (['self.id'], {'bytes_sent': '(self.sent * BYTES_PER_PACKET)', 'bytes_acked': '(self.acked * BYTES_PER_PACKET)', 'bytes_lost': '(self.lost * BYTES_PER_PACKET)', 'send_start': 'self.obs_start_time', 'send_end': 'obs_end_time', 'recv_start': 'self.obs_start_time', 'recv_end': 'obs_end_time', 'rtt_samples': 'self.rtt_samples', 'queue_delay_samples': 'self.queue_delay_samples', 'packet_size': 'BYTES_PER_PACKET'}), '(self.id, bytes_sent=self.sent *\n BYTES_PER_PACKET, bytes_acked=self.acked * BYTES_PER_PACKET, bytes_lost\n =self.lost * BYTES_PER_PACKET, send_start=self.obs_start_time, send_end\n =obs_end_time, recv_start=self.obs_start_time, recv_end=obs_end_time,\n rtt_samples=self.rtt_samples, queue_delay_samples=self.\n queue_delay_samples, packet_size=BYTES_PER_PACKET)\n', (16377, 16750), False, 'from common import sender_obs\n'), ((17688, 17754), 'common.sender_obs.SenderHistory', 'sender_obs.SenderHistory', (['self.history_len', 'self.features', 'self.id'], {}), '(self.history_len, self.features, self.id)\n', (17712, 17754), False, 'from common import sender_obs\n'), ((18715, 18744), 'numpy.random.choice', 'np.random.choice', (['self.traces'], {}), '(self.traces)\n', (18731, 18744), True, 'import numpy as np\n'), ((19942, 19986), 'common.sender_obs.get_min_obs_vector', 'sender_obs.get_min_obs_vector', (['self.features'], {}), '(self.features)\n', (19971, 19986), False, 'from common import sender_obs\n'), ((20016, 20060), 'common.sender_obs.get_max_obs_vector', 'sender_obs.get_max_obs_vector', (['self.features'], {}), '(self.features)\n', (20045, 20060), False, 'from common import sender_obs\n'), ((20509, 20532), 'gym.utils.seeding.np_random', 'seeding.np_random', (['seed'], {}), '(seed)\n', (20526, 20532), False, 'from gym.utils import seeding\n'), ((24174, 24203), 'numpy.random.choice', 'np.random.choice', (['self.traces'], {}), '(self.traces)\n', (24190, 24203), True, 'import numpy as np\n'), ((2522, 2550), 'math.ceil', 'math.ceil', (['self.pkt_in_queue'], {}), '(self.pkt_in_queue)\n', (2531, 2550), False, 'import math\n'), ((2895, 2910), 'random.random', 'random.random', ([], {}), '()\n', (2908, 2910), False, 'import random\n'), ((4575, 4680), 'heapq.heappush', 'heapq.heappush', (['self.q', '(0, sender, EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0)'], {}), '(self.q, (0, sender, EVENT_TYPE_SEND, 0, 0.0, False, self.\n event_count, sender.rto, 0))\n', (4589, 4680), False, 'import heapq\n'), ((5914, 5935), 'heapq.heappop', 'heapq.heappop', (['self.q'], {}), '(self.q)\n', (5927, 5935), False, 'import heapq\n'), ((20105, 20150), 'numpy.tile', 'np.tile', (['single_obs_min_vec', 'self.history_len'], {}), '(single_obs_min_vec, self.history_len)\n', (20112, 20150), True, 'import numpy as np\n'), ((20196, 20241), 'numpy.tile', 'np.tile', (['single_obs_max_vec', 'self.history_len'], {}), '(single_obs_max_vec, self.history_len)\n', (20203, 20241), True, 'import numpy as np\n'), ((3105, 3133), 'math.ceil', 'math.ceil', (['self.pkt_in_queue'], {}), '(self.pkt_in_queue)\n', (3114, 3133), False, 'import math\n'), ((10865, 11015), 'heapq.heappush', 'heapq.heappush', (['self.q', '(new_event_time, sender, new_event_type, new_next_hop, new_latency,\n new_dropped, event_id, rto, new_event_queue_delay)'], {}), '(self.q, (new_event_time, sender, new_event_type,\n new_next_hop, new_latency, new_dropped, event_id, rto,\n new_event_queue_delay))\n', (10879, 11015), False, 'import heapq\n'), ((19640, 19686), 'numpy.array', 'np.array', (['[-1000000000000.0, -1000000000000.0]'], {}), '([-1000000000000.0, -1000000000000.0])\n', (19648, 19686), True, 'import numpy as np\n'), ((19666, 19710), 'numpy.array', 'np.array', (['[1000000000000.0, 1000000000000.0]'], {}), '([1000000000000.0, 1000000000000.0])\n', (19674, 19710), True, 'import numpy as np\n'), ((19782, 19810), 'numpy.array', 'np.array', (['[-1000000000000.0]'], {}), '([-1000000000000.0])\n', (19790, 19810), True, 'import numpy as np\n'), ((19801, 19828), 'numpy.array', 'np.array', (['[1000000000000.0]'], {}), '([1000000000000.0])\n', (19809, 19828), True, 'import numpy as np\n'), ((20659, 20679), 'numpy.array', 'np.array', (['sender_obs'], {}), '(sender_obs)\n', (20667, 20679), True, 'import numpy as np\n'), ((9454, 9590), 'heapq.heappush', 'heapq.heappush', (['self.q', '(self.cur_time + 1.0 / sender.rate, sender, EVENT_TYPE_SEND, 0, 0.0, False,\n self.event_count, sender.rto, 0)'], {}), '(self.q, (self.cur_time + 1.0 / sender.rate, sender,\n EVENT_TYPE_SEND, 0, 0.0, False, self.event_count, sender.rto, 0))\n', (9468, 9590), False, 'import heapq\n'), ((11650, 11692), 'numpy.mean', 'np.mean', (['self.env.current_trace.bandwidths'], {}), '(self.env.current_trace.bandwidths)\n', (11657, 11692), True, 'import numpy as np\n')]
|
"""
Copyright 2020 EUROCONTROL
==========================================
Redistribution and use in source and binary forms, with or without modification, are permitted
provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions
and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
and the following disclaimer in the documentation and/or other materials provided with the
distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF
THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
==========================================
Editorial note: this license is an instance of the BSD license template as provided by the Open
Source Initiative: http://opensource.org/licenses/BSD-3-Clause
Details on EUROCONTROL: http://www.eurocontrol.int
"""
__author__ = "EUROCONTROL (SWIM)"
import os
import pytest
from pkg_resources import resource_filename
from aixm_graph.datasets.datasets import AIXMDataSet
TEST_FILENAME = 'dataset.xml'
SKELETON_FILENAME = 'skeleton.xml'
@pytest.fixture
def test_filepath():
return resource_filename(__name__, f'../../static/{TEST_FILENAME}')
@pytest.fixture
def test_skeleton_path():
return resource_filename(__name__, f'../../static/{SKELETON_FILENAME}')
def test_dataset__name(test_filepath):
dataset = AIXMDataSet(test_filepath)
assert TEST_FILENAME == dataset.name
def test_dataset__process__features_are_retrieved(test_filepath, test_config):
dataset = AIXMDataSet(test_filepath)
dataset.process()
features = list(dataset.features)
assert len(features) > 0
for feature_name in test_config['FEATURES']:
assert feature_name in [f.name for f in features]
def test_dataset__generate_skeleton(test_filepath, test_skeleton_path, test_config):
dataset = AIXMDataSet(test_filepath)
dataset.process()
skeleton_path = dataset.generate_skeleton()
assert os.path.exists(skeleton_path)
with open(skeleton_path, 'r') as skeleton:
with open(test_skeleton_path, 'r') as test_skeleton:
assert skeleton.read() == test_skeleton.read()
os.remove(skeleton_path)
|
[
"os.remove",
"aixm_graph.datasets.datasets.AIXMDataSet",
"os.path.exists",
"pkg_resources.resource_filename"
] |
[((2048, 2108), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', 'f"""../../static/{TEST_FILENAME}"""'], {}), "(__name__, f'../../static/{TEST_FILENAME}')\n", (2065, 2108), False, 'from pkg_resources import resource_filename\n'), ((2164, 2228), 'pkg_resources.resource_filename', 'resource_filename', (['__name__', 'f"""../../static/{SKELETON_FILENAME}"""'], {}), "(__name__, f'../../static/{SKELETON_FILENAME}')\n", (2181, 2228), False, 'from pkg_resources import resource_filename\n'), ((2284, 2310), 'aixm_graph.datasets.datasets.AIXMDataSet', 'AIXMDataSet', (['test_filepath'], {}), '(test_filepath)\n', (2295, 2310), False, 'from aixm_graph.datasets.datasets import AIXMDataSet\n'), ((2448, 2474), 'aixm_graph.datasets.datasets.AIXMDataSet', 'AIXMDataSet', (['test_filepath'], {}), '(test_filepath)\n', (2459, 2474), False, 'from aixm_graph.datasets.datasets import AIXMDataSet\n'), ((2776, 2802), 'aixm_graph.datasets.datasets.AIXMDataSet', 'AIXMDataSet', (['test_filepath'], {}), '(test_filepath)\n', (2787, 2802), False, 'from aixm_graph.datasets.datasets import AIXMDataSet\n'), ((2887, 2916), 'os.path.exists', 'os.path.exists', (['skeleton_path'], {}), '(skeleton_path)\n', (2901, 2916), False, 'import os\n'), ((3090, 3114), 'os.remove', 'os.remove', (['skeleton_path'], {}), '(skeleton_path)\n', (3099, 3114), False, 'import os\n')]
|
# Released under the MIT License. See LICENSE for details.
#
"""Call related functionality shared between all efro components."""
from __future__ import annotations
from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast
import functools
if TYPE_CHECKING:
from typing import Any, overload
CT = TypeVar('CT', bound=Callable)
class _CallbackCall(Generic[CT]):
"""Descriptor for exposing a call with a type defined by a TypeVar."""
def __get__(self, obj: Any, type_in: Any = None) -> CT:
return cast(CT, None)
class CallbackSet(Generic[CT]):
"""Wrangles callbacks for a particular event in a type-safe manner."""
# In the type-checker's eyes, our 'run' attr is a CallbackCall which
# returns a callable with the type we were created with. This lets us
# type-check our run calls. (Is there another way to expose a function
# with a signature defined by a generic?..)
# At runtime, run() simply passes its args verbatim to its registered
# callbacks; no types are checked.
if TYPE_CHECKING:
run: _CallbackCall[CT] = _CallbackCall()
else:
def run(self, *args, **keywds):
"""Run all callbacks."""
print('HELLO FROM RUN', *args, **keywds)
def __init__(self) -> None:
print('CallbackSet()')
def __del__(self) -> None:
print('~CallbackSet()')
def add(self, call: CT) -> None:
"""Add a callback to be run."""
print('Would add call', call)
# Define Call() which can be used in type-checking call-wrappers that behave
# similarly to functools.partial (in that they take a callable and some
# positional arguments to be passed to it).
# In type-checking land, We define several different _CallXArg classes
# corresponding to different argument counts and define Call() as an
# overloaded function which returns one of them based on how many args are
# passed.
# To use this, simply assign your call type to this Call for type checking:
# Example:
# class _MyCallWrapper:
# <runtime class defined here>
# if TYPE_CHECKING:
# MyCallWrapper = efro.call.Call
# else:
# MyCallWrapper = _MyCallWrapper
# Note that this setup currently only works with positional arguments; if you
# would like to pass args via keyword you can wrap a lambda or local function
# which takes keyword args and converts to a call containing keywords.
if TYPE_CHECKING:
In1T = TypeVar('In1T')
In2T = TypeVar('In2T')
In3T = TypeVar('In3T')
In4T = TypeVar('In4T')
In5T = TypeVar('In5T')
In6T = TypeVar('In6T')
In7T = TypeVar('In7T')
OutT = TypeVar('OutT')
class _CallNoArgs(Generic[OutT]):
"""Single argument variant of call wrapper."""
def __init__(self, _call: Callable[[], OutT]):
...
def __call__(self) -> OutT:
...
class _Call1Arg(Generic[In1T, OutT]):
"""Single argument variant of call wrapper."""
def __init__(self, _call: Callable[[In1T], OutT]):
...
def __call__(self, _arg1: In1T) -> OutT:
...
class _Call2Args(Generic[In1T, In2T, OutT]):
"""Two argument variant of call wrapper"""
def __init__(self, _call: Callable[[In1T, In2T], OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T) -> OutT:
...
class _Call3Args(Generic[In1T, In2T, In3T, OutT]):
"""Three argument variant of call wrapper"""
def __init__(self, _call: Callable[[In1T, In2T, In3T], OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T, _arg3: In3T) -> OutT:
...
class _Call4Args(Generic[In1T, In2T, In3T, In4T, OutT]):
"""Four argument variant of call wrapper"""
def __init__(self, _call: Callable[[In1T, In2T, In3T, In4T], OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T, _arg3: In3T,
_arg4: In4T) -> OutT:
...
class _Call5Args(Generic[In1T, In2T, In3T, In4T, In5T, OutT]):
"""Five argument variant of call wrapper"""
def __init__(self, _call: Callable[[In1T, In2T, In3T, In4T, In5T],
OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T, _arg3: In3T, _arg4: In4T,
_arg5: In5T) -> OutT:
...
class _Call6Args(Generic[In1T, In2T, In3T, In4T, In5T, In6T, OutT]):
"""Six argument variant of call wrapper"""
def __init__(self,
_call: Callable[[In1T, In2T, In3T, In4T, In5T, In6T],
OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T, _arg3: In3T, _arg4: In4T,
_arg5: In5T, _arg6: In6T) -> OutT:
...
class _Call7Args(Generic[In1T, In2T, In3T, In4T, In5T, In6T, In7T, OutT]):
"""Seven argument variant of call wrapper"""
def __init__(
self,
_call: Callable[[In1T, In2T, In3T, In4T, In5T, In6T, In7T],
OutT]):
...
def __call__(self, _arg1: In1T, _arg2: In2T, _arg3: In3T, _arg4: In4T,
_arg5: In5T, _arg6: In6T, _arg7: In7T) -> OutT:
...
# No arg call; no args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[], OutT]) -> _CallNoArgs[OutT]:
...
# 1 arg call; 1 arg bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T], OutT], arg1: In1T) -> _CallNoArgs[OutT]:
...
# 1 arg call; no args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T], OutT]) -> _Call1Arg[In1T, OutT]:
...
# 2 arg call; 2 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T], OutT], arg1: In1T,
arg2: In2T) -> _CallNoArgs[OutT]:
...
# 2 arg call; 1 arg bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T], OutT],
arg1: In1T) -> _Call1Arg[In2T, OutT]:
...
# 2 arg call; no args bundled.
# noinspection PyPep8Naming
@overload
def Call(
call: Callable[[In1T, In2T],
OutT]) -> _Call2Args[In1T, In2T, OutT]:
...
# 3 arg call; 3 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T], OutT], arg1: In1T, arg2: In2T,
arg3: In3T) -> _CallNoArgs[OutT]:
...
# 3 arg call; 2 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T], OutT], arg1: In1T,
arg2: In2T) -> _Call1Arg[In3T, OutT]:
...
# 3 arg call; 1 arg bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T], OutT],
arg1: In1T) -> _Call2Args[In2T, In3T, OutT]:
...
# 3 arg call; no args bundled.
# noinspection PyPep8Naming
@overload
def Call(
call: Callable[[In1T, In2T, In3T], OutT]
) -> _Call3Args[In1T, In2T, In3T, OutT]:
...
# 4 arg call; 4 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T, In4T], OutT], arg1: In1T,
arg2: In2T, arg3: In3T, arg4: In4T) -> _CallNoArgs[OutT]:
...
# 5 arg call; 5 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T, In4T, In5T],
OutT], arg1: In1T, arg2: In2T, arg3: In3T,
arg4: In4T, arg5: In5T) -> _CallNoArgs[OutT]:
...
# 6 arg call; 6 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T, In4T, In5T, In6T],
OutT], arg1: In1T, arg2: In2T, arg3: In3T,
arg4: In4T, arg5: In5T, arg6: In6T) -> _CallNoArgs[OutT]:
...
# 7 arg call; 7 args bundled.
# noinspection PyPep8Naming
@overload
def Call(call: Callable[[In1T, In2T, In3T, In4T, In5T, In6T, In7T], OutT],
arg1: In1T, arg2: In2T, arg3: In3T, arg4: In4T, arg5: In5T,
arg6: In6T, arg7: In7T) -> _CallNoArgs[OutT]:
...
# noinspection PyPep8Naming
def Call(*_args: Any, **_keywds: Any) -> Any:
...
# (Type-safe Partial)
# A convenient wrapper around functools.partial which adds type-safety
# (though it does not support keyword arguments).
tpartial = Call
else:
tpartial = functools.partial
|
[
"typing.cast",
"typing.TypeVar"
] |
[((313, 342), 'typing.TypeVar', 'TypeVar', (['"""CT"""'], {'bound': 'Callable'}), "('CT', bound=Callable)\n", (320, 342), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2422, 2437), 'typing.TypeVar', 'TypeVar', (['"""In1T"""'], {}), "('In1T')\n", (2429, 2437), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2449, 2464), 'typing.TypeVar', 'TypeVar', (['"""In2T"""'], {}), "('In2T')\n", (2456, 2464), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2476, 2491), 'typing.TypeVar', 'TypeVar', (['"""In3T"""'], {}), "('In3T')\n", (2483, 2491), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2503, 2518), 'typing.TypeVar', 'TypeVar', (['"""In4T"""'], {}), "('In4T')\n", (2510, 2518), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2530, 2545), 'typing.TypeVar', 'TypeVar', (['"""In5T"""'], {}), "('In5T')\n", (2537, 2545), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2557, 2572), 'typing.TypeVar', 'TypeVar', (['"""In6T"""'], {}), "('In6T')\n", (2564, 2572), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2584, 2599), 'typing.TypeVar', 'TypeVar', (['"""In7T"""'], {}), "('In7T')\n", (2591, 2599), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((2611, 2626), 'typing.TypeVar', 'TypeVar', (['"""OutT"""'], {}), "('OutT')\n", (2618, 2626), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n'), ((530, 544), 'typing.cast', 'cast', (['CT', 'None'], {}), '(CT, None)\n', (534, 544), False, 'from typing import TYPE_CHECKING, TypeVar, Generic, Callable, cast\n')]
|
import cv2
import time
plate_cascade =cv2.CascadeClassifier('DATA/haarcascades/india_license_plate.xml') # Loads the data required for detecting the license plates from cascade classifier.
def detect_plate(img): # the function detects and perfors blurring on the number plate.
plate_img = img.copy()
roi = img.copy()
plate_rect = plate_cascade.detectMultiScale(plate_img, scaleFactor = 1.3, minNeighbors = 7) # detects numberplates and returns the coordinates and dimensions of detected license plate's contours.
for (x,y,w,h) in plate_rect:
roi_ = roi[y:y+h, x:x+w, :] # extracting the Region of Interest of license plate for blurring.
blurred_roi = cv2.blur(roi_, ksize=(16,16)) # performing blur operation on the ROI
plate_img[y:y+h, x:x+w, :] = blurred_roi # replacing the original license plate with the blurred one.
cv2.rectangle(plate_img, (x,y), (x+w, y+h), (51,51,255), 3) # finally representing the detected contours by drawing rectangles around the edges.
return plate_img # returning the processed image.
#####################- Run this part to take input directly from camera -#####################
# cam = cv2.VideoCapture(0)
# while True:
# ret, frame = cam.read(0) # reading the input frame by frame.
# fr = detect_plate(frame) # sends each frame to the function for processing.
# cv2.imshow('video', fr) # displaying the output image.
# if(cv2.waitKey(1) & 0xFF == 27): # press 'Esc' key to exit anytime.
# break
#####################- Run this part to take input from a video file -#####################
cam = cv2.VideoCapture('car_plate_720p.mp4') # reading the video file.
while cam.isOpened():
ret, frame = cam.read() # reading the file frame by frame.
if ret == True:
# time.sleep(1/20) # change the sleep time to increase the fps of output video.
fr = detect_plate(frame) # sends each frame to the function for processing.
cv2.imshow('video', fr) # displaying the output image.
if(cv2.waitKey(1) & 0xFF == 27): # press 'Esc' key to exit anytime.
break
else:
break
##########################################
cam.release()
cv2.destroyAllWindows()
|
[
"cv2.waitKey",
"cv2.imshow",
"cv2.blur",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.CascadeClassifier",
"cv2.destroyAllWindows"
] |
[((39, 105), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""DATA/haarcascades/india_license_plate.xml"""'], {}), "('DATA/haarcascades/india_license_plate.xml')\n", (60, 105), False, 'import cv2\n'), ((1637, 1675), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""car_plate_720p.mp4"""'], {}), "('car_plate_720p.mp4')\n", (1653, 1675), False, 'import cv2\n'), ((2236, 2259), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (2257, 2259), False, 'import cv2\n'), ((699, 729), 'cv2.blur', 'cv2.blur', (['roi_'], {'ksize': '(16, 16)'}), '(roi_, ksize=(16, 16))\n', (707, 729), False, 'import cv2\n'), ((887, 953), 'cv2.rectangle', 'cv2.rectangle', (['plate_img', '(x, y)', '(x + w, y + h)', '(51, 51, 255)', '(3)'], {}), '(plate_img, (x, y), (x + w, y + h), (51, 51, 255), 3)\n', (900, 953), False, 'import cv2\n'), ((1988, 2011), 'cv2.imshow', 'cv2.imshow', (['"""video"""', 'fr'], {}), "('video', fr)\n", (1998, 2011), False, 'import cv2\n'), ((2054, 2068), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (2065, 2068), False, 'import cv2\n')]
|
# -*- coding: utf-8 -*-
from dll import DLL
class Deque(object):
"""Python Implementation of Deque Data Structure"""
def __init__(self, iter=None):
"""Constructor Function for Deque."""
self.container = DLL()
if iter:
for val in iter:
self.container.append(val)
def append(self, val):
"""Add val to the end of the Deque."""
self.container.append(val)
def appendleft(self, val):
"""Add val to the start of the Deque."""
self.container.insert(val)
def pop(self):
"""Remove and return the value at the end of the Deque."""
rtn_val = self.container.shift()
if rtn_val is not None:
return rtn_val
raise AttributeError
def popleft(self):
"""Remove and the return the value at the front of the Deque."""
rtn_val = self.container.pop()
if rtn_val is not None:
return rtn_val
raise AttributeError
def peek(self):
"""Return the next value that would be popped."""
try:
return self.container.tail.val
except AttributeError:
return None
def peekleft(self):
"""Return the next value that would popleft."""
try:
return self.container.head.val
except AttributeError:
return None
def size(self):
"""Return the size of the Deque."""
return self.container.size()
|
[
"dll.DLL"
] |
[((230, 235), 'dll.DLL', 'DLL', ([], {}), '()\n', (233, 235), False, 'from dll import DLL\n')]
|
import numpy as np
import tensorflow as tf
from .Layer import Layer
from .initializers import zeros
class RNN(Layer):
def __init__(self, output_dim,
input_dim=None,
initializer='glorot_uniform',
recurrent_initializer='orthogonal',
recurrent_activation='tanh',
length_of_sequences=None,
return_sequence=False,
initial_state=None,
rng=None):
super().__init__()
self.output_dim = output_dim
self.input_dim = input_dim
self.initializer = initializer
self.recurrent_initializer = recurrent_initializer
self.recurrent_activation = \
self.activation_initializer(recurrent_activation)
self._length_of_sequences = length_of_sequences
self._return_sequence = return_sequence
self._initial_state = initial_state
self._use_mask = False
@property
def input_shape(self):
return (self._length_of_sequences, self.input_dim)
def compile(self):
input_dim = self.input_dim
output_dim = self.output_dim
initializer = self.initializer
recurrent_initializer = self.recurrent_initializer
self.W = self.kernel_initializer(initializer,
shape=(input_dim, output_dim),
name='W')
self.W_recurrent = \
self.kernel_initializer(recurrent_initializer,
shape=(output_dim, output_dim),
name='W_recurrent')
self.b = zeros((output_dim), name='b')
self.params = [self.W, self.W_recurrent, self.b]
def forward(self, x, **kwargs):
'''
# Arguments
mask: Tensor. Mask for padded value.
recurrent: boolean (default True).
Whether to loop the input sequence.
initial_state: (default None). Override self._initial_state.
'''
def _recurrent(state, elems):
if not self._use_mask:
x = elems
else:
x = elems[0]
mask = elems[1]
h = self.recurrent_activation(tf.matmul(x, self.W)
+ tf.matmul(state, self.W_recurrent)
+ self.b)
if not self._use_mask:
return h
else:
mask = mask[:, np.newaxis]
return mask * h + (1 - mask) * state
mask = kwargs['mask'] if 'mask' in kwargs else None
self._use_mask = True if mask is not None else False
recurr = kwargs['recurrent'] if 'recurrent' in kwargs else True
if 'initial_state' in kwargs:
initial_state = kwargs['initial_state']
else:
initial_state = self._initial_state
if initial_state is None:
initial_state = \
tf.matmul(x[:, 0, :],
tf.zeros((self.input_dim, self.output_dim)))
if not recurr:
if mask is None:
states = _recurrent(initial_state, x)
else:
states = _recurrent(initial_state, [x, mask])
return states
else:
if mask is None:
states = tf.scan(fn=_recurrent,
elems=tf.transpose(x, perm=[1, 0, 2]),
initializer=initial_state)
else:
mask = tf.transpose(mask)
states = tf.scan(fn=_recurrent,
elems=[tf.transpose(x, perm=[1, 0, 2]), mask],
initializer=initial_state)
if self._return_sequence is True:
return tf.transpose(states, perm=[1, 0, 2])
else:
return states[-1]
|
[
"tensorflow.matmul",
"tensorflow.zeros",
"tensorflow.transpose"
] |
[((3080, 3123), 'tensorflow.zeros', 'tf.zeros', (['(self.input_dim, self.output_dim)'], {}), '((self.input_dim, self.output_dim))\n', (3088, 3123), True, 'import tensorflow as tf\n'), ((3603, 3621), 'tensorflow.transpose', 'tf.transpose', (['mask'], {}), '(mask)\n', (3615, 3621), True, 'import tensorflow as tf\n'), ((3880, 3916), 'tensorflow.transpose', 'tf.transpose', (['states'], {'perm': '[1, 0, 2]'}), '(states, perm=[1, 0, 2])\n', (3892, 3916), True, 'import tensorflow as tf\n'), ((2277, 2297), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.W'], {}), '(x, self.W)\n', (2286, 2297), True, 'import tensorflow as tf\n'), ((2342, 2376), 'tensorflow.matmul', 'tf.matmul', (['state', 'self.W_recurrent'], {}), '(state, self.W_recurrent)\n', (2351, 2376), True, 'import tensorflow as tf\n'), ((3469, 3500), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 0, 2]'}), '(x, perm=[1, 0, 2])\n', (3481, 3500), True, 'import tensorflow as tf\n'), ((3710, 3741), 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[1, 0, 2]'}), '(x, perm=[1, 0, 2])\n', (3722, 3741), True, 'import tensorflow as tf\n')]
|
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ----------------------------------------------------------------------------
#
# The spectral_norm() function is provided under the following licence:
# MIT License
#
# Copyright (c) 2018 <NAME> (1993.01.12)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
def spectral_norm(w, iteration=1):
"""
Implementation of the Spectral Normalization layer by Junho Kim at:
https://github.com/taki0112/Spectral_Normalization-Tensorflow
Args:
w:
iteration:
Returns:
"""
w_shape = w.shape.as_list()
w = tf.reshape(w, [-1, w_shape[-1]])
u = tf.get_variable("u", [1, w_shape[-1]], initializer=tf.random_normal_initializer(), trainable=False)
u_hat = u
v_hat = None
for i in range(iteration):
# power iteration
# Usually iteration = 1 will be enough
v_ = tf.matmul(u_hat, tf.transpose(w))
v_hat = tf.nn.l2_normalize(v_)
u_ = tf.matmul(v_hat, w)
u_hat = tf.nn.l2_normalize(u_)
u_hat = tf.stop_gradient(u_hat)
v_hat = tf.stop_gradient(v_hat)
sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
with tf.control_dependencies([u.assign(u_hat)]):
w_norm = w / sigma
w_norm = tf.reshape(w_norm, w_shape)
return w_norm
def spectral_norm_conv2d(incoming, filters, kernel_size=3, stride=1, padding='same', iteration=1, scope='sn_conv'):
"""
Wrapper to SpectralNormConv2D class.
Args:
incoming (tensor): input tensor
filters (int): number of filters
kernel_size (int): kernel size for the convolutional layer
stride (int): stride for the convolutional layer
padding (str): padding to apply to the convolved tensor
iteration (int): power iteration. Usually iteration = 1 will be enough.
scope (string): variable scope (optional)
Returns:
A tensor with the same shape as the input.
"""
conv_norm = SpectralNormConv2D(filters, kernel_size, stride, padding, iteration, scope)
return conv_norm.call(incoming)
class SpectralNormConv2D(tf.keras.layers.Layer):
def __init__(self, filters, kernel_size=3, stride=1, padding='same', iteration=1, scope='sn_conv'):
"""
Convolutional layer containing a wrapper to the spectral_norm() operation.
Args:
filters (int): number of filters
kernel_size (int): kernel size for the convolutional layer
stride (int): stride for the convolutional layer
padding (str): padding to apply to the convolved tensor
iteration (int): power iteration. Usually iteration = 1 will be enough.
scope (string): variable scope (optional)
"""
super(SpectralNormConv2D, self).__init__()
self.filters = filters
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding.upper()
self.iteration = iteration
self.scope = scope
def call(self, inputs, **kwargs):
"""
Call to the layer.
Args:
inputs (tensor): input tensor
**kwargs:
Returns:
A tensor with the same shape as the input.
"""
with tf.variable_scope(self.scope):
w = tf.get_variable("kernel", shape=[self.kernel_size, self.kernel_size, inputs.get_shape()[-1],
self.filters])
b = tf.get_variable("bias", [self.filters], initializer=tf.constant_initializer(0.0))
conv_norm = tf.nn.conv2d(input=inputs, filter=spectral_norm(w, self.iteration),
strides=[1, self.stride, self.stride, 1], padding=self.padding) + b
return conv_norm
|
[
"tensorflow.constant_initializer",
"tensorflow.stop_gradient",
"tensorflow.reshape",
"tensorflow.nn.l2_normalize",
"tensorflow.variable_scope",
"tensorflow.transpose",
"tensorflow.matmul",
"tensorflow.random_normal_initializer"
] |
[((2163, 2195), 'tensorflow.reshape', 'tf.reshape', (['w', '[-1, w_shape[-1]]'], {}), '(w, [-1, w_shape[-1]])\n', (2173, 2195), True, 'import tensorflow as tf\n'), ((2614, 2637), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['u_hat'], {}), '(u_hat)\n', (2630, 2637), True, 'import tensorflow as tf\n'), ((2650, 2673), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['v_hat'], {}), '(v_hat)\n', (2666, 2673), True, 'import tensorflow as tf\n'), ((2505, 2527), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['v_'], {}), '(v_)\n', (2523, 2527), True, 'import tensorflow as tf\n'), ((2542, 2561), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (2551, 2561), True, 'import tensorflow as tf\n'), ((2578, 2600), 'tensorflow.nn.l2_normalize', 'tf.nn.l2_normalize', (['u_'], {}), '(u_)\n', (2596, 2600), True, 'import tensorflow as tf\n'), ((2697, 2716), 'tensorflow.matmul', 'tf.matmul', (['v_hat', 'w'], {}), '(v_hat, w)\n', (2706, 2716), True, 'import tensorflow as tf\n'), ((2718, 2737), 'tensorflow.transpose', 'tf.transpose', (['u_hat'], {}), '(u_hat)\n', (2730, 2737), True, 'import tensorflow as tf\n'), ((2837, 2864), 'tensorflow.reshape', 'tf.reshape', (['w_norm', 'w_shape'], {}), '(w_norm, w_shape)\n', (2847, 2864), True, 'import tensorflow as tf\n'), ((2256, 2286), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', ([], {}), '()\n', (2284, 2286), True, 'import tensorflow as tf\n'), ((2472, 2487), 'tensorflow.transpose', 'tf.transpose', (['w'], {}), '(w)\n', (2484, 2487), True, 'import tensorflow as tf\n'), ((4831, 4860), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.scope'], {}), '(self.scope)\n', (4848, 4860), True, 'import tensorflow as tf\n'), ((5103, 5131), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (5126, 5131), True, 'import tensorflow as tf\n')]
|
import os
import pytest
import subprocess
import ssl
import time
import trustme
import bmemcached
import test_simple_functions
ca = trustme.CA()
server_cert = ca.issue_cert(os.environ["MEMCACHED_HOST"] + u"")
@pytest.yield_fixture(scope="module", autouse=True)
def memcached_tls():
key = server_cert.private_key_pem
cert = server_cert.cert_chain_pems[0]
with cert.tempfile() as c, key.tempfile() as k:
p = subprocess.Popen(
[
"memcached",
"-p5001",
"-Z",
"-o",
"ssl_key={}".format(k),
"-o",
"ssl_chain_cert={}".format(c),
"-o",
"ssl_verify_mode=1",
],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
time.sleep(0.1)
if p.poll() is not None:
pytest.skip("Memcached server is not built with TLS support.")
yield p
p.kill()
p.wait()
class TLSMemcachedTests(test_simple_functions.MemcachedTests):
"""
Same tests as above, just make sure it works with TLS.
"""
def setUp(self):
ctx = ssl.create_default_context()
ca.configure_trust(ctx)
self.server = "{}:5001".format(os.environ["MEMCACHED_HOST"])
self.client = bmemcached.Client(self.server, tls_context=ctx)
self.reset()
|
[
"pytest.yield_fixture",
"ssl.create_default_context",
"trustme.CA",
"pytest.skip",
"time.sleep",
"bmemcached.Client"
] |
[((135, 147), 'trustme.CA', 'trustme.CA', ([], {}), '()\n', (145, 147), False, 'import trustme\n'), ((215, 265), 'pytest.yield_fixture', 'pytest.yield_fixture', ([], {'scope': '"""module"""', 'autouse': '(True)'}), "(scope='module', autouse=True)\n", (235, 265), False, 'import pytest\n'), ((836, 851), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (846, 851), False, 'import time\n'), ((1188, 1216), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (1214, 1216), False, 'import ssl\n'), ((1342, 1389), 'bmemcached.Client', 'bmemcached.Client', (['self.server'], {'tls_context': 'ctx'}), '(self.server, tls_context=ctx)\n', (1359, 1389), False, 'import bmemcached\n'), ((898, 960), 'pytest.skip', 'pytest.skip', (['"""Memcached server is not built with TLS support."""'], {}), "('Memcached server is not built with TLS support.')\n", (909, 960), False, 'import pytest\n')]
|
import numpy as np
from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples
from sklearn.utils.validation import _deprecate_positional_args
# https://www.kaggle.com/marketneutral/purged-time-series-cv-xgboost-optuna/data
# modified code for group gaps; source
# https://github.com/getgaurav2/scikit-learn/blob/d4a3af5cc9da3a76f0266932644b884c99724c57/sklearn/model_selection/_split.py#L2243
class PurgedGroupTimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator variant with non-overlapping groups.
Allows for a gap in groups to avoid potentially leaking info from
train into test if the model has windowed or lag features.
Provides train/test indices to split time series data samples
that are observed at fixed time intervals according to a
third-party provided group.
In each split, test indices must be higher than before, and thus shuffling
in cross validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=5
Number of splits. Must be at least 2.
max_train_group_size : int, default=Inf
Maximum group size for a single training set.
group_gap : int, default=None
Gap between train and test
max_test_group_size : int, default=Inf
We discard this number of groups from the end of each train split
"""
@_deprecate_positional_args
def __init__(self,
n_splits=5,
*,
max_train_group_size=np.inf,
max_test_group_size=np.inf,
group_gap=None,
verbose=False
):
super().__init__(n_splits, shuffle=False, random_state=None)
self.max_train_group_size = max_train_group_size
self.group_gap = group_gap
self.max_test_group_size = max_test_group_size
self.verbose = verbose
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like of shape (n_samples,)
Always ignored, exists for compatibility.
groups : array-like of shape (n_samples,)
Group labels for the samples used while splitting the dataset into
train/test set.
Yields
------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
if groups is None:
raise ValueError(
"The 'groups' parameter should not be None")
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
group_gap = self.group_gap
max_test_group_size = self.max_test_group_size
max_train_group_size = self.max_train_group_size
n_folds = n_splits + 1
group_dict = {}
u, ind = np.unique(groups, return_index=True)
unique_groups = u[np.argsort(ind)]
n_samples = _num_samples(X)
n_groups = _num_samples(unique_groups)
for idx in np.arange(n_samples):
if (groups[idx] in group_dict):
group_dict[groups[idx]].append(idx)
else:
group_dict[groups[idx]] = [idx]
if n_folds > n_groups:
raise ValueError(
("Cannot have number of folds={0} greater than"
" the number of groups={1}").format(n_folds,
n_groups))
group_test_size = min(n_groups // n_folds, max_test_group_size)
group_test_starts = range(n_groups - n_splits * group_test_size,
n_groups, group_test_size)
for group_test_start in group_test_starts:
train_array = []
test_array = []
group_st = max(0, group_test_start - group_gap - max_train_group_size)
for train_group_idx in unique_groups[group_st:(group_test_start - group_gap)]:
train_array_tmp = group_dict[train_group_idx]
train_array = np.sort(np.unique(
np.concatenate((train_array,
train_array_tmp)),
axis=None), axis=None)
# train_end = train_array.size
for test_group_idx in unique_groups[group_test_start:
group_test_start +
group_test_size]:
test_array_tmp = group_dict[test_group_idx]
test_array = np.sort(np.unique(
np.concatenate((test_array,
test_array_tmp)),
axis=None), axis=None)
# test_array = test_array[group_gap:]
if self.verbose > 0:
pass
yield [int(i) for i in train_array], [int(i) for i in test_array]
|
[
"sklearn.model_selection._split.indexable",
"numpy.concatenate",
"sklearn.model_selection._split._num_samples",
"numpy.argsort",
"numpy.arange",
"numpy.unique"
] |
[((3291, 3314), 'sklearn.model_selection._split.indexable', 'indexable', (['X', 'y', 'groups'], {}), '(X, y, groups)\n', (3300, 3314), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3335, 3350), 'sklearn.model_selection._split._num_samples', '_num_samples', (['X'], {}), '(X)\n', (3347, 3350), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3603, 3639), 'numpy.unique', 'np.unique', (['groups'], {'return_index': '(True)'}), '(groups, return_index=True)\n', (3612, 3639), True, 'import numpy as np\n'), ((3703, 3718), 'sklearn.model_selection._split._num_samples', '_num_samples', (['X'], {}), '(X)\n', (3715, 3718), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3738, 3765), 'sklearn.model_selection._split._num_samples', '_num_samples', (['unique_groups'], {}), '(unique_groups)\n', (3750, 3765), False, 'from sklearn.model_selection._split import _BaseKFold, indexable, _num_samples\n'), ((3785, 3805), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (3794, 3805), True, 'import numpy as np\n'), ((3666, 3681), 'numpy.argsort', 'np.argsort', (['ind'], {}), '(ind)\n', (3676, 3681), True, 'import numpy as np\n'), ((4860, 4906), 'numpy.concatenate', 'np.concatenate', (['(train_array, train_array_tmp)'], {}), '((train_array, train_array_tmp))\n', (4874, 4906), True, 'import numpy as np\n'), ((5421, 5465), 'numpy.concatenate', 'np.concatenate', (['(test_array, test_array_tmp)'], {}), '((test_array, test_array_tmp))\n', (5435, 5465), True, 'import numpy as np\n')]
|
import xlearn as xl
import config
# Training task
ffm_model = xl.create_ffm() # Use field-aware factorization machine
ffm_model.disableEarlyStop()
ffm_model.setTrain("./train_ffm.txt") # Training data
ffm_model.setValidate("./valid_ffm.txt") # Validation data
# param:
# 0. binary classification
# 1. learning rate: 0.2
# 2. regular lambda: 0.002
# 3. evaluation metric: accuracy
param = {'task': 'binary', 'lr': 0.1,
'lambda': 0.002, 'metric': 'auc',
'opt': 'ftrl', 'epoch': 20}
# param = {'task': 'binary', 'lr': 0.15,
# 'lambda': 0.00002, 'metric': 'auc',
# 'opt': 'ftrl', 'epoch': 10}
# Start to train
# The trained model will be stored in model.out
ffm_model.fit(param, './model.out')
# Prediction task
ffm_model.setTest("./test_ffm.txt") # Test data
ffm_model.setSigmoid() # Convert output to 0-1
# Start to predict
# The output result will be stored in output.txt
ffm_model.predict("./model.out", "./output.txt")
|
[
"xlearn.create_ffm"
] |
[((63, 78), 'xlearn.create_ffm', 'xl.create_ffm', ([], {}), '()\n', (76, 78), True, 'import xlearn as xl\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 24 08:54:07 2018
@author: bwhe
"""
import ast
import numpy as np
import pandas as pd
import gc
import lightgbm as lgb
import pickle
import time
import w2v
from itertools import repeat
def remove_iteral(sentence):
return ast.literal_eval(sentence)
readfile = './cf2xgb_pred1_v1.csv.gz'
savefile = 'cf2xgb_1_pred1_v2.csv.gz'
df_test = pd.read_csv(readfile, usecols=['pid', 'pred', 'scores'], nrows=10)
df_test = df_test.rename(columns={'scores': 'cf_prob'})
w2v_test_pids = df_test.pid.unique()
# save for later prediction
final_df_test = df_test[['pid']]
df_test['pred'] = df_test['pred'].apply(remove_iteral)
df_test['cf_prob'] = df_test['cf_prob'].apply(remove_iteral)
''' convert the list mode to column mode '''
result_test = pd.DataFrame([(tup.pid, pred, cf_prob) for tup in df_test.itertuples()
for pred, cf_prob in zip(tup.pred, tup.cf_prob)])
result_test = result_test.fillna(0)
result_test.columns = ['pid', 'track_uri', 'cf_prob']
del df_test
gc.collect()
''' add relative cf probability score '''
def add_avg_cfscore(data):
tmp = data.groupby('pid')['cf_prob'].mean().reset_index().rename(columns={'cf_prob':'cf_avg_prob'})
data = data.merge(tmp, left_on=['pid'], right_on=['pid'], how='left')
data['cf_rlt_prob'] = data['cf_prob'] / data['cf_avg_prob']
data = data.drop(['cf_avg_prob'], axis=1)
return data
start_time = time.time()
result_test = add_avg_cfscore(result_test)
print("average score --- %s seconds ---" % (time.time() - start_time))
''' add song frequency '''
start_time = time.time()
songfreq = pd.read_csv('../data/songfreq.csv.gz')
result_test = result_test.merge(songfreq, left_on=['track_uri'], right_on=['track_uri'], how='left')
print("songfreq merge --- %s seconds ---" % (time.time() - start_time))
''' add album uri '''
start_time = time.time()
with open('../data/song2album.pkl', 'rb') as f:
song2album = pickle.load(f)
tracks_test = result_test['track_uri'].values
album_uri = [song2album[track] for track in tracks_test]
result_test['album_uri'] = album_uri
print("add album uri --- %s seconds ---" % (time.time() - start_time))
del album_uri, song2album
gc.collect()
''' add track uri '''
start_time = time.time()
with open('../data/song2artist.pkl', 'rb') as f:
song2artist = pickle.load(f)
artist_uri_test = [song2artist[track] for track in tracks_test]
result_test['artist_uri'] = artist_uri_test
print("add artist uri --- %s seconds ---" % (time.time() - start_time))
del artist_uri_test, song2artist
gc.collect()
pids_test = result_test['pid']
''' add similarity between playlist name and track name '''
with open('../data/song2names.pkl', 'rb') as f:
song2names = pickle.load(f)
song_names_test = [song2names[track] for track in tracks_test]
del song2names
gc.collect()
with open('../data/test_pid2more_clean_name.pkl', 'rb') as f:
pid2names = pickle.load(f)
pid_names_test = [pid2names[pid] for pid in pids_test]
del pid2names
gc.collect()
from difflib import SequenceMatcher
def similar(var):
a = var[0]
b = var[1]
a = str(a).lower()
b = str(b).lower()
return SequenceMatcher(None, a, b).ratio()
#name_sim = [similar(str(a).lower(), str(b)) for a, b in zip(song_names, pid_names)]
start_time = time.time()
name_sim_test = list(map(similar, zip(song_names_test, pid_names_test)))
result_test['name_sim'] = name_sim_test
print("calculate track name similarity --- %s seconds ---" % (time.time() - start_time))
''' add similarity between playlist name and album name '''
with open('../data/song2album_name.pkl', 'rb') as f:
song2album_names = pickle.load(f)
album_names_test = [song2album_names[track] for track in tracks_test]
start_time = time.time()
album_sim_test = list(map(similar, zip(album_names_test, pid_names_test)))
result_test['album_sim'] = album_sim_test
print("calculate album similarity --- %s seconds ---" % (time.time() - start_time))
del song2album_names, album_names_test
gc.collect()
''' add similarity between playlist name and artist name '''
start_time = time.time()
with open('../data/song2artist_name.pkl', 'rb') as f:
song2artist_names = pickle.load(f)
artist_names_test = [song2artist_names[track] for track in tracks_test]
artist_sim_test = list(map(similar, zip(artist_names_test, pid_names_test)))
result_test['artist_sim'] = artist_sim_test
print("calculate artist name similarity --- %s seconds ---" % (time.time() - start_time))
del song2artist_names, artist_names_test
gc.collect()
''' add similarity '''
from gensim.models import Word2Vec
w2v.build_track_w2v(w2v_test_pids)
w2v.build_album_w2v(w2v_test_pids)
w2v.build_artist_w2v(w2v_test_pids)
model1 = Word2Vec.load('../data/w2v_model1.bin')
model2 = Word2Vec.load('../data/w2v_model2.bin')
model3 = Word2Vec.load('../data/w2v_model3.bin')
with open('../data/song2album.pkl', 'rb') as f:
song2album = pickle.load(f)
with open('../data/song2artist.pkl', 'rb') as f:
song2artist = pickle.load(f)
def remove_iteral(sentence):
return ast.literal_eval(sentence)
df = pd.read_csv(readfile, usecols=['pid','pos_songs'], nrows=None)
df['pos_songs'] = df['pos_songs'].apply(remove_iteral)
result_test = result_test.merge(df, left_on=['pid'], right_on=['pid'], how='left')
def track_sim(var):
pos_song = var[0]
track = var[1]
try:
return model1.wv.similarity(str(pos_song), str(track))
except:
return 0
def album_sim(var):
pos_song = var[0]
track = var[1]
try:
return model2.wv.similarity(str(song2album[pos_song]), str(song2album[track]))
except:
return 0
def artist_sim(var):
pos_song = var[0]
track = var[1]
try:
return model3.wv.similarity(str(song2artist[pos_song]), str(song2artist[track]))
except:
return 0
def w2v_sim(tup):
pos_songs = tup.pos_songs
track = tup.track_uri
track_scores = list(map(track_sim, zip(pos_songs, repeat(track))))
album_scores = list(map(album_sim, zip(pos_songs, repeat(track))))
artist_scores = list(map(artist_sim, zip(pos_songs, repeat(track))))
return np.mean(track_scores), np.mean(album_scores), np.mean(artist_scores)
def add_w2v_sim(data):
track_w2v_sim_arr = []
album_w2v_sim_arr = []
artist_w2v_sim_arr = []
for tup in data.itertuples():
track_score, album_score, artist_score = w2v_sim(tup)
track_w2v_sim_arr.append(track_score)
album_w2v_sim_arr.append(album_score)
artist_w2v_sim_arr.append(artist_score)
data['w2v_track_sim'] = track_w2v_sim_arr
data['w2v_album_sim'] = album_w2v_sim_arr
data['w2v_artist_sim'] = artist_w2v_sim_arr
return data
start_time = time.time()
result_test = add_w2v_sim(result_test)
print("track w2v similarity --- %s seconds ---" % (time.time() - start_time))
result_test.to_csv(savefile, index=False, compression='gzip')
|
[
"pandas.read_csv",
"time.time",
"w2v.build_artist_w2v",
"gc.collect",
"difflib.SequenceMatcher",
"pickle.load",
"numpy.mean",
"w2v.build_album_w2v",
"ast.literal_eval",
"gensim.models.Word2Vec.load",
"w2v.build_track_w2v",
"itertools.repeat"
] |
[((419, 485), 'pandas.read_csv', 'pd.read_csv', (['readfile'], {'usecols': "['pid', 'pred', 'scores']", 'nrows': '(10)'}), "(readfile, usecols=['pid', 'pred', 'scores'], nrows=10)\n", (430, 485), True, 'import pandas as pd\n'), ((1086, 1098), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1096, 1098), False, 'import gc\n'), ((1495, 1506), 'time.time', 'time.time', ([], {}), '()\n', (1504, 1506), False, 'import time\n'), ((1667, 1678), 'time.time', 'time.time', ([], {}), '()\n', (1676, 1678), False, 'import time\n'), ((1691, 1729), 'pandas.read_csv', 'pd.read_csv', (['"""../data/songfreq.csv.gz"""'], {}), "('../data/songfreq.csv.gz')\n", (1702, 1729), True, 'import pandas as pd\n'), ((1946, 1957), 'time.time', 'time.time', ([], {}), '()\n', (1955, 1957), False, 'import time\n'), ((2285, 2297), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2295, 2297), False, 'import gc\n'), ((2337, 2348), 'time.time', 'time.time', ([], {}), '()\n', (2346, 2348), False, 'import time\n'), ((2653, 2665), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2663, 2665), False, 'import gc\n'), ((2926, 2938), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2936, 2938), False, 'import gc\n'), ((3110, 3122), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3120, 3122), False, 'import gc\n'), ((3411, 3422), 'time.time', 'time.time', ([], {}), '()\n', (3420, 3422), False, 'import time\n'), ((3873, 3884), 'time.time', 'time.time', ([], {}), '()\n', (3882, 3884), False, 'import time\n'), ((4132, 4144), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4142, 4144), False, 'import gc\n'), ((4223, 4234), 'time.time', 'time.time', ([], {}), '()\n', (4232, 4234), False, 'import time\n'), ((4662, 4674), 'gc.collect', 'gc.collect', ([], {}), '()\n', (4672, 4674), False, 'import gc\n'), ((4740, 4774), 'w2v.build_track_w2v', 'w2v.build_track_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4759, 4774), False, 'import w2v\n'), ((4776, 4810), 'w2v.build_album_w2v', 'w2v.build_album_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4795, 4810), False, 'import w2v\n'), ((4812, 4847), 'w2v.build_artist_w2v', 'w2v.build_artist_w2v', (['w2v_test_pids'], {}), '(w2v_test_pids)\n', (4832, 4847), False, 'import w2v\n'), ((4860, 4899), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model1.bin"""'], {}), "('../data/w2v_model1.bin')\n", (4873, 4899), False, 'from gensim.models import Word2Vec\n'), ((4910, 4949), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model2.bin"""'], {}), "('../data/w2v_model2.bin')\n", (4923, 4949), False, 'from gensim.models import Word2Vec\n'), ((4960, 4999), 'gensim.models.Word2Vec.load', 'Word2Vec.load', (['"""../data/w2v_model3.bin"""'], {}), "('../data/w2v_model3.bin')\n", (4973, 4999), False, 'from gensim.models import Word2Vec\n'), ((5262, 5325), 'pandas.read_csv', 'pd.read_csv', (['readfile'], {'usecols': "['pid', 'pos_songs']", 'nrows': 'None'}), "(readfile, usecols=['pid', 'pos_songs'], nrows=None)\n", (5273, 5325), True, 'import pandas as pd\n'), ((6938, 6949), 'time.time', 'time.time', ([], {}), '()\n', (6947, 6949), False, 'import time\n'), ((298, 324), 'ast.literal_eval', 'ast.literal_eval', (['sentence'], {}), '(sentence)\n', (314, 324), False, 'import ast\n'), ((2025, 2039), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2036, 2039), False, 'import pickle\n'), ((2418, 2432), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2429, 2432), False, 'import pickle\n'), ((2830, 2844), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2841, 2844), False, 'import pickle\n'), ((3023, 3037), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3034, 3037), False, 'import pickle\n'), ((3771, 3785), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (3782, 3785), False, 'import pickle\n'), ((4315, 4329), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4326, 4329), False, 'import pickle\n'), ((5075, 5089), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5086, 5089), False, 'import pickle\n'), ((5165, 5179), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5176, 5179), False, 'import pickle\n'), ((5224, 5250), 'ast.literal_eval', 'ast.literal_eval', (['sentence'], {}), '(sentence)\n', (5240, 5250), False, 'import ast\n'), ((6341, 6362), 'numpy.mean', 'np.mean', (['track_scores'], {}), '(track_scores)\n', (6348, 6362), True, 'import numpy as np\n'), ((6364, 6385), 'numpy.mean', 'np.mean', (['album_scores'], {}), '(album_scores)\n', (6371, 6385), True, 'import numpy as np\n'), ((6387, 6409), 'numpy.mean', 'np.mean', (['artist_scores'], {}), '(artist_scores)\n', (6394, 6409), True, 'import numpy as np\n'), ((1596, 1607), 'time.time', 'time.time', ([], {}), '()\n', (1605, 1607), False, 'import time\n'), ((1878, 1889), 'time.time', 'time.time', ([], {}), '()\n', (1887, 1889), False, 'import time\n'), ((2228, 2239), 'time.time', 'time.time', ([], {}), '()\n', (2237, 2239), False, 'import time\n'), ((2589, 2600), 'time.time', 'time.time', ([], {}), '()\n', (2598, 2600), False, 'import time\n'), ((3275, 3302), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'a', 'b'], {}), '(None, a, b)\n', (3290, 3302), False, 'from difflib import SequenceMatcher\n'), ((3601, 3612), 'time.time', 'time.time', ([], {}), '()\n', (3610, 3612), False, 'import time\n'), ((4062, 4073), 'time.time', 'time.time', ([], {}), '()\n', (4071, 4073), False, 'import time\n'), ((4590, 4601), 'time.time', 'time.time', ([], {}), '()\n', (4599, 4601), False, 'import time\n'), ((7042, 7053), 'time.time', 'time.time', ([], {}), '()\n', (7051, 7053), False, 'import time\n'), ((6166, 6179), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6172, 6179), False, 'from itertools import repeat\n'), ((6238, 6251), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6244, 6251), False, 'from itertools import repeat\n'), ((6312, 6325), 'itertools.repeat', 'repeat', (['track'], {}), '(track)\n', (6318, 6325), False, 'from itertools import repeat\n')]
|
from django.apps import apps
from rest_framework import serializers
from config.settings import TAG_COUNT_MODELS, CATEGORY_COUNT_MODELS
from user.serializers import BasicUserSerializer
from .models import *
class TagsField(serializers.Field):
'''
comma-separated tags
'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def to_representation(self, obj):
return [TagSerializer(x).data for x in obj.all()]
def to_internal_value(self, data):
return Tag.tag_objects(data)
class CategorySerializer(serializers.ModelSerializer):
count = serializers.SerializerMethodField()
class Meta:
model = Category
fields = '__all__'
read_only_fields = ('count',)
def get_count(self, obj):
count = 0
for app_label, model_name in TAG_COUNT_MODELS:
count += obj.get_count(apps.get_model(app_label, model_name).objects.all())
return count
class TagSerializer(serializers.ModelSerializer):
count = serializers.SerializerMethodField()
class Meta:
model = Tag
fields = '__all__'
read_only_fields = ('count',)
def get_count(self, obj):
count = 0
for app_label, model_name in CATEGORY_COUNT_MODELS:
count += obj.get_count(apps.get_model(app_label, model_name).objects.all())
return count
class ContentBaseSerializer(serializers.ModelSerializer):
'''
A base serializer class for subclasses of ContentMetadata. For example:
class PostSerializer(ContentBaseSerializer):
class Meta(ContentBaseSerializer.Meta):
model = Post
'''
tags = TagsField(required=False)
category = serializers.SlugRelatedField(slug_field='name', allow_null=True,
queryset=Category.objects.all(), required=False)
author = BasicUserSerializer(required=False, read_only=True)
class Meta:
fields = '__all__'
read_only_fields = ('id', 'creation_time', 'modification_time', 'author', 'archived')
|
[
"user.serializers.BasicUserSerializer",
"django.apps.apps.get_model",
"rest_framework.serializers.SerializerMethodField"
] |
[((613, 648), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (646, 648), False, 'from rest_framework import serializers\n'), ((1033, 1068), 'rest_framework.serializers.SerializerMethodField', 'serializers.SerializerMethodField', ([], {}), '()\n', (1066, 1068), False, 'from rest_framework import serializers\n'), ((1887, 1938), 'user.serializers.BasicUserSerializer', 'BasicUserSerializer', ([], {'required': '(False)', 'read_only': '(True)'}), '(required=False, read_only=True)\n', (1906, 1938), False, 'from user.serializers import BasicUserSerializer\n'), ((895, 932), 'django.apps.apps.get_model', 'apps.get_model', (['app_label', 'model_name'], {}), '(app_label, model_name)\n', (909, 932), False, 'from django.apps import apps\n'), ((1315, 1352), 'django.apps.apps.get_model', 'apps.get_model', (['app_label', 'model_name'], {}), '(app_label, model_name)\n', (1329, 1352), False, 'from django.apps import apps\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'han'
import os
import h5py
import math
import torch
import torch.utils.data
from torch.utils.data.sampler import Sampler, SequentialSampler
import logging
import pandas as pd
from dataset.preprocess_data import PreprocessData
from utils.functions import *
logger = logging.getLogger(__name__)
class SquadDataset:
"""
dataset module for SQuAD
"""
def __init__(self, global_config):
self._data = {}
self._attr = {}
self.meta_data = {}
self.global_config = global_config
# whether preprocessing squad dataset
is_exist_dataset_h5 = os.path.exists(self.global_config['data']['dataset_h5'])
assert is_exist_dataset_h5, 'not found dataset hdf5 file in %s' % self.global_config['data']['dataset_h5']
self._load_hdf5()
def _load_hdf5(self):
"""
load squad hdf5 file
:return:
"""
squad_h5_path = self.global_config['data']['dataset_h5']
with h5py.File(squad_h5_path, 'r') as f:
f_data = f['data']
for name in ['train', 'dev']:
self._data[name] = {}
for sub_name in ['answer_range', 'samples_id']:
self._data[name][sub_name] = np.array(f_data[name][sub_name])
for sub_name in ['context', 'question']:
cur_data = f_data[name][sub_name]
self._data[name][sub_name] = {}
# 'token', 'pos', 'ent', 'em', 'em_lemma', 'right_space'
for subsub_name in cur_data.keys():
self._data[name][sub_name][subsub_name] = np.array(cur_data[subsub_name])
for key, value in f.attrs.items():
self._attr[key] = value
# 'id2word', 'id2char', 'id2pos', 'id2ent'
for key in f['meta_data'].keys():
self.meta_data[key] = np.array(f['meta_data'][key])
self._char2id = dict(zip(self.meta_data['id2char'],
range(len(self.meta_data['id2char']))))
def get_dataloader_train(self, batch_size, num_workers):
"""
a train data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'train', num_workers, shuffle=True)
def get_dataloader_dev(self, batch_size, num_workers):
"""
a dev data dataloader
:param batch_size:
:return:
"""
return self.get_dataloader(batch_size, 'dev', num_workers, shuffle=False)
def get_dataloader(self, batch_size, type, num_workers, shuffle):
"""
get dataloader on train or dev dataset
:param batch_size:
:param type: 'train' or 'dev'
:return:
"""
data = self._data[type]
dataset = CQA_Dataset(data['context'],
data['question'],
data['answer_range'],
self.meta_data,
self.global_config['preprocess'])
if shuffle:
sampler = SortedBatchSampler(dataset.get_lengths(), batch_size)
else:
sampler = SequentialSampler(dataset)
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
sampler=sampler,
collate_fn=self.collect_fun,
num_workers=num_workers)
return dataloader
def collect_fun(self, batch):
"""
collect function for DataLoader, will generate char idx currently
:param batch:
:return:
"""
context = []
context_f = []
question = []
question_f = []
answer_range = []
for ele in batch:
context.append(ele[0])
question.append(ele[1])
context_f.append(ele[2])
question_f.append(ele[3])
answer_range.append(ele[4])
# word idx
bat_context, max_ct_len = del_zeros_right(torch.stack(context, dim=0))
bat_question, max_qt_len = del_zeros_right(torch.stack(question, dim=0))
bat_answer, _ = del_zeros_right(torch.stack(answer_range, dim=0))
# additional features
bat_context_f = None
bat_question_f = None
if context_f[0] is not None:
bat_context_f = torch.stack(context_f, dim=0)[:, 0:max_ct_len, :]
bat_question_f = torch.stack(question_f, dim=0)[:, 0:max_qt_len, :]
# generate char idx
bat_context_char = None
bat_question_char = None
if self.global_config['preprocess']['use_char']:
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
return bat_context, bat_question, bat_context_char, bat_question_char, bat_context_f, bat_question_f, bat_answer
def get_batch_train(self, batch_size):
"""
a train data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_train`.
"""
return self.get_batch_data(batch_size, 'train')
def get_batch_dev(self, batch_size):
"""
development data batch
.. warning::
This method is now deprecated in favor of
:func:`get_dataloader_dev`.
"""
return self.get_batch_data(batch_size, 'dev')
def get_batch_data(self, batch_size, type):
"""
same with BatchSampler
.. warning::
This method is now deprecated in favor of
:func:`BatchSampler` and `get_dataloader`.
"""
data = self._data[type]
data_size = len(data['context'])
i = 0
while i < data_size:
j = min(i + batch_size, data_size)
bat = [data['context'][i:j], data['question'][i:j], data['answer_range'][i:j]]
bat_tensor = [to_long_tensor(x) for x in bat]
i = j
yield bat_tensor
def get_all_samples_id_train(self):
return self.get_all_samples_id('train')
def get_all_samples_id_dev(self):
return self.get_all_samples_id('dev')
def get_all_samples_id(self, type):
"""
get samples id of 'train' or 'dev' data
:param type:
:return:
"""
data = self._data[type]
return data['samples_id']
def get_all_ct_right_space_train(self):
return self.get_all_ct_right_space('train')
def get_all_ct_right_space_dev(self):
return self.get_all_ct_right_space('dev')
def get_all_ct_right_space(self, type):
data = self._data[type]
return data['context']['right_space']
def get_train_batch_cnt(self, batch_size):
"""
get count of train batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['train_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def get_dev_batch_cnt(self, batch_size):
"""
get count of dev batches
:param batch_size: single batch size
:return: count
"""
data_size = self._attr['dev_size']
cnt_batch = math.ceil(data_size * 1.0 / batch_size)
return cnt_batch
def _batch_word_to_char(self, batch_wordid):
"""
transform batch with sentence of wordid to batch data with sentence of char id
:param batch_wordid: (batch, seq_len), torch tensor
:return: (batch, seq_len, word_len), torch tensor
"""
batch_wordid = batch_wordid.numpy()
batch_word = [self.sentence_id2word(x) for x in batch_wordid]
batch_length = [[len(x) if x != PreprocessData.padding else 0 for x in s] for s in batch_word]
batch_max_len = np.max(batch_length)
batch_char = list(map(lambda x: self.sentence_char2id(x, max_len=batch_max_len), batch_word))
batch_char = np.stack(batch_char, axis=0)
return to_long_tensor(batch_char)
def gen_batch_with_char(self, batch_data, enable_char, device):
"""
word batch to generate char barch, also move to device, used in train or valid steps
.. warning::
This method is now deprecated in favor of collect function in DataLoader
"""
batch_data = [del_zeros_right(x)[0] for x in batch_data]
if not enable_char:
bat_context, bat_question, bat_answer_range = [x.to(device) for x in batch_data]
bat_context_char = None
bat_question_char = None
else:
bat_context, bat_question, bat_answer_range = batch_data
bat_context_char = self._batch_word_to_char(bat_context)
bat_question_char = self._batch_word_to_char(bat_question)
bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range = [x.to(device) for x in
[bat_context,
bat_question,
bat_context_char,
bat_question_char,
bat_answer_range]]
return bat_context, bat_question, bat_context_char, bat_question_char, bat_answer_range
def sentence_id2word(self, s_id):
"""
transform a sentence with word id to a sentence with real word
:param s_id:
:return:
"""
s = map(lambda id: self.meta_data['id2word'][id], s_id)
return list(s)
def sentence_word2id(self, s):
"""
transform a sentence with word to a sentence with word id
(Note that it's a slow version when using np.where)
:param s:
:return:
"""
s_id = map(lambda word: np.where(self.meta_data['id2word'] == word)[0][0], s)
return np.array(list(s_id))
def word_id2char(self, w_id):
w = map(lambda id: self.meta_data['id2char'][id], w_id)
return list(w)
def word_char2id(self, w):
if w == PreprocessData.padding: # not actual word
return np.ones(1, ) # make sure word length>0 and right encoding, here any none-zero value not effect
w_id = map(lambda ch: self._char2id[ch], w)
return np.array(list(w_id))
def sentence_char2id(self, s, max_len=None):
s_cid = list(map(lambda w: self.word_char2id(w), s))
if max_len is None:
word_len = list(map(lambda x: len(x), s_cid))
max_len = np.max(word_len)
s_cid_pad = map(lambda x: np.pad(x, (0, max_len - len(x)), 'constant', constant_values=(0, 0)), s_cid)
return np.stack(list(s_cid_pad), axis=0)
def gather_context_seq_len(self, type, steps=None):
"""
gather the context sequence counts with different lengths
:param type: 'train' or 'dev' data
:param steps: set to None means default steps
:return:
"""
data = self._data[type]
context = to_long_tensor(data['context']['token'])
mask = compute_mask(context)
lengths = mask.eq(1).long().sum(1).squeeze()
length_pd = pd.DataFrame(data=lengths.numpy(), columns=['length'])
if steps is None:
steps = [0, 100, 200, 300, 400, 500, 600, 700, 800]
assert len(steps) > 0
# get step length cnt
real_step = []
step_length_cnt = []
for i in range(1, len(steps)):
lower_bound = steps[i - 1]
upper_bound = steps[i]
assert lower_bound < upper_bound # [lower_bound, upper_bound)
real_step.append((lower_bound, upper_bound))
valid = length_pd[(length_pd['length'] < upper_bound) & (length_pd['length'] >= lower_bound)]
tmp_cnt = valid.shape[0]
step_length_cnt.append(tmp_cnt)
rtn_step_length = list(zip(real_step, step_length_cnt))
# get all length cnt
length_cnt = length_pd['length'].value_counts().to_frame(name='cnt')
length_cnt['length'] = length_cnt.index
return rtn_step_length, length_cnt
def gather_answer_seq_len(self, type, max_len=None):
"""
gather the answer sequence counts with different lengths
:param type: 'train' or 'dev' data
:param max_len:
:return:
"""
data = self._data[type]
answer_range = data['answer_range']
lengths = []
for i in range(answer_range.shape[0]):
tmp_lens = []
for j in range(int(answer_range.shape[1] / 2)):
if answer_range[i, j * 2] != -1:
tmp_lens.append(answer_range[i, j * 2 + 1] - answer_range[i, j * 2] + 1)
lengths.append(min(tmp_lens))
length_pd = pd.DataFrame(data=lengths, columns=['length'])
# get all length cnt
length_cnt = length_pd['length'].value_counts().to_frame(name='cnt')
length_cnt['length'] = length_cnt.index
length_cnt = length_cnt.sort_index()
if max_len is not None:
sum_len = length_cnt[length_cnt['length'] >= max_len]['cnt'].sum()
length_cnt = length_cnt[length_cnt['length'] < max_len]
length_cnt.loc[max_len] = [sum_len, '>=%d' % max_len]
return length_cnt
class CQA_Dataset(torch.utils.data.Dataset):
"""
squad like dataset, used for dataloader
Args:
- context: (batch, ct_len)
- question: (batch, qt_len)
- answer_range: (batch, ans_len)
"""
def __init__(self, context, question, answer_range, feature_dict, config):
self.context = context
self.question = question
self.answer_range = answer_range
self.feature_dict = feature_dict
self.config = config
self.lengths = self.get_lengths()
def __getitem__(self, index):
cur_context = to_long_tensor(self.context['token'][index])
cur_question = to_long_tensor(self.question['token'][index])
cur_answer = to_long_tensor(self.answer_range[index])
cur_context_f, cur_question_f = self.addition_feature(index)
return cur_context, cur_question, cur_context_f, cur_question_f, cur_answer
def __len__(self):
return self.answer_range.shape[0]
def get_lengths(self):
ct_mask = self.context['token'].__ne__(PreprocessData.padding_idx)
ct_lengths = ct_mask.sum(1)
qt_mask = self.question['token'].__ne__(PreprocessData.padding_idx)
qt_lengths = qt_mask.sum(1)
lengths = np.stack([ct_lengths, qt_lengths])
return lengths
def addition_feature(self, index):
data = [self.context, self.question]
add_features = [None, None]
for k in range(len(data)):
features = {}
tmp_seq_len = data[k]['token'].shape[1]
if self.config['use_pos']:
features['pos'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2pos'])), dtype=torch.float)
for i, ele in enumerate(data[k]['pos'][index]):
if ele == PreprocessData.padding_idx:
break
features['pos'][i, ele] = 1
if self.config['use_ent']:
features['ent'] = torch.zeros((tmp_seq_len, len(self.feature_dict['id2ent'])), dtype=torch.float)
for i, ele in enumerate(data[k]['ent'][index]):
if ele == PreprocessData.padding_idx:
break
features['ent'][i, ele] = 1
if self.config['use_em']:
features['em'] = to_float_tensor(data[k]['em'][index]).unsqueeze(-1)
if self.config['use_em_lemma']:
features['em_lemma'] = to_float_tensor(data[k]['em_lemma'][index]).unsqueeze(-1)
if len(features) > 0:
add_features[k] = torch.cat(list(features.values()), dim=-1)
return add_features
class SortedBatchSampler(Sampler):
"""
forked from https://github.com/HKUST-KnowComp/MnemonicReader
"""
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths # (2, data_num)
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l[0], -l[1], np.random.random()) for l in self.lengths.T],
dtype=[('l1', np.int_), ('l2', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'l2', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
last = batches[-1] # last batch may not be full batch size
if self.shuffle:
batches = batches[:len(batches)-1]
np.random.shuffle(batches)
batches.append(last)
return iter([i for batch in batches for i in batch])
def __len__(self):
return self.lengths.shape[1]
|
[
"pandas.DataFrame",
"h5py.File",
"torch.stack",
"torch.utils.data.DataLoader",
"math.ceil",
"os.path.exists",
"torch.utils.data.sampler.SequentialSampler",
"logging.getLogger"
] |
[((328, 355), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (345, 355), False, 'import logging\n'), ((659, 715), 'os.path.exists', 'os.path.exists', (["self.global_config['data']['dataset_h5']"], {}), "(self.global_config['data']['dataset_h5'])\n", (673, 715), False, 'import os\n'), ((3291, 3425), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['dataset'], {'batch_size': 'batch_size', 'sampler': 'sampler', 'collate_fn': 'self.collect_fun', 'num_workers': 'num_workers'}), '(dataset, batch_size=batch_size, sampler=sampler,\n collate_fn=self.collect_fun, num_workers=num_workers)\n', (3318, 3425), False, 'import torch\n'), ((7172, 7211), 'math.ceil', 'math.ceil', (['(data_size * 1.0 / batch_size)'], {}), '(data_size * 1.0 / batch_size)\n', (7181, 7211), False, 'import math\n'), ((7472, 7511), 'math.ceil', 'math.ceil', (['(data_size * 1.0 / batch_size)'], {}), '(data_size * 1.0 / batch_size)\n', (7481, 7511), False, 'import math\n'), ((13337, 13383), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'lengths', 'columns': "['length']"}), "(data=lengths, columns=['length'])\n", (13349, 13383), True, 'import pandas as pd\n'), ((1032, 1061), 'h5py.File', 'h5py.File', (['squad_h5_path', '"""r"""'], {}), "(squad_h5_path, 'r')\n", (1041, 1061), False, 'import h5py\n'), ((3243, 3269), 'torch.utils.data.sampler.SequentialSampler', 'SequentialSampler', (['dataset'], {}), '(dataset)\n', (3260, 3269), False, 'from torch.utils.data.sampler import Sampler, SequentialSampler\n'), ((4215, 4242), 'torch.stack', 'torch.stack', (['context'], {'dim': '(0)'}), '(context, dim=0)\n', (4226, 4242), False, 'import torch\n'), ((4295, 4323), 'torch.stack', 'torch.stack', (['question'], {'dim': '(0)'}), '(question, dim=0)\n', (4306, 4323), False, 'import torch\n'), ((4365, 4397), 'torch.stack', 'torch.stack', (['answer_range'], {'dim': '(0)'}), '(answer_range, dim=0)\n', (4376, 4397), False, 'import torch\n'), ((4554, 4583), 'torch.stack', 'torch.stack', (['context_f'], {'dim': '(0)'}), '(context_f, dim=0)\n', (4565, 4583), False, 'import torch\n'), ((4633, 4663), 'torch.stack', 'torch.stack', (['question_f'], {'dim': '(0)'}), '(question_f, dim=0)\n', (4644, 4663), False, 'import torch\n')]
|
import logging
import torch.nn as nn
import torch.utils.checkpoint as cp
import torch
import numpy as np
from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from ...registry import BACKBONES
from ..utils.resnet_r3d_utils import *
class BasicBlock(nn.Module):
def __init__(self,
input_filters,
num_filters,
base_filters,
down_sampling=False,
down_sampling_temporal=None,
block_type='3d',
is_real_3d=True,
group=1,
with_bn=True):
super(BasicBlock, self).__init__()
self.num_filters = num_filters
self.base_filters = base_filters
self.input_filters = input_filters
self.with_bn = with_bn
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
if block_type == '2.5d':
assert is_real_3d
if down_sampling_temporal is None:
down_sampling_temporal = down_sampling
if down_sampling:
if is_real_3d and down_sampling_temporal:
self.down_sampling_stride = [2, 2, 2]
else:
self.down_sampling_stride = [1, 2, 2]
else:
self.down_sampling_stride = [1, 1, 1]
self.down_sampling = down_sampling
self.relu = nn.ReLU()
self.conv1 = add_conv3d(input_filters, num_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=self.down_sampling_stride,
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn1 = add_bn(num_filters)
self.conv2 = add_conv3d(num_filters, num_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=[1, 1, 1],
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn2 = add_bn(num_filters)
if num_filters != input_filters or down_sampling:
self.conv3 = conv3d(input_filters, num_filters, kernel=[1, 1, 1],
stride=self.down_sampling_stride, pad=[0, 0, 0])
if self.with_bn:
self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)
def forward(self, x):
identity = x
out = self.conv1(x)
if self.with_bn:
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
if self.with_bn:
out = self.bn2(out)
if self.down_sampling or self.num_filters != self.input_filters:
identity = self.conv3(identity)
if self.with_bn:
identity = self.bn3(identity)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
def __init__(self,
input_filters,
num_filters,
base_filters,
down_sampling=False,
down_sampling_temporal=None,
block_type='3d',
is_real_3d=True,
group=1,
with_bn=True):
super(Bottleneck, self).__init__()
self.num_filters = num_filters
self.base_filters = base_filters
self.input_filters = input_filters
self.with_bn = with_bn
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
if block_type == '2.5d':
assert is_real_3d
if down_sampling_temporal is None:
down_sampling_temporal = down_sampling
if down_sampling:
if is_real_3d and down_sampling_temporal:
self.down_sampling_stride = [2, 2, 2]
else:
self.down_sampling_stride = [1, 2, 2]
else:
self.down_sampling_stride = [1, 1, 1]
self.down_sampling = down_sampling
self.relu = nn.ReLU()
self.conv0 = add_conv3d(input_filters, base_filters, kernel=[
1, 1, 1], stride=[1, 1, 1], pad=[0, 0, 0], with_bn=self.with_bn)
if self.with_bn:
self.bn0 = add_bn(base_filters)
self.conv1 = add_conv3d(base_filters, base_filters,
kernel=[3, 3, 3] if is_real_3d else [1, 3, 3],
stride=self.down_sampling_stride,
pad=[1, 1, 1] if is_real_3d else [0, 1, 1],
block_type=block_type, with_bn=self.with_bn)
if self.with_bn:
self.bn1 = add_bn(base_filters)
self.conv2 = add_conv3d(base_filters, num_filters, kernel=[
1, 1, 1], pad=[0, 0, 0], stride=[1, 1, 1], with_bn=self.with_bn)
if self.with_bn:
self.bn2 = add_bn(num_filters)
if num_filters != input_filters or down_sampling:
self.conv3 = conv3d(input_filters, num_filters, kernel=[1, 1, 1],
stride=self.down_sampling_stride, pad=[0, 0, 0])
if self.with_bn:
self.bn3 = nn.BatchNorm3d(num_filters, eps=1e-3)
def forward(self, x):
identity = x
if self.with_bn:
out = self.relu(self.bn0(self.conv0(x)))
out = self.relu(self.bn1(self.conv1(out)))
out = self.bn2(self.conv2(out))
else:
out = self.relu(self.conv0(x))
out = self.relu(self.conv1(out))
out = self.conv2(out)
if self.down_sampling or self.num_filters != self.input_filters:
identity = self.conv3(identity)
if self.with_bn:
identity = self.bn3(identity)
out += identity
out = self.relu(out)
return out
def make_plain_res_layer(block, num_blocks, in_filters, num_filters, base_filters,
block_type='3d', down_sampling=False, down_sampling_temporal=None,
is_real_3d=True, with_bn=True):
layers = []
layers.append(block(in_filters, num_filters, base_filters, down_sampling=down_sampling,
down_sampling_temporal=down_sampling_temporal, block_type=block_type,
is_real_3d=is_real_3d, with_bn=with_bn))
for i in range(num_blocks - 1):
layers.append(block(num_filters, num_filters, base_filters,
block_type=block_type, is_real_3d=is_real_3d, with_bn=with_bn))
return module_list(layers)
BLOCK_CONFIG = {
10: (1, 1, 1, 1),
16: (2, 2, 2, 1),
18: (2, 2, 2, 2),
26: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
}
SHALLOW_FILTER_CONFIG = [
[64, 64],
[128, 128],
[256, 256],
[512, 512]
]
DEEP_FILTER_CONFIG = [
[256, 64],
[512, 128],
[1024, 256],
[2048, 512]
]
@BACKBONES.register_module
class ResNet_R3D(nn.Module):
def __init__(self,
pretrained=None,
num_input_channels=3,
depth=34,
block_type='2.5d',
channel_multiplier=1.0,
bottleneck_multiplier=1.0,
conv1_kernel_t=3,
conv1_stride_t=1,
use_pool1=False,
bn_eval=True,
bn_frozen=True,
with_bn=True):
# parameter initialization
super(ResNet_R3D, self).__init__()
self.pretrained = pretrained
self.num_input_channels = num_input_channels
self.depth = depth
self.block_type = block_type
self.channel_multiplier = channel_multiplier
self.bottleneck_multiplier = bottleneck_multiplier
self.conv1_kernel_t = conv1_kernel_t
self.conv1_stride_t = conv1_stride_t
self.use_pool1 = use_pool1
self.relu = nn.ReLU()
self.bn_eval = bn_eval
self.bn_frozen = bn_frozen
self.with_bn = with_bn
global comp_count, comp_idx
comp_idx = 0
comp_count = 0
if self.with_bn:
conv3d = conv3d_wobias
else:
conv3d = conv3d_wbias
# stem block
if self.block_type in ['2.5d', '2.5d-sep']:
self.conv1_s = conv3d(self.num_input_channels, 45, [
1, 7, 7], [1, 2, 2], [0, 3, 3])
if self.with_bn:
self.bn1_s = nn.BatchNorm3d(45, eps=1e-3)
self.conv1_t = conv3d(45, 64, [self.conv1_kernel_t, 1, 1], [self.conv1_stride_t, 1, 1],
[(self.conv1_kernel_t - 1) // 2, 0, 0])
if self.with_bn:
self.bn1_t = nn.BatchNorm3d(64, eps=1e-3)
else:
self.conv1 = conv3d(self.num_input_channels, 64, [self.conv1_kernel_t, 7, 7],
[self.conv1_stride_t, 2, 2], [(self.conv1_kernel_t - 1) // 2, 3, 3])
if self.with_bn:
self.bn1 = nn.BatchNorm3d(64, eps=1e-3)
if self.use_pool1:
self.pool1 = nn.MaxPool3d(kernel_size=[1, 3, 3], stride=[
1, 2, 2], padding=[0, 1, 1])
self.stage_blocks = BLOCK_CONFIG[self.depth]
if self.depth <= 18 or self.depth == 34:
self.block = BasicBlock
else:
self.block = Bottleneck
if self.depth <= 34:
self.filter_config = SHALLOW_FILTER_CONFIG
else:
self.filter_config = DEEP_FILTER_CONFIG
self.filter_config = np.multiply(
self.filter_config, self.channel_multiplier).astype(np.int)
layer1 = make_plain_res_layer(self.block, self.stage_blocks[0],
64, self.filter_config[0][0],
int(self.filter_config[0][1]
* self.bottleneck_multiplier),
block_type=self.block_type,
with_bn=self.with_bn)
self.add_module('layer1', layer1)
layer2 = make_plain_res_layer(self.block, self.stage_blocks[1],
self.filter_config[0][0], self.filter_config[1][0],
int(self.filter_config[1][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer2', layer2)
layer3 = make_plain_res_layer(self.block, self.stage_blocks[2],
self.filter_config[1][0], self.filter_config[2][0],
int(self.filter_config[2][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer3', layer3)
layer4 = make_plain_res_layer(self.block, self.stage_blocks[3],
self.filter_config[2][0], self.filter_config[3][0],
int(self.filter_config[3][1]
* self.bottleneck_multiplier),
block_type=self.block_type, down_sampling=True,
with_bn=self.with_bn)
self.add_module('layer4', layer4)
self.res_layers = ['layer1', 'layer2', 'layer3', 'layer4']
def forward(self, x):
if self.block_type in ['2.5d', '2.5d-sep']:
if self.with_bn:
x = self.relu(self.bn1_s(self.conv1_s(x)))
x = self.relu(self.bn1_t(self.conv1_t(x)))
else:
x = self.relu(self.conv1_s(x))
x = self.relu(self.conv1_t(x))
else:
if self.with_bn:
x = self.relu(self.bn1(self.conv1(x)))
else:
x = self.relu(self.conv1(x))
if self.use_pool1:
x = self.pool1(x)
for i, layer_name in enumerate(self.res_layers):
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def init_weights(self):
if isinstance(self.pretrained, str):
logger = logging.getLogger()
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv3d):
kaiming_init(m)
elif isinstance(m, nn.BatchNorm3d):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def train(self, mode=True):
super(ResNet_R3D, self).train(mode)
if self.bn_eval and self.with_bn:
for m in self.modules():
if isinstance(m, nn.BatchNorm3d):
m.eval()
if self.bn_frozen:
for params in m.parameters():
params.requires_grad = False
|
[
"torch.nn.BatchNorm3d",
"torch.nn.ReLU",
"numpy.multiply",
"mmcv.cnn.constant_init",
"mmcv.cnn.kaiming_init",
"mmcv.runner.load_checkpoint",
"torch.nn.MaxPool3d",
"logging.getLogger"
] |
[((1424, 1433), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1431, 1433), True, 'import torch.nn as nn\n'), ((4251, 4260), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4258, 4260), True, 'import torch.nn as nn\n'), ((8206, 8215), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (8213, 8215), True, 'import torch.nn as nn\n'), ((9398, 9470), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '[1, 3, 3]', 'stride': '[1, 2, 2]', 'padding': '[0, 1, 1]'}), '(kernel_size=[1, 3, 3], stride=[1, 2, 2], padding=[0, 1, 1])\n', (9410, 9470), True, 'import torch.nn as nn\n'), ((12756, 12775), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (12773, 12775), False, 'import logging\n'), ((12788, 12855), 'mmcv.runner.load_checkpoint', 'load_checkpoint', (['self', 'self.pretrained'], {'strict': '(False)', 'logger': 'logger'}), '(self, self.pretrained, strict=False, logger=logger)\n', (12803, 12855), False, 'from mmcv.runner import load_checkpoint\n'), ((2541, 2579), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['num_filters'], {'eps': '(0.001)'}), '(num_filters, eps=0.001)\n', (2555, 2579), True, 'import torch.nn as nn\n'), ((5435, 5473), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['num_filters'], {'eps': '(0.001)'}), '(num_filters, eps=0.001)\n', (5449, 5473), True, 'import torch.nn as nn\n'), ((8765, 8794), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(45)'], {'eps': '(0.001)'}), '(45, eps=0.001)\n', (8779, 8794), True, 'import torch.nn as nn\n'), ((9026, 9055), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {'eps': '(0.001)'}), '(64, eps=0.001)\n', (9040, 9055), True, 'import torch.nn as nn\n'), ((9316, 9345), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {'eps': '(0.001)'}), '(64, eps=0.001)\n', (9330, 9345), True, 'import torch.nn as nn\n'), ((9878, 9934), 'numpy.multiply', 'np.multiply', (['self.filter_config', 'self.channel_multiplier'], {}), '(self.filter_config, self.channel_multiplier)\n', (9889, 9934), True, 'import numpy as np\n'), ((12996, 13011), 'mmcv.cnn.kaiming_init', 'kaiming_init', (['m'], {}), '(m)\n', (13008, 13011), False, 'from mmcv.cnn import constant_init, kaiming_init\n'), ((13084, 13103), 'mmcv.cnn.constant_init', 'constant_init', (['m', '(1)'], {}), '(m, 1)\n', (13097, 13103), False, 'from mmcv.cnn import constant_init, kaiming_init\n')]
|
import os
origin = os.getenv("AUDIO_REQ_ORIGIN", "https://api.openverse.engineering")
identifier = "29cb352c-60c1-41d8-bfa1-7d6f7d955f63"
base_image = {
"id": identifier,
"title": "Bust of Patroclus (photograph; calotype; salt print)",
"foreign_landing_url": "https://collection.sciencemuseumgroup.org.uk/objects/co8554747/bust-of-patroclus-photograph-calotype-salt-print", # noqa
"creator": "<NAME>",
"url": "https://coimages.sciencemuseumgroup.org.uk/images/439/67/large_1937_1281_0001__0001_.jpg", # noqa
"license": "by-nc-nd",
"license_version": "4.0",
"license_url": "https://creativecommons.org/licenses/by-nc-nd/4.0/",
"provider": "sciencemuseum",
"source": "sciencemuseum",
"thumbnail": f"{origin}/v1/images/{identifier}/thumb/",
"detail_url": f"{origin}/v1/images/{identifier}/",
"related_url": f"{origin}/v1/images/{identifier}/related/",
}
image_search_200_example = {
"application/json": {
"result_count": 1,
"page_count": 0,
"page_size": 20,
"page": 1,
"results": [
base_image
| {
"fields_matched": ["title"],
}
],
},
}
image_search_400_example = {
"application/json": {
"error": "InputError",
"detail": "Invalid input given for fields. 'license' -> License 'PDMNBCG' does not exist.", # noqa
"fields": ["license"],
}
}
image_stats_200_example = {
"application/json": [
{
"source_name": "flickr",
"display_name": "Flickr",
"source_url": "https://www.flickr.com",
"logo_url": None,
"media_count": 1000,
},
{
"source_name": "rawpixel",
"display_name": "rawpixel",
"source_url": "https://www.rawpixel.com",
"logo_url": None,
"media_count": 1000,
},
{
"source_name": "sciencemuseum",
"display_name": "Science Museum",
"source_url": "https://www.sciencemuseum.org.uk",
"logo_url": None,
"media_count": 1000,
},
{
"source_name": "stocksnap",
"display_name": "StockSnap",
"source_url": "https://stocksnap.io",
"logo_url": None,
"media_count": 1000,
},
{
"source_name": "wikimedia",
"display_name": "Wikimedia",
"source_url": "https://commons.wikimedia.org",
"logo_url": None,
"media_count": 1000,
},
]
}
image_detail_200_example = {
"application/json": base_image
| {
"attribution": '"Bust of Patroclus (photograph; calotype; salt print)" by <NAME> is licensed under CC-BY-NC-ND 4.0. To view a copy of this license, visit https://creativecommons.org/licenses/by-nc-nd/4.0/.', # noqa
"height": 1536,
"width": 1276,
"tags": None,
"creator_url": None,
}
}
image_detail_404_example = {"application/json": {"detail": "Not found."}}
image_related_200_example = {
"application/json": {
"result_count": 10000,
"page_count": 0,
"results": [
{
"title": "exam tactics",
"id": "610756ec-ae31-4d5e-8f03-8cc52f31b71d",
"creator": "<NAME>",
"creator_url": "https://www.flickr.com/photos/18090920@N07",
"tags": [{"name": "exam"}, {"name": "tactics"}],
"url": "https://live.staticflickr.com/4065/4459771899_07595dc42e.jpg", # noqa
"thumbnail": "https://api.openverse.engineering/v1/thumbs/610756ec-ae31-4d5e-8f03-8cc52f31b71d", # noqa
"provider": "flickr",
"source": "flickr",
"license": "by",
"license_version": "2.0",
"license_url": "https://creativecommons.org/licenses/by/2.0/",
"foreign_landing_url": "https://www.flickr.com/photos/18090920@N07/4459771899", # noqa
"detail_url": "http://api.openverse.engineering/v1/images/610756ec-ae31-4d5e-8f03-8cc52f31b71d", # noqa
"related_url": "http://api.openverse.engineering/v1/recommendations/images/610756ec-ae31-4d5e-8f03-8cc52f31b71d", # noqa
}
],
}
}
image_related_404_example = {
"application/json": {"detail": "An internal server error occurred."}
}
image_oembed_200_example = {
"application/json": {
"version": "1.0",
"type": "photo",
"width": 1276,
"height": 1536,
"title": "Bust of Patroclus (photograph; calotype; salt print)",
"author_name": "<NAME>",
"author_url": None,
"license_url": "https://creativecommons.org/licenses/by-nc-nd/4.0/",
}
}
image_oembed_404_example = {
"application/json": {"detail": "An internal server error occurred."}
}
image_complain_201_example = {
"application/json": {
"identifier": identifier,
"reason": "mature",
"description": "This image contains sensitive content",
}
}
|
[
"os.getenv"
] |
[((21, 87), 'os.getenv', 'os.getenv', (['"""AUDIO_REQ_ORIGIN"""', '"""https://api.openverse.engineering"""'], {}), "('AUDIO_REQ_ORIGIN', 'https://api.openverse.engineering')\n", (30, 87), False, 'import os\n')]
|
from typer import Option as Opt
from ..system import system
from .main import program
from .. import config
@program.command(name="api")
def program_api(
port: int = Opt(config.DEFAULT_SERVER_PORT, help="Specify server port"),
):
"""
Start API server
"""
server = system.create_server("api")
server.listen(port=port)
|
[
"typer.Option"
] |
[((172, 231), 'typer.Option', 'Opt', (['config.DEFAULT_SERVER_PORT'], {'help': '"""Specify server port"""'}), "(config.DEFAULT_SERVER_PORT, help='Specify server port')\n", (175, 231), True, 'from typer import Option as Opt\n')]
|
import io
import itertools
import logging
import sys
import traceback
from operator import itemgetter
from typing import BinaryIO, Optional, TextIO, Tuple
import target_postgres
from target_postgres import DbSync
from target_postgres.db_sync import column_type, flatten_key
from splitgraph.config import CONFIG
from splitgraph.core.image import Image
from splitgraph.core.repository import Repository
from splitgraph.core.types import TableColumn, TableSchema
from splitgraph.engine.postgres.engine import get_change_key
from splitgraph.exceptions import TableNotFoundError
from splitgraph.ingestion.common import merge_tables
from .common import _make_changeset, _migrate_schema, log_exception, rollback_at_end
def select_breadcrumb(stream_message, breadcrumb):
for sub_meta in stream_message["metadata"]:
if sub_meta["breadcrumb"] == breadcrumb:
return sub_meta["metadata"]
raise ValueError("Breadcrumb %s not found!" % breadcrumb)
def get_key_properties(stream_message):
"""Extract the PK from a stream message. Supports both legacy ("key_properties") and
new ("metadata") Singer taps."""
if "key_properties" in stream_message:
return stream_message["key_properties"]
return select_breadcrumb(stream_message, []).get("table-key-properties", [])
class DbSyncProxy(DbSync):
def __init__(self, *args, **kwargs):
# The structure here is that we edit an image and write / modify tables in it. This
# is supposed to be called with an image already existing.
self.image = kwargs.pop("image")
self.staging_schema = kwargs.pop("staging_schema")
super().__init__(*args, **kwargs)
self.staging_table: Optional[Tuple[str, str]] = None
def _sg_schema(self) -> TableSchema:
stream_schema_message = self.stream_schema_message
return _get_sg_schema(self.flatten_schema, get_key_properties(stream_schema_message))
def create_indices(self, stream):
pass
@rollback_at_end
def sync_table(self):
# NB the overridden method never calls self.update_columns() to bring the
# schema up to date: this is because it compares a quoted name of the
# table to the unquoted names returned by self.get_tables().
schema_spec = self._sg_schema()
# See if the table exists
try:
table = self.image.get_table(get_table_name(self.stream_schema_message))
except TableNotFoundError:
self.staging_table = (
self.staging_schema,
"staging_" + get_table_name(self.stream_schema_message),
)
self.logger.info("Creating a staging table at %s.%s", *self.staging_table)
self.image.repository.object_engine.create_table(
schema=self.staging_table[0],
table=self.staging_table[1],
schema_spec=schema_spec,
unlogged=True,
)
# Make an empty table (will replace later on when flushing the data)
self.image.repository.objects.register_tables(
self.image.repository,
[
(
self.image.image_hash,
get_table_name(self.stream_schema_message),
schema_spec,
[],
)
],
)
self.image.repository.commit_engines()
return
if table.table_schema != schema_spec:
# Materialize the table into a temporary location and update its schema
self.staging_table = (
self.staging_schema,
"staging_" + get_table_name(self.stream_schema_message),
)
self.logger.info(
"Schema mismatch, materializing the table into %s.%s and migrating",
*self.staging_table,
)
table.materialize(self.staging_table[1], self.staging_table[0])
_migrate_schema(
self.image.repository.object_engine,
self.staging_table[0],
self.staging_table[1],
table.table_schema,
schema_spec,
)
self.image.repository.commit_engines()
@log_exception
@rollback_at_end
def load_csv(self, file, count, size_bytes):
from splitgraph.ingestion.csv import copy_csv_buffer
table_name = get_table_name(self.stream_schema_message)
schema_spec = self._sg_schema()
temp_table = "tmp_" + table_name
self.logger.info("Loading %d rows into '%s'", count, table_name)
old_table = self.image.get_table(table_name)
self.image.repository.object_engine.create_table(
schema="pg_temp", table=temp_table, schema_spec=schema_spec, temporary=True
)
with open(file, "rb") as f:
copy_csv_buffer(
data=f,
engine=self.image.repository.object_engine,
schema="pg_temp",
table=temp_table,
no_header=True,
escape="\\",
)
schema_spec = self._sg_schema()
if not self.staging_table:
self._merge_existing_table(old_table, temp_table)
else:
self._overwrite_existing_table(temp_table, table_name, schema_spec)
# Commit (deletes temporary tables)
self.image.repository.commit_engines()
def _overwrite_existing_table(self, temp_table, table_name, schema_spec):
assert self.staging_table
staging_table_schema, staging_table = self.staging_table
# Merge the CSV data into the "materialized" table
merge_tables(
self.image.object_engine,
"pg_temp",
temp_table,
schema_spec,
staging_table_schema,
staging_table,
schema_spec,
)
object_id = self.image.repository.objects.create_base_fragment(
staging_table_schema,
staging_table,
self.image.repository.namespace,
table_schema=schema_spec,
)
# Overwrite the existing table
self.image.repository.objects.overwrite_table(
self.image.repository, self.image.image_hash, table_name, schema_spec, [object_id]
)
self.staging_table = None
def _merge_existing_table(self, old_table, temp_table):
# Load the data directly as a Splitgraph object. Note that this can still be
# improved: currently the Singer loader loads the stream as a CSV, gives it
# to us, we ingest it into a temporary table, generate a changeset from that
# (which goes back into Python), give it to the fragment manager, the fragment
# manager builds an object by loading it into a temporary table _again_ and then
# into a cstore_fdw file.
assert self._sg_schema() == old_table.table_schema
with old_table.image.query_schema(commit=False):
# If hard_delete is set, check the sdc_removed_at flag in the table: if it's
# not NULL, we mark rows as deleted instead.
hard_delete = bool(self.connection_config.get("hard_delete"))
# Find PKs that have been upserted and deleted (make fake changeset)
changeset = _make_changeset(
self.image.object_engine,
"pg_temp",
temp_table,
old_table.table_schema,
upsert_condition="_sdc_removed_at IS NOT DISTINCT FROM NULL"
if hard_delete
else "TRUE",
)
inserted = sum(1 for v in changeset.values() if v[0] and not v[1])
updated = sum(1 for v in changeset.values() if v[0] and v[1])
deleted = sum(1 for v in changeset.values() if not v[0])
self.logger.info(
"Table %s: inserted %d, updated %d, deleted %d",
get_table_name(self.stream_schema_message),
inserted,
updated,
deleted,
)
# Split the changeset according to the original fragment boundaries so that a single
# change spanning multiple fragments doesn't force materialization.
split_changesets = self.image.repository.objects.split_changeset_boundaries(
changeset, get_change_key(old_table.table_schema), old_table.objects
)
self.logger.info(
"Table %s: split changeset into %d fragment(s)",
get_table_name(self.stream_schema_message),
len(split_changesets),
)
# Store the changeset as a new SG object
object_ids = self.image.repository.objects._store_changesets(
old_table, split_changesets, "pg_temp", table_name=temp_table
)
# Add the new object to the table.
# add_table (called by register_tables) does that by default (appends
# the object to the table if it already exists)
self.image.repository.objects.register_tables(
self.image.repository,
[
(
self.image.image_hash,
old_table.table_name,
old_table.table_schema,
object_ids,
)
],
)
def delete_rows(self, stream):
# We delete rows in load_csv if required (by putting them into the DIFF as
# upserted=False).
pass
def create_schema_if_not_exists(self, table_columns_cache=None):
pass
def _get_sg_schema(flattened_schema, primary_key) -> TableSchema:
return [
TableColumn(i, name, column_type(schema_property), name in primary_key, None)
for i, (name, schema_property) in enumerate(flattened_schema.items())
]
# Taken from target-postgres and adapted to not crash on unsupported columns
def _flatten_schema(d, parent_key=None, sep="__", level=0, max_level=0):
if parent_key is None:
parent_key = []
items = []
if "properties" not in d:
return {}
for k, v in d["properties"].items():
new_key = flatten_key(k, parent_key, sep)
if "type" in v:
if "object" in v["type"] and "properties" in v and level < max_level:
items.extend(
_flatten_schema(
v, parent_key + [k], sep=sep, level=level + 1, max_level=max_level
).items()
)
else:
items.append((new_key, v))
else:
if len(v.values()) > 0:
if v.get("inclusion") == "unsupported":
logging.warning("Unsupported field %s: %s", new_key, v.get("description", ""))
continue
if list(v.values())[0][0]["type"] == "string":
list(v.values())[0][0]["type"] = ["null", "string"]
items.append((new_key, list(v.values())[0][0]))
elif list(v.values())[0][0]["type"] == "array":
list(v.values())[0][0]["type"] = ["null", "array"]
items.append((new_key, list(v.values())[0][0]))
elif list(v.values())[0][0]["type"] == "object":
list(v.values())[0][0]["type"] = ["null", "object"]
items.append((new_key, list(v.values())[0][0]))
sorted_items = sorted(items, key=itemgetter(0))
for k, g in itertools.groupby(sorted_items, key=itemgetter(0)):
if len(list(g)) > 1:
raise ValueError("Duplicate column name produced in schema: {}".format(k))
return dict(sorted_items)
def get_sg_schema(stream_schema_message, flattening_max_level=0):
return _get_sg_schema(
_flatten_schema(stream_schema_message["schema"], max_level=flattening_max_level),
get_key_properties(stream_schema_message),
)
def get_table_name(stream_schema_message):
return stream_schema_message["stream"].replace(".", "_").replace("-", "_").lower()
def db_sync_wrapper(image: "Image", staging_schema: str):
def wrapped(*args, **kwargs):
return DbSyncProxy(image=image, staging_schema=staging_schema, *args, **kwargs)
return wrapped
def run_patched_sync(
repository: Repository,
base_image: Optional[Image],
new_image_hash: str,
delete_old: bool,
failure: str,
input_stream: Optional[BinaryIO] = None,
output_stream: Optional[TextIO] = None,
):
input_stream = input_stream or sys.stdin.buffer
# Build a staging schema
staging_schema = "sg_tmp_" + repository.to_schema()
repository.object_engine.delete_schema(staging_schema)
repository.object_engine.create_schema(staging_schema)
repository.commit_engines()
config = _prepare_config_params(repository)
old_sync = target_postgres.DbSync
stdout = sys.stdout
target_postgres.DbSync = db_sync_wrapper(repository.images[new_image_hash], staging_schema)
if output_stream:
sys.stdout = output_stream
try:
singer_messages = io.TextIOWrapper(input_stream, encoding="utf-8")
target_postgres.persist_lines(config, singer_messages)
if delete_old and base_image:
repository.images.delete([base_image.image_hash])
except Exception:
repository.rollback_engines()
if failure == "delete-new":
repository.images.delete([new_image_hash])
elif failure == "delete-old" and base_image:
repository.images.delete([base_image.image_hash])
repository.commit_engines()
logging.error(traceback.format_exc())
raise
finally:
sys.stdout = stdout
target_postgres.DbSync = old_sync
repository.object_engine.delete_schema(staging_schema)
repository.commit_engines()
def _prepare_config_params(repository):
conn_params = repository.engine.conn_params
# Prepare target_postgres config
config = {
"host": conn_params["SG_ENGINE_HOST"],
"port": int(conn_params["SG_ENGINE_PORT"]),
"user": conn_params["SG_ENGINE_USER"],
"password": conn_params["SG_ENGINE_PWD"],
"dbname": conn_params["SG_ENGINE_DB_NAME"],
"default_target_schema": repository.to_schema(),
"max_parallelism": _calc_max_threads(conn_params),
}
return config
def _calc_max_threads(conn_params):
"""Each loader thread really uses 2 connections (one directly when communicating with
the engine, one from the in-engine Splitgraph during querying) and this can lead to
weird deadlocks when we exhaust the connection limit enforced by pgbouncer) -- to avoid
that, we decrease the number of concurrent loader threads."""
return max(int(conn_params.get("SG_ENGINE_POOL", CONFIG["SG_ENGINE_POOL"])) // 2 - 1, 1)
|
[
"target_postgres.db_sync.flatten_key",
"target_postgres.db_sync.column_type",
"target_postgres.persist_lines",
"io.TextIOWrapper",
"splitgraph.ingestion.common.merge_tables",
"splitgraph.ingestion.csv.copy_csv_buffer",
"traceback.format_exc",
"operator.itemgetter",
"splitgraph.engine.postgres.engine.get_change_key"
] |
[((5748, 5876), 'splitgraph.ingestion.common.merge_tables', 'merge_tables', (['self.image.object_engine', '"""pg_temp"""', 'temp_table', 'schema_spec', 'staging_table_schema', 'staging_table', 'schema_spec'], {}), "(self.image.object_engine, 'pg_temp', temp_table, schema_spec,\n staging_table_schema, staging_table, schema_spec)\n", (5760, 5876), False, 'from splitgraph.ingestion.common import merge_tables\n'), ((10288, 10319), 'target_postgres.db_sync.flatten_key', 'flatten_key', (['k', 'parent_key', 'sep'], {}), '(k, parent_key, sep)\n', (10299, 10319), False, 'from target_postgres.db_sync import column_type, flatten_key\n'), ((13212, 13260), 'io.TextIOWrapper', 'io.TextIOWrapper', (['input_stream'], {'encoding': '"""utf-8"""'}), "(input_stream, encoding='utf-8')\n", (13228, 13260), False, 'import io\n'), ((13269, 13323), 'target_postgres.persist_lines', 'target_postgres.persist_lines', (['config', 'singer_messages'], {}), '(config, singer_messages)\n', (13298, 13323), False, 'import target_postgres\n'), ((4935, 5072), 'splitgraph.ingestion.csv.copy_csv_buffer', 'copy_csv_buffer', ([], {'data': 'f', 'engine': 'self.image.repository.object_engine', 'schema': '"""pg_temp"""', 'table': 'temp_table', 'no_header': '(True)', 'escape': '"""\\\\"""'}), "(data=f, engine=self.image.repository.object_engine, schema=\n 'pg_temp', table=temp_table, no_header=True, escape='\\\\')\n", (4950, 5072), False, 'from splitgraph.ingestion.csv import copy_csv_buffer\n'), ((9819, 9847), 'target_postgres.db_sync.column_type', 'column_type', (['schema_property'], {}), '(schema_property)\n', (9830, 9847), False, 'from target_postgres.db_sync import column_type, flatten_key\n'), ((11576, 11589), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (11586, 11589), False, 'from operator import itemgetter\n'), ((11643, 11656), 'operator.itemgetter', 'itemgetter', (['(0)'], {}), '(0)\n', (11653, 11656), False, 'from operator import itemgetter\n'), ((8463, 8501), 'splitgraph.engine.postgres.engine.get_change_key', 'get_change_key', (['old_table.table_schema'], {}), '(old_table.table_schema)\n', (8477, 8501), False, 'from splitgraph.engine.postgres.engine import get_change_key\n'), ((13748, 13770), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (13768, 13770), False, 'import traceback\n')]
|
from abc import ABC, abstractmethod
from hyperopt import STATUS_OK
import numpy as np
import logging
import pandas as pd
import shap
import matplotlib.pyplot as plt
import seaborn as sns
from crosspredict.iterator import Iterator
class CrossModelFabric(ABC):
def __init__(self,
iterator: Iterator,
params,
feature_name,
col_target,
cols_cat='auto',
num_boost_round=99999,
early_stopping_rounds=50,
valid=True,
random_state=0,
cross_target_encoder=None
):
self.params = params
self.feature_name = feature_name
self.cols_cat = cols_cat
self.num_boost_round = num_boost_round
self.early_stopping_rounds = early_stopping_rounds
self.valid = valid
self.col_target = col_target
self.random_state = random_state
self.iterator = iterator
self.cross_target_encoder = cross_target_encoder
self.models = {}
self.scores = None
self.score_max = None
self.num_boost_optimal = None
self.std = None
@abstractmethod
def get_hyperopt_space(self, params, random_state):
pass
@abstractmethod
def get_dataset(self, data, label, categorical_feature, **kwargs):
pass
@abstractmethod
def train(
self,
params,
train_set,
train_name,
valid_sets,
valid_name,
num_boost_round,
evals_result,
categorical_feature,
early_stopping_rounds,
verbose_eval):
pass
def fit(self, df):
log = logging.getLogger(__name__)
scores = {}
scores_avg = []
log.info(self.params)
self.iterator.fit(df=df)
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
dtrain = self.get_dataset(
data=X_train.astype(float),
label=y_train,
categorical_feature=self.cols_cat)
dvalid = self.get_dataset(data=X_val.astype(float), label=y_val,
categorical_feature=self.cols_cat)
if fold % self.iterator.n_splits == 0:
log.info(f'REPEAT FOLDS {fold//self.iterator.n_splits} START')
# Обучение
evals_result = {}
if self.valid:
model = self.train(
params=self.params,
train_set=dtrain,
train_name='train',
valid_set=dvalid,
valid_name='eval',
num_boost_round=self.num_boost_round,
evals_result=evals_result,
categorical_feature=self.cols_cat,
early_stopping_rounds=self.early_stopping_rounds,
verbose_eval=False)
else:
model = self.train(params=self.params,
train_set=dtrain,
num_boost_round=self.num_boost_round,
categorical_feature=self.cols_cat,
verbose_eval=False)
self.models[fold] = model
if self.valid:
# Построение прогнозов при разном виде взаимодействия
scores[fold] = evals_result['eval'][self.params['metric']]
best_auc = np.max(evals_result['eval'][self.params['metric']])
scores_avg.append(best_auc)
log.info(f'\tCROSSVALIDATION FOLD {fold%self.iterator.n_splits} ENDS with best `{self.params["metric"]}` = {best_auc}')
if self.valid:
self.scores = pd.DataFrame(
dict([(k, pd.Series(v)) for k, v in scores.items()]))
mask = self.scores.isnull().sum(axis=1) == 0
self.num_boost_optimal = np.argmax(
self.scores[mask].mean(axis=1).values)
self.score_max = self.scores[mask].mean(
axis=1)[self.num_boost_optimal]
# self.score_max = np.mean(scores_avg)
self.std = self.scores[mask].std(axis=1)[self.num_boost_optimal]
# self.std = np.std(scores_avg)
result = {'loss': -self.score_max,
'status': STATUS_OK,
'std': self.std,
'score_max': self.score_max,
'scores_all': scores_avg,
'num_boost': int(self.num_boost_optimal),
}
log.info(result)
return result
return self
def transform(self, df):
x = df[self.feature_name]
y = df[self.col_target]
predict = pd.Series(index=df.index, data=np.zeros(df.shape[0]))
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
# Подготовка данных в нужном формате
model = self.models[fold]
predict.loc[X_val.index] += \
model.predict(X_val[model.feature_name()].astype(float),
num_iteration=self.num_boost_optimal) / self.iterator.n_repeats
return predict
def predict(self, test):
predict = pd.Series(index=test.index, data=np.zeros(test.shape[0]))
models_len = len(self.models.keys())
if self.cross_target_encoder is not None:
encoded_test = self.cross_target_encoder.predict(test)
test = pd.concat([test, encoded_test], axis=1)
for fold in self.models.keys():
model = self.models[fold]
predict += model.predict(test[model.feature_name()].astype(
float), num_iteration=self.num_boost_optimal) / models_len
return predict
def shap(self, df: pd.DataFrame, n_samples=500):
'''
:param df:
:param n_samples: количество записей которое будет семплироваться в каждом тестовом фолде для анализы shap values
:return:
'''
fig = plt.figure(figsize=(10, 10))
log = logging.getLogger(__name__)
shap_df_fin = pd.DataFrame(columns=['feature'])
x = df[self.feature_name]
y = df[self.col_target]
for fold, (train, val) in enumerate(self.iterator.split(df)):
if self.cross_target_encoder is not None:
encoded_train, encoded_test = self.cross_target_encoder.transform(
fold=fold, train=train, test=val)
train = pd.concat([train, encoded_train], axis=1)
val = pd.concat([val, encoded_test], axis=1)
X_train, X_val = train[self.feature_name], val[self.feature_name]
y_train, y_val = train[self.col_target], val[self.col_target]
model = self.models[fold]
explainer = shap.TreeExplainer(model)
df_sample = X_val[model.feature_name()].sample(
n=n_samples, random_state=0, replace=True).astype(float)
if self.params['metric']=='auc':
shap_values = explainer.shap_values(df_sample)[1]
else:
shap_values = explainer.shap_values(df_sample)
shap_df = pd.DataFrame(zip(model.feature_name(), np.mean(
np.abs(shap_values), axis=0)), columns=['feature', 'shap_' + str(fold)])
shap_df_fin = pd.merge(shap_df_fin, shap_df,
how='outer', on='feature')
shap_feature_stats = shap_df_fin.set_index('feature').agg(
['mean', 'std'], axis=1).sort_values('mean', ascending=False)
cols_best = shap_feature_stats[:30].index
best_features = shap_df_fin.loc[shap_df_fin['feature'].isin(cols_best)]
best_features_melt = pd.melt(
best_features, id_vars=['feature'], value_vars=[
feature for feature in best_features.columns.values.tolist() if feature not in ['feature']])
sns.barplot(x='value', y='feature', data=best_features_melt,
estimator=np.mean, order=cols_best)
return fig, shap_feature_stats.reset_index()
def shap_summary_plot(self, test: pd.DataFrame, n_samples=500):
fig = plt.figure()
log = logging.getLogger(__name__)
shap_df_fin = pd.DataFrame(columns=['feature'])
if self.cross_target_encoder is not None:
encoded_test = self.cross_target_encoder.predict(test=test)
test = pd.concat([test, encoded_test], axis=1)
# Подготовка данных в нужном формате
model = self.models[0]
explainer = shap.TreeExplainer(model)
df_sample = test[model.feature_name()].sample(
n=n_samples, random_state=0, replace=True).astype(float)
if self.params['metric']=='auc':
shap_values = explainer.shap_values(df_sample)[1]
else:
shap_values = explainer.shap_values(df_sample)
shap_df = pd.DataFrame(zip(model.feature_name(), np.mean(
np.abs(shap_values), axis=0)), columns=['feature', 'shap_'])
shap_df_fin = pd.merge(shap_df_fin, shap_df, how='outer', on='feature')
shap.summary_plot(shap_values, df_sample, show=False, )
return fig
|
[
"pandas.DataFrame",
"numpy.abs",
"pandas.merge",
"seaborn.barplot",
"numpy.zeros",
"shap.TreeExplainer",
"matplotlib.pyplot.figure",
"numpy.max",
"pandas.Series",
"shap.summary_plot",
"pandas.concat",
"logging.getLogger"
] |
[((1763, 1790), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1780, 1790), False, 'import logging\n'), ((7084, 7112), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), '(figsize=(10, 10))\n', (7094, 7112), True, 'import matplotlib.pyplot as plt\n'), ((7127, 7154), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (7144, 7154), False, 'import logging\n'), ((7177, 7210), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['feature']"}), "(columns=['feature'])\n", (7189, 7210), True, 'import pandas as pd\n'), ((9002, 9103), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""value"""', 'y': '"""feature"""', 'data': 'best_features_melt', 'estimator': 'np.mean', 'order': 'cols_best'}), "(x='value', y='feature', data=best_features_melt, estimator=np.\n mean, order=cols_best)\n", (9013, 9103), True, 'import seaborn as sns\n'), ((9255, 9267), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9265, 9267), True, 'import matplotlib.pyplot as plt\n'), ((9282, 9309), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (9299, 9309), False, 'import logging\n'), ((9332, 9365), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['feature']"}), "(columns=['feature'])\n", (9344, 9365), True, 'import pandas as pd\n'), ((9644, 9669), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (9662, 9669), False, 'import shap\n'), ((10131, 10188), 'pandas.merge', 'pd.merge', (['shap_df_fin', 'shap_df'], {'how': '"""outer"""', 'on': '"""feature"""'}), "(shap_df_fin, shap_df, how='outer', on='feature')\n", (10139, 10188), True, 'import pandas as pd\n'), ((10198, 10251), 'shap.summary_plot', 'shap.summary_plot', (['shap_values', 'df_sample'], {'show': '(False)'}), '(shap_values, df_sample, show=False)\n', (10215, 10251), False, 'import shap\n'), ((6542, 6581), 'pandas.concat', 'pd.concat', (['[test, encoded_test]'], {'axis': '(1)'}), '([test, encoded_test], axis=1)\n', (6551, 6581), True, 'import pandas as pd\n'), ((7883, 7908), 'shap.TreeExplainer', 'shap.TreeExplainer', (['model'], {}), '(model)\n', (7901, 7908), False, 'import shap\n'), ((8419, 8476), 'pandas.merge', 'pd.merge', (['shap_df_fin', 'shap_df'], {'how': '"""outer"""', 'on': '"""feature"""'}), "(shap_df_fin, shap_df, how='outer', on='feature')\n", (8427, 8476), True, 'import pandas as pd\n'), ((9507, 9546), 'pandas.concat', 'pd.concat', (['[test, encoded_test]'], {'axis': '(1)'}), '([test, encoded_test], axis=1)\n', (9516, 9546), True, 'import pandas as pd\n'), ((2185, 2226), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (2194, 2226), True, 'import pandas as pd\n'), ((2249, 2287), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (2258, 2287), True, 'import pandas as pd\n'), ((4032, 4083), 'numpy.max', 'np.max', (["evals_result['eval'][self.params['metric']]"], {}), "(evals_result['eval'][self.params['metric']])\n", (4038, 4083), True, 'import numpy as np\n'), ((5369, 5390), 'numpy.zeros', 'np.zeros', (['df.shape[0]'], {}), '(df.shape[0])\n', (5377, 5390), True, 'import numpy as np\n'), ((5678, 5719), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (5687, 5719), True, 'import pandas as pd\n'), ((5742, 5780), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (5751, 5780), True, 'import pandas as pd\n'), ((6336, 6359), 'numpy.zeros', 'np.zeros', (['test.shape[0]'], {}), '(test.shape[0])\n', (6344, 6359), True, 'import numpy as np\n'), ((7564, 7605), 'pandas.concat', 'pd.concat', (['[train, encoded_train]'], {'axis': '(1)'}), '([train, encoded_train], axis=1)\n', (7573, 7605), True, 'import pandas as pd\n'), ((7628, 7666), 'pandas.concat', 'pd.concat', (['[val, encoded_test]'], {'axis': '(1)'}), '([val, encoded_test], axis=1)\n', (7637, 7666), True, 'import pandas as pd\n'), ((10048, 10067), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (10054, 10067), True, 'import numpy as np\n'), ((8320, 8339), 'numpy.abs', 'np.abs', (['shap_values'], {}), '(shap_values)\n', (8326, 8339), True, 'import numpy as np\n'), ((4355, 4367), 'pandas.Series', 'pd.Series', (['v'], {}), '(v)\n', (4364, 4367), True, 'import pandas as pd\n')]
|
# -*- encoding: utf-8 -*-
# ! python3
import click
from src.visualization.overlay import overlay_command
from src.visualization.prediction_only import prediction_only_command
from src.visualization.side_by_side import side_by_side_command
@click.group(name='cli')
def cli():
pass
cli.add_command(overlay_command)
cli.add_command(side_by_side_command)
cli.add_command(prediction_only_command)
cli()
|
[
"click.group"
] |
[((245, 268), 'click.group', 'click.group', ([], {'name': '"""cli"""'}), "(name='cli')\n", (256, 268), False, 'import click\n')]
|
import sys
n, t = map(int, sys.stdin.readline().split())
a = list(map(int, sys.stdin.readline().split()))
ans = 0
def go(i, s):
if i == n:
if s == t:
global ans
ans += 1
return
go(i+1, s)
go(i+1, s+a[i])
go(0, 0)
print(ans)
|
[
"sys.stdin.readline"
] |
[((28, 48), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (46, 48), False, 'import sys\n'), ((77, 97), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (95, 97), False, 'import sys\n')]
|
#!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
An example to set OMP threads in FCI calculations. In old pyscf versions,
different number of OpenMP threads may lead to slightly different answers.
This issue was fixed. see github issue #249.
'''
from functools import reduce
import numpy
from pyscf import gto, lo, fci, ao2mo, scf, lib
mol = gto.M(atom=[('H', 0, 0, i*1.8) for i in range(10)],
basis = 'sto6g', unit='B')
s = mol.intor('cint1e_ovlp_sph')
orb = lo.lowdin(s)
#mf = scf.RHF(mol).run()
#orb = mf.mo_coeff
h1 = mol.intor('cint1e_nuc_sph')
h1+= mol.intor('cint1e_kin_sph')
h1 = reduce(numpy.dot, (orb.T, h1, orb))
h2 = ao2mo.kernel(mol, orb)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(),
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
#
# Reducing OMP threads can improve the numerical stability
#
# Set OMP_NUM_THREADS to 1
lib.num_threads(1)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(),
max_cycle=500, max_space=100, verbose=5)
print(e)
e, ci = fci.direct_spin0.kernel(h1, h2, 10, 10, ecore=mol.energy_nuc(), ci0=ci,
max_cycle=500, max_space=100, verbose=5)
print(e)
#
# Another Example.
#
import h5py
with h5py.File('spin_op_hamiltonian.h5', 'r') as f:
h1 = lib.unpack_tril(f['h1'].value)
h2 = f['h2'].value
norb = 10
nelec = (5,5)
na = fci.cistring.num_strings(norb, nelec[0])
c0 = numpy.zeros((na,na))
c0[0,0] = 1
solver = fci.addons.fix_spin_(fci.direct_spin0.FCI())
# Smooth convergence was found with single thread.
solver.threads = 1
solver.kernel(h1, h2, norb, nelec, ci0=c0, verbose=5)
# When switching to multi-threads, numerical fluctuation leads to convergence
# problem
solver.threads = 4
solver.kernel(h1, h2, norb, nelec, ci0=c0, verbose=5)
|
[
"h5py.File",
"pyscf.lib.num_threads",
"pyscf.fci.direct_spin0.FCI",
"pyscf.ao2mo.kernel",
"numpy.zeros",
"pyscf.fci.cistring.num_strings",
"functools.reduce",
"pyscf.lo.lowdin",
"pyscf.lib.unpack_tril"
] |
[((485, 497), 'pyscf.lo.lowdin', 'lo.lowdin', (['s'], {}), '(s)\n', (494, 497), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((614, 649), 'functools.reduce', 'reduce', (['numpy.dot', '(orb.T, h1, orb)'], {}), '(numpy.dot, (orb.T, h1, orb))\n', (620, 649), False, 'from functools import reduce\n'), ((655, 677), 'pyscf.ao2mo.kernel', 'ao2mo.kernel', (['mol', 'orb'], {}), '(mol, orb)\n', (667, 677), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1252, 1270), 'pyscf.lib.num_threads', 'lib.num_threads', (['(1)'], {}), '(1)\n', (1267, 1270), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1770, 1810), 'pyscf.fci.cistring.num_strings', 'fci.cistring.num_strings', (['norb', 'nelec[0]'], {}), '(norb, nelec[0])\n', (1794, 1810), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1816, 1837), 'numpy.zeros', 'numpy.zeros', (['(na, na)'], {}), '((na, na))\n', (1827, 1837), False, 'import numpy\n'), ((1630, 1670), 'h5py.File', 'h5py.File', (['"""spin_op_hamiltonian.h5"""', '"""r"""'], {}), "('spin_op_hamiltonian.h5', 'r')\n", (1639, 1670), False, 'import h5py\n'), ((1686, 1716), 'pyscf.lib.unpack_tril', 'lib.unpack_tril', (["f['h1'].value"], {}), "(f['h1'].value)\n", (1701, 1716), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n'), ((1879, 1901), 'pyscf.fci.direct_spin0.FCI', 'fci.direct_spin0.FCI', ([], {}), '()\n', (1899, 1901), False, 'from pyscf import gto, lo, fci, ao2mo, scf, lib\n')]
|
#!/usr/bin/python3
#
# RaspberryPIの操作
#
import sys
import json
import RPi.GPIO as GPIO
import adafruit_dht
from board import *
import smbus
import time
import re
from decimal import *
from gpiozero import LED
from datetime import datetime
#AD/DAモジュール設定
address = 0x48
A0 = 0x40
A1 = 0x41
A2 = 0x42
A3 = 0x43
# GPIO.BCM番号
#gpio_led = [35,36,37,38]
gpio_led = [19,16,26,20]
gpio_dht11 = D18 # 12番のBCM GPIO番号
gpio_ds18 = 4
gpio_pump = 17
gpio_trig = 23
gpio_echo = 24
MAX_DISTANCE = 220
timeOut = MAX_DISTANCE*60
WATER_LEVEL_MAX = 29
WATER_LEVEL_FULL = 20
SYSFILE_DS18B20 = '/sys/bus/w1/devices/28-01204c43b99b/w1_slave'
RETRY_TDS_MAX = 5
RETRY_TDS_DELAY = 0.5
RETRY_DISTANCE_MAX = 30
RETRY_DISTANCE_DELAY = 0.5
gpio_subp_relay = 5
gpio_subp_level = 6
#
# ポンプ動作開始
#
def pump_start():
# GPIO.setmode(GPIO.BOARD)
timestr = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print(f' {timestr} pump_start', file=sys.stderr)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_pump, GPIO.OUT)
GPIO.output(gpio_pump, GPIO.HIGH)
return True
#
# ポンプ動作停止
#
def pump_stop():
# GPIO.setmode(GPIO.BOARD)
timestr = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print(f' {timestr} pump_stop', file=sys.stderr)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_pump, GPIO.OUT)
GPIO.output(gpio_pump, GPIO.LOW)
return True
#
# センサー値の取得
#
def measure_temp_humid():
dht11 = adafruit_dht.DHT11(gpio_dht11, use_pulseio=False)
temperature = dht11.temperature
humidity = dht11.humidity
result = {}
result['air_temp'] = float(f"{temperature:.1f}")
result['humidity'] = float(f"{humidity:.1f}")
return result
def measure_water_temp():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_ds18, GPIO.IN, pull_up_down=GPIO.PUD_UP)
result = {}
try:
with open(SYSFILE_DS18B20, mode='r') as f:
striped = [line.strip() for line in f.readlines()]
f.close()
if 'YES' in striped[0]:
values = re.findall(r't=([0-9]+)', striped[1])
water_temp = int(values[0]) / 1000
result['water_temp'] = float(f"{water_temp:.1f}")
return result
except Exception as e:
print(e, file=sys.stderr)
return result
def pulseIn(pin,level,timeOut):
t0 = time.time()
while(GPIO.input(pin) != level):
if((time.time() - t0) > timeOut * 0.000001):
return 0;
t0 = time.time()
while(GPIO.input(pin) == level):
if((time.time() - t0) > timeOut * 0.000001):
return 0;
pulseTime = (time.time() - t0) * 1000000
return pulseTime
def getSonar():
GPIO.setmode(GPIO.BCM)
GPIO.setup(gpio_trig, GPIO.OUT)
GPIO.setup(gpio_echo, GPIO.IN)
GPIO.output(gpio_trig, GPIO.HIGH)
time.sleep(0.00001)
GPIO.output(gpio_trig, GPIO.LOW)
pingTime = pulseIn(gpio_echo, GPIO.HIGH, timeOut)
distance = pingTime * 340.0 / 2.0 / 10000.0
return distance
def measure_water_level():
for i in range(RETRY_DISTANCE_MAX):
distance = getSonar()
# print(distance, file=sys.stderr)
if 0 != distance and distance < 100:
break;
time.sleep(RETRY_DISTANCE_DELAY)
# %を計算(0~100に制限)
water_level = int((WATER_LEVEL_MAX - distance) * 100 / WATER_LEVEL_FULL)
water_level = min(100, max(water_level, 0))
result = {}
result['distance'] = float(f"{distance:.1f}")
result['water_level'] = water_level
return result
def measure_tds(temperature):
AREF = 5
ADCRANGE = 256
KVALUE = 1.0274
bus = smbus.SMBus(1)
bus.write_byte(address,A2)
for i in range(RETRY_TDS_MAX):
value = bus.read_byte(address)
# print(value, file=sys.stderr)
if 0 < value and value < 100:
break;
time.sleep(RETRY_TDS_DELAY)
voltage = value * AREF / ADCRANGE
ecValue = (133.42*voltage**3 - 255.86*voltage**2 + 857.39*voltage) * KVALUE
# print(ecValue, file=sys.stderr)
ecValue25 = ecValue / (1.0+0.02*(temperature-25.0))
# print(ecValue25, file=sys.stderr)
# print(1413/ecValue25, file=sys.stderr)
ecResult = ecValue25 / 1000
result = {}
result['voltage'] = float(f"{voltage:.2f}")
result['tds_level'] = float(f"{ecResult:.2f}")
return result
def measure_brightness():
bus = smbus.SMBus(1)
bus.write_byte(address,A0)
value = bus.read_byte(address)
time.sleep(0.1)
value = bus.read_byte(address) # 2回読まないとあまり変化しない
bus.close()
result = {}
result['brightness'] = 255 - value
return result
def measure_sensor(num):
if num == 0:
return measure_temp_humid()
elif num == 1:
return measure_water_temp()
elif num == 2:
return measure_water_level()
elif num == 3:
result = measure_water_temp()
return measure_tds(result['water_temp'])
elif num == 4:
return measure_brightness()
else:
return {}
#
# 全センサー値の取得
#
def measure_sensors_internal():
result = {}
dic = measure_temp_humid()
result.update(dic)
dic = measure_water_temp()
result.update(dic)
dic = measure_water_level()
result.update(dic)
dic = measure_tds(result['water_temp'])
result.update(dic)
dic = measure_brightness()
result.update(dic)
return result
def measure_sensors():
result = {}
# exception時のリトライは合計5回まで。
for i in range(5):
try:
result = measure_sensors_internal();
except Exception as e:
print(e, file=sys.stderr)
if len(result) != 0:
break
return result
#
# LED ON/OFF
# num 0:青 1:緑 2:黄 3:赤
#
def set_led(num, state):
output = GPIO.HIGH if state == "on" else GPIO.LOW
# GPIO.setmode(GPIO.BOARD)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_led[num], GPIO.OUT)
# print(f'{gpio_led[num]}', file=sys.stderr);
GPIO.output(gpio_led[num], output)
# GPIO.cleanup();
return True
def update_led(color):
leds = {'blue': 0, 'green': 1, 'yellow': 2, 'red': 3}
for key, value in leds.items():
if color == key:
set_led(value, 'on')
else:
set_led(value, 'off')
#
# サブポンプ動作開始
#
def sub_pump_start():
timestr = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print(f' {timestr} sub_pump_start', file=sys.stderr)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_subp_relay, GPIO.OUT)
GPIO.output(gpio_subp_relay, GPIO.HIGH)
return True
#
# サブポンプ動作停止
#
def sub_pump_stop():
timestr = datetime.now().strftime('%Y/%m/%d %H:%M:%S')
print(f' {timestr} sub_pump_stop', file=sys.stderr)
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_subp_relay, GPIO.OUT)
GPIO.output(gpio_subp_relay, GPIO.LOW)
return True
#
# サブタンクの水の状態確認
#
def sub_level_check():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(gpio_subp_level, GPIO.IN)
reading = GPIO.input(gpio_subp_level)
result = False
if reading == GPIO.HIGH:
result = True
return result
#
# サブタンクの水終了コールバック登録
#
def sub_pump_set_end_cb(sub_pump_end):
GPIO.add_event_detect(gpio_subp_level, GPIO.FALLING, sub_pump_end, 1000)
pass
|
[
"RPi.GPIO.setmode",
"adafruit_dht.DHT11",
"RPi.GPIO.setup",
"time.time",
"time.sleep",
"RPi.GPIO.add_event_detect",
"re.findall",
"RPi.GPIO.input",
"RPi.GPIO.output",
"datetime.datetime.now",
"RPi.GPIO.setwarnings",
"smbus.SMBus"
] |
[((922, 944), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (934, 944), True, 'import RPi.GPIO as GPIO\n'), ((946, 969), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (962, 969), True, 'import RPi.GPIO as GPIO\n'), ((971, 1002), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_pump', 'GPIO.OUT'], {}), '(gpio_pump, GPIO.OUT)\n', (981, 1002), True, 'import RPi.GPIO as GPIO\n'), ((1004, 1037), 'RPi.GPIO.output', 'GPIO.output', (['gpio_pump', 'GPIO.HIGH'], {}), '(gpio_pump, GPIO.HIGH)\n', (1015, 1037), True, 'import RPi.GPIO as GPIO\n'), ((1217, 1239), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (1229, 1239), True, 'import RPi.GPIO as GPIO\n'), ((1241, 1264), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (1257, 1264), True, 'import RPi.GPIO as GPIO\n'), ((1266, 1297), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_pump', 'GPIO.OUT'], {}), '(gpio_pump, GPIO.OUT)\n', (1276, 1297), True, 'import RPi.GPIO as GPIO\n'), ((1299, 1331), 'RPi.GPIO.output', 'GPIO.output', (['gpio_pump', 'GPIO.LOW'], {}), '(gpio_pump, GPIO.LOW)\n', (1310, 1331), True, 'import RPi.GPIO as GPIO\n'), ((1396, 1445), 'adafruit_dht.DHT11', 'adafruit_dht.DHT11', (['gpio_dht11'], {'use_pulseio': '(False)'}), '(gpio_dht11, use_pulseio=False)\n', (1414, 1445), False, 'import adafruit_dht\n'), ((1659, 1681), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (1671, 1681), True, 'import RPi.GPIO as GPIO\n'), ((1683, 1706), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (1699, 1706), True, 'import RPi.GPIO as GPIO\n'), ((1708, 1764), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_ds18', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(gpio_ds18, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (1718, 1764), True, 'import RPi.GPIO as GPIO\n'), ((2192, 2203), 'time.time', 'time.time', ([], {}), '()\n', (2201, 2203), False, 'import time\n'), ((2304, 2315), 'time.time', 'time.time', ([], {}), '()\n', (2313, 2315), False, 'import time\n'), ((2488, 2510), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (2500, 2510), True, 'import RPi.GPIO as GPIO\n'), ((2512, 2543), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_trig', 'GPIO.OUT'], {}), '(gpio_trig, GPIO.OUT)\n', (2522, 2543), True, 'import RPi.GPIO as GPIO\n'), ((2545, 2575), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_echo', 'GPIO.IN'], {}), '(gpio_echo, GPIO.IN)\n', (2555, 2575), True, 'import RPi.GPIO as GPIO\n'), ((2577, 2610), 'RPi.GPIO.output', 'GPIO.output', (['gpio_trig', 'GPIO.HIGH'], {}), '(gpio_trig, GPIO.HIGH)\n', (2588, 2610), True, 'import RPi.GPIO as GPIO\n'), ((2612, 2629), 'time.sleep', 'time.sleep', (['(1e-05)'], {}), '(1e-05)\n', (2622, 2629), False, 'import time\n'), ((2633, 2665), 'RPi.GPIO.output', 'GPIO.output', (['gpio_trig', 'GPIO.LOW'], {}), '(gpio_trig, GPIO.LOW)\n', (2644, 2665), True, 'import RPi.GPIO as GPIO\n'), ((3320, 3334), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (3331, 3334), False, 'import smbus\n'), ((3995, 4009), 'smbus.SMBus', 'smbus.SMBus', (['(1)'], {}), '(1)\n', (4006, 4009), False, 'import smbus\n'), ((4071, 4086), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (4081, 4086), False, 'import time\n'), ((5242, 5264), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (5254, 5264), True, 'import RPi.GPIO as GPIO\n'), ((5266, 5289), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (5282, 5289), True, 'import RPi.GPIO as GPIO\n'), ((5291, 5326), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_led[num]', 'GPIO.OUT'], {}), '(gpio_led[num], GPIO.OUT)\n', (5301, 5326), True, 'import RPi.GPIO as GPIO\n'), ((5374, 5408), 'RPi.GPIO.output', 'GPIO.output', (['gpio_led[num]', 'output'], {}), '(gpio_led[num], output)\n', (5385, 5408), True, 'import RPi.GPIO as GPIO\n'), ((5779, 5801), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (5791, 5801), True, 'import RPi.GPIO as GPIO\n'), ((5803, 5826), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (5819, 5826), True, 'import RPi.GPIO as GPIO\n'), ((5828, 5865), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_subp_relay', 'GPIO.OUT'], {}), '(gpio_subp_relay, GPIO.OUT)\n', (5838, 5865), True, 'import RPi.GPIO as GPIO\n'), ((5867, 5906), 'RPi.GPIO.output', 'GPIO.output', (['gpio_subp_relay', 'GPIO.HIGH'], {}), '(gpio_subp_relay, GPIO.HIGH)\n', (5878, 5906), True, 'import RPi.GPIO as GPIO\n'), ((6069, 6091), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (6081, 6091), True, 'import RPi.GPIO as GPIO\n'), ((6093, 6116), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (6109, 6116), True, 'import RPi.GPIO as GPIO\n'), ((6118, 6155), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_subp_relay', 'GPIO.OUT'], {}), '(gpio_subp_relay, GPIO.OUT)\n', (6128, 6155), True, 'import RPi.GPIO as GPIO\n'), ((6157, 6195), 'RPi.GPIO.output', 'GPIO.output', (['gpio_subp_relay', 'GPIO.LOW'], {}), '(gpio_subp_relay, GPIO.LOW)\n', (6168, 6195), True, 'import RPi.GPIO as GPIO\n'), ((6253, 6275), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (6265, 6275), True, 'import RPi.GPIO as GPIO\n'), ((6277, 6300), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (6293, 6300), True, 'import RPi.GPIO as GPIO\n'), ((6302, 6338), 'RPi.GPIO.setup', 'GPIO.setup', (['gpio_subp_level', 'GPIO.IN'], {}), '(gpio_subp_level, GPIO.IN)\n', (6312, 6338), True, 'import RPi.GPIO as GPIO\n'), ((6350, 6377), 'RPi.GPIO.input', 'GPIO.input', (['gpio_subp_level'], {}), '(gpio_subp_level)\n', (6360, 6377), True, 'import RPi.GPIO as GPIO\n'), ((6516, 6588), 'RPi.GPIO.add_event_detect', 'GPIO.add_event_detect', (['gpio_subp_level', 'GPIO.FALLING', 'sub_pump_end', '(1000)'], {}), '(gpio_subp_level, GPIO.FALLING, sub_pump_end, 1000)\n', (6537, 6588), True, 'import RPi.GPIO as GPIO\n'), ((2211, 2226), 'RPi.GPIO.input', 'GPIO.input', (['pin'], {}), '(pin)\n', (2221, 2226), True, 'import RPi.GPIO as GPIO\n'), ((2323, 2338), 'RPi.GPIO.input', 'GPIO.input', (['pin'], {}), '(pin)\n', (2333, 2338), True, 'import RPi.GPIO as GPIO\n'), ((2956, 2988), 'time.sleep', 'time.sleep', (['RETRY_DISTANCE_DELAY'], {}), '(RETRY_DISTANCE_DELAY)\n', (2966, 2988), False, 'import time\n'), ((3506, 3533), 'time.sleep', 'time.sleep', (['RETRY_TDS_DELAY'], {}), '(RETRY_TDS_DELAY)\n', (3516, 3533), False, 'import time\n'), ((825, 839), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (837, 839), False, 'from datetime import datetime\n'), ((1121, 1135), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1133, 1135), False, 'from datetime import datetime\n'), ((2424, 2435), 'time.time', 'time.time', ([], {}), '()\n', (2433, 2435), False, 'import time\n'), ((5678, 5692), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5690, 5692), False, 'from datetime import datetime\n'), ((5969, 5983), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5981, 5983), False, 'from datetime import datetime\n'), ((1936, 1972), 're.findall', 're.findall', (['"""t=([0-9]+)"""', 'striped[1]'], {}), "('t=([0-9]+)', striped[1])\n", (1946, 1972), False, 'import re\n'), ((2244, 2255), 'time.time', 'time.time', ([], {}), '()\n', (2253, 2255), False, 'import time\n'), ((2356, 2367), 'time.time', 'time.time', ([], {}), '()\n', (2365, 2367), False, 'import time\n')]
|
#!/usr/bin/env python3
import sys
import numpy as np
from PySide6.QtCore import Qt, Slot
from PySide6.QtGui import QAction, QKeySequence
from PySide6.QtWidgets import (
QApplication, QHBoxLayout, QLabel,
QMainWindow, QPushButton, QSizePolicy,
QVBoxLayout, QWidget
)
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.figure import Figure
from skimage import data
from skimage.color import rgb2hed
from skimage.exposure import rescale_intensity
class ApplicationWindow(QMainWindow):
"""Example base on the example by 'scikit-image' gallery"""
def __init__(self, root, parent=None):
super(ApplicationWindow, self).__init__(parent)
self._main = QWidget()
self.setCentralWidget(self._main)
# Main menu bar
self.menu = self.menuBar()
self.menu_file = self.menu.addMenu("File")
exit = QAction("Exit", self, triggered=root.quit)
self.menu_file.addAction(exit)
self.menu_about = self.menu.addMenu("&About")
about = QAction(
"About Qt", self,
shortcut=QKeySequence(QKeySequence.HelpContents),
triggered=root.aboutQt
)
self.menu_about.addAction(about)
# Create an artificial color close to the original one
self.ihc_rgb = data.immunohistochemistry()
self.ihc_hed = rgb2hed(self.ihc_rgb)
main_layout = QVBoxLayout(self._main)
plot_layout = QHBoxLayout()
button_layout = QHBoxLayout()
label_layout = QHBoxLayout()
self.canvas1 = FigureCanvas(Figure(figsize=(5, 5)))
self.canvas2 = FigureCanvas(Figure(figsize=(5, 5)))
self._ax1 = self.canvas1.figure.subplots()
self._ax2 = self.canvas2.figure.subplots()
self._ax1.imshow(self.ihc_rgb)
plot_layout.addWidget(self.canvas1)
plot_layout.addWidget(self.canvas2)
self.button1 = QPushButton("Hematoxylin")
self.button2 = QPushButton("Eosin")
self.button3 = QPushButton("DAB")
self.button4 = QPushButton("Fluorescene")
self.button1.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button2.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button3.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button4.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
self.button1.clicked.connect(self.plot_hematoxylin)
self.button2.clicked.connect(self.plot_eosin)
self.button3.clicked.connect(self.plot_dab)
self.button4.clicked.connect(self.plot_final)
self.label1 = QLabel("Original", alignment=Qt.AlignCenter)
self.label2 = QLabel("", alignment=Qt.AlignCenter)
font = self.label1.font()
font.setPointSize(16)
self.label1.setFont(font)
self.label2.setFont(font)
label_layout.addWidget(self.label1)
label_layout.addWidget(self.label2)
button_layout.addWidget(self.button1)
button_layout.addWidget(self.button2)
button_layout.addWidget(self.button3)
button_layout.addWidget(self.button4)
main_layout.addLayout(label_layout, 2)
main_layout.addLayout(plot_layout, 88)
main_layout.addLayout(button_layout, 10)
# Default image
self.plot_hematoxylin()
def set_buttons_state(self, states):
self.button1.setEnabled(states[0])
self.button2.setEnabled(states[1])
self.button3.setEnabled(states[2])
self.button4.setEnabled(states[3])
@Slot()
def plot_hematoxylin(self):
cmap_hema = LinearSegmentedColormap.from_list(
"mycmap", ["white", "navy"]
)
self._ax2.imshow(self.ihc_hed[:, :, 0], cmap=cmap_hema)
self.canvas2.draw()
self.label2.setText("Hematoxylin")
self.set_buttons_state((False, True, True, True))
@Slot()
def plot_eosin(self):
cmap_eosin = LinearSegmentedColormap.from_list(
"mycmap", ["darkviolet", "white"]
)
self._ax2.imshow(self.ihc_hed[:, :, 1], cmap=cmap_eosin)
self.canvas2.draw()
self.label2.setText("Eosin")
self.set_buttons_state((True, False, True, True))
@Slot()
def plot_dab(self):
cmap_dab = LinearSegmentedColormap.from_list(
"mycmap", ["white", "saddlebrown"]
)
self._ax2.imshow(self.ihc_hed[:, :, 2], cmap=cmap_dab)
self.canvas2.draw()
self.label2.setText("DAB")
self.set_buttons_state((True, True, False, True))
@Slot()
def plot_final(self):
h = rescale_intensity(self.ihc_hed[:, :, 0], out_range=(0, 1))
d = rescale_intensity(self.ihc_hed[:, :, 2], out_range=(0, 1))
zdh = np.dstack((np.zeros_like(h), d, h))
self._ax2.imshow(zdh)
self.canvas2.draw()
self.label2.setText("Stain separated image")
self.set_buttons_state((True, True, True, False))
def main():
root = QApplication(sys.argv)
app = ApplicationWindow(root)
app.show()
sys.exit(root.exec())
if __name__ == "__main__":
main()
|
[
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.zeros_like",
"PySide6.QtGui.QAction",
"skimage.data.immunohistochemistry",
"PySide6.QtGui.QKeySequence",
"skimage.exposure.rescale_intensity",
"PySide6.QtWidgets.QVBoxLayout",
"PySide6.QtWidgets.QWidget",
"PySide6.QtWidgets.QPushButton",
"PySide6.QtWidgets.QLabel",
"skimage.color.rgb2hed",
"matplotlib.figure.Figure",
"PySide6.QtWidgets.QApplication",
"PySide6.QtCore.Slot",
"PySide6.QtWidgets.QHBoxLayout"
] |
[((3659, 3665), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (3663, 3665), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4002, 4008), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4006, 4008), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4341, 4347), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4345, 4347), False, 'from PySide6.QtCore import Qt, Slot\n'), ((4673, 4679), 'PySide6.QtCore.Slot', 'Slot', ([], {}), '()\n', (4677, 4679), False, 'from PySide6.QtCore import Qt, Slot\n'), ((5092, 5114), 'PySide6.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (5104, 5114), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((782, 791), 'PySide6.QtWidgets.QWidget', 'QWidget', ([], {}), '()\n', (789, 791), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((960, 1002), 'PySide6.QtGui.QAction', 'QAction', (['"""Exit"""', 'self'], {'triggered': 'root.quit'}), "('Exit', self, triggered=root.quit)\n", (967, 1002), False, 'from PySide6.QtGui import QAction, QKeySequence\n'), ((1387, 1414), 'skimage.data.immunohistochemistry', 'data.immunohistochemistry', ([], {}), '()\n', (1412, 1414), False, 'from skimage import data\n'), ((1438, 1459), 'skimage.color.rgb2hed', 'rgb2hed', (['self.ihc_rgb'], {}), '(self.ihc_rgb)\n', (1445, 1459), False, 'from skimage.color import rgb2hed\n'), ((1483, 1506), 'PySide6.QtWidgets.QVBoxLayout', 'QVBoxLayout', (['self._main'], {}), '(self._main)\n', (1494, 1506), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1529, 1542), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1540, 1542), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1567, 1580), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1578, 1580), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1604, 1617), 'PySide6.QtWidgets.QHBoxLayout', 'QHBoxLayout', ([], {}), '()\n', (1615, 1617), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((1995, 2021), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Hematoxylin"""'], {}), "('Hematoxylin')\n", (2006, 2021), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2045, 2065), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Eosin"""'], {}), "('Eosin')\n", (2056, 2065), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2089, 2107), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""DAB"""'], {}), "('DAB')\n", (2100, 2107), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2131, 2157), 'PySide6.QtWidgets.QPushButton', 'QPushButton', (['"""Fluorescene"""'], {}), "('Fluorescene')\n", (2142, 2157), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2727, 2771), 'PySide6.QtWidgets.QLabel', 'QLabel', (['"""Original"""'], {'alignment': 'Qt.AlignCenter'}), "('Original', alignment=Qt.AlignCenter)\n", (2733, 2771), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((2794, 2830), 'PySide6.QtWidgets.QLabel', 'QLabel', (['""""""'], {'alignment': 'Qt.AlignCenter'}), "('', alignment=Qt.AlignCenter)\n", (2800, 2830), False, 'from PySide6.QtWidgets import QApplication, QHBoxLayout, QLabel, QMainWindow, QPushButton, QSizePolicy, QVBoxLayout, QWidget\n'), ((3718, 3780), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['white', 'navy']"], {}), "('mycmap', ['white', 'navy'])\n", (3751, 3780), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4056, 4124), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['darkviolet', 'white']"], {}), "('mycmap', ['darkviolet', 'white'])\n", (4089, 4124), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4391, 4460), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'LinearSegmentedColormap.from_list', (['"""mycmap"""', "['white', 'saddlebrown']"], {}), "('mycmap', ['white', 'saddlebrown'])\n", (4424, 4460), False, 'from matplotlib.colors import LinearSegmentedColormap\n'), ((4718, 4776), 'skimage.exposure.rescale_intensity', 'rescale_intensity', (['self.ihc_hed[:, :, 0]'], {'out_range': '(0, 1)'}), '(self.ihc_hed[:, :, 0], out_range=(0, 1))\n', (4735, 4776), False, 'from skimage.exposure import rescale_intensity\n'), ((4789, 4847), 'skimage.exposure.rescale_intensity', 'rescale_intensity', (['self.ihc_hed[:, :, 2]'], {'out_range': '(0, 1)'}), '(self.ihc_hed[:, :, 2], out_range=(0, 1))\n', (4806, 4847), False, 'from skimage.exposure import rescale_intensity\n'), ((1655, 1677), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1661, 1677), False, 'from matplotlib.figure import Figure\n'), ((1715, 1737), 'matplotlib.figure.Figure', 'Figure', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (1721, 1737), False, 'from matplotlib.figure import Figure\n'), ((1173, 1212), 'PySide6.QtGui.QKeySequence', 'QKeySequence', (['QKeySequence.HelpContents'], {}), '(QKeySequence.HelpContents)\n', (1185, 1212), False, 'from PySide6.QtGui import QAction, QKeySequence\n'), ((4873, 4889), 'numpy.zeros_like', 'np.zeros_like', (['h'], {}), '(h)\n', (4886, 4889), True, 'import numpy as np\n')]
|
# -*- encoding: utf-8 -*-
from flask import url_for, redirect, render_template, flash, g, session
from app import app
@app.route('/')
def index():
return render_template('index.html')
|
[
"app.app.route",
"flask.render_template"
] |
[((121, 135), 'app.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (130, 135), False, 'from app import app\n'), ((157, 186), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (172, 186), False, 'from flask import url_for, redirect, render_template, flash, g, session\n')]
|
from typing import List
from secrets import token_urlsafe
from datetime import datetime
from pydantic import BaseModel, Field
class NewTokenForm(BaseModel):
scopes: List[str] = Field(default_factory=list)
class Token(BaseModel):
tid: str = Field(default_factory=lambda: token_urlsafe(15))
refresh_token: str = Field(default_factory=lambda: token_urlsafe(20))
scopes: List[str]
created_at: datetime = Field(default_factory=datetime.now)
active: bool = True
|
[
"pydantic.Field",
"secrets.token_urlsafe"
] |
[((184, 211), 'pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (189, 211), False, 'from pydantic import BaseModel, Field\n'), ((425, 460), 'pydantic.Field', 'Field', ([], {'default_factory': 'datetime.now'}), '(default_factory=datetime.now)\n', (430, 460), False, 'from pydantic import BaseModel, Field\n'), ((283, 300), 'secrets.token_urlsafe', 'token_urlsafe', (['(15)'], {}), '(15)\n', (296, 300), False, 'from secrets import token_urlsafe\n'), ((357, 374), 'secrets.token_urlsafe', 'token_urlsafe', (['(20)'], {}), '(20)\n', (370, 374), False, 'from secrets import token_urlsafe\n')]
|
import torch
import subprocess
import time
import logging
from subprocess import PIPE
"""
ADAPTED FROM <NAME>'S CODE
PYTHON VERSION = 3.6
"""
# Takes about 8GB
ndim = 25_000
logging.basicConfig(format='[%(asctime)s] %(filename)s [%(levelname).1s] %(message)s', level=logging.DEBUG)
def get_gpu_usage():
command = "nvidia-smi --query-gpu=memory.total,memory.used,memory.free --format=csv,noheader,nounits"
result = subprocess.run(command.split(), stdout=PIPE, stderr=PIPE)
resultList = result.stdout.strip().split(b",")
mem_total = resultList[0].decode("utf-8")
mem_used = resultList[1].decode("utf-8")
mem_free = resultList[2].decode("utf-8")
mem_used = int(mem_used)
mem_free = int(mem_free)
logging.info(f"GPU Stats: Total: {mem_total}, Free: {mem_free} Used: {mem_used}")
return mem_used / mem_free
def run_dummy_job():
start = time.time()
random1 = torch.randn([ndim, ndim]).to("cuda")
random2 = torch.randn([ndim, ndim]).to("cuda")
while time.time() - start < 0.5 * 60:
random1 = random1 * random2
random2 = random2 * random1
del random1, random2
torch.cuda.empty_cache()
def main():
while True:
usage = get_gpu_usage()
if usage < 0.2:
logging.debug("Running dummy GPU job for 30 seconds")
run_dummy_job()
else:
logging.debug("Waiting for 30 seconds")
time.sleep(30)
if __name__ == "__main__":
main()
|
[
"logging.debug",
"logging.basicConfig",
"torch.randn",
"time.time",
"time.sleep",
"logging.info",
"torch.cuda.empty_cache"
] |
[((187, 304), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s] %(filename)s [%(levelname).1s] %(message)s"""', 'level': 'logging.DEBUG'}), "(format=\n '[%(asctime)s] %(filename)s [%(levelname).1s] %(message)s', level=\n logging.DEBUG)\n", (206, 304), False, 'import logging\n'), ((747, 833), 'logging.info', 'logging.info', (['f"""GPU Stats: Total: {mem_total}, Free: {mem_free} Used: {mem_used}"""'], {}), "(\n f'GPU Stats: Total: {mem_total}, Free: {mem_free} Used: {mem_used}')\n", (759, 833), False, 'import logging\n'), ((894, 905), 'time.time', 'time.time', ([], {}), '()\n', (903, 905), False, 'import time\n'), ((1151, 1175), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (1173, 1175), False, 'import torch\n'), ((920, 945), 'torch.randn', 'torch.randn', (['[ndim, ndim]'], {}), '([ndim, ndim])\n', (931, 945), False, 'import torch\n'), ((971, 996), 'torch.randn', 'torch.randn', (['[ndim, ndim]'], {}), '([ndim, ndim])\n', (982, 996), False, 'import torch\n'), ((1018, 1029), 'time.time', 'time.time', ([], {}), '()\n', (1027, 1029), False, 'import time\n'), ((1273, 1326), 'logging.debug', 'logging.debug', (['"""Running dummy GPU job for 30 seconds"""'], {}), "('Running dummy GPU job for 30 seconds')\n", (1286, 1326), False, 'import logging\n'), ((1381, 1420), 'logging.debug', 'logging.debug', (['"""Waiting for 30 seconds"""'], {}), "('Waiting for 30 seconds')\n", (1394, 1420), False, 'import logging\n'), ((1433, 1447), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1443, 1447), False, 'import time\n')]
|
import torch
import torch.nn as nn
import torchvision
import numpy as np
import torch.nn.functional as F
import math
from torch.autograd import Variable
import torch.utils.model_zoo as model_zoo
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
''' StackGAN for Text to Image Generation'''
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv2d') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.02)
if m.bias is not None:
m.bias.data.fill_(0.0)
class Conditioning_Augmentation_StageI(nn.Module):
def __init__(self):
super(Conditioning_Augmentation_StageI, self).__init__()
self.fc1 = nn.Linear(768, 256)
self.relu = nn.ReLU()
def forward(self, x):
x = x.to(device)
y = self.relu(self.fc1(x))
u0 = y[:, :128]
logvar = y[:, 128:]
sigma0 = torch.exp(logvar/2)
epsilon = torch.randn((x.shape[0], 128)).to(device)
out = u0 + sigma0*epsilon
return out, u0, logvar
class StageI_GAN_Gen(nn.Module):
def __init__(self, condaug1):
super(StageI_GAN_Gen, self).__init__()
# In: [batch_size, 128]
self.CA1 = condaug1()
self.fc = nn.Sequential(
nn.Linear(228, 4*4*128*8),
nn.BatchNorm1d(4*4*128*8),
nn.ReLU(True))
self.upsample1 = nn.Upsample(scale_factor=2, mode='nearest')
self.conv1 = nn.Conv2d(128*8, 64*8, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm1 = nn.BatchNorm2d(64*8)
self.upsample2 = nn.Upsample(scale_factor=2, mode='nearest')
self.conv2 = nn.Conv2d(64*8, 32*8, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(32*8)
self.upsample3 = nn.Upsample(scale_factor=2, mode='nearest')
self.conv3 = nn.Conv2d(32*8, 16*8, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm3 = nn.BatchNorm2d(16*8)
self.upsample4 = nn.Upsample(scale_factor=2, mode='nearest')
self.conv4 = nn.Conv2d(16*8, 8*8, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm4 = nn.BatchNorm2d(8*8)
self.conv5 = nn.Conv2d(8*8, 3, kernel_size=3, stride=1, padding=1, bias = False)
self.tanh = nn.Tanh()
self.relu = nn.ReLU()
def forward(self, x):
x = x.to(device)
x, u0, logvar = self.CA1(x)
z = torch.randn((x.shape[0], 100)).to(device)
x = torch.cat((x, z), 1)
x = self.fc(x)
x = torch.reshape(x, (-1, 128*8, 4, 4))
x = self.relu(self.batchnorm1(self.conv1(self.upsample1(x))))
x = self.relu(self.batchnorm2(self.conv2(self.upsample2(x))))
x = self.relu(self.batchnorm3(self.conv3(self.upsample3(x))))
x = self.relu(self.batchnorm4(self.conv4(self.upsample4(x))))
x = self.tanh(self.conv5(x))
return x, u0, logvar
class DownSample1(nn.Module):
def __init__(self):
super(DownSample1, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias = False)
self.conv2 = nn.Conv2d(64, 128, kernel_size=4, stride=2, padding=1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias = False)
self.batchnorm3 = nn.BatchNorm2d(256)
self.conv4 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias = False)
self.batchnorm4 = nn.BatchNorm2d(512)
self.leakyrelu = nn.LeakyReLU(0.2, inplace = True)
def forward(self, x):
x = x.to(device)
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.batchnorm2(self.conv2(x)))
x = self.leakyrelu(self.batchnorm3(self.conv3(x)))
x = self.leakyrelu(self.batchnorm4(self.conv4(x)))
return x
class StageI_GAN_Dis(nn.Module):
def __init__(self, downsample):
super(StageI_GAN_Dis, self).__init__()
self.fc1 = nn.Linear(768, 128)
self.downsample = downsample()
self.conv1 = nn.Conv2d(640, 512, kernel_size=1, stride=1, bias = False)
self.batchnorm1 = nn.BatchNorm2d(512)
self.leakyrelu = nn.LeakyReLU(0.2)
self.conv2 = nn.Conv2d(512, 1, kernel_size = 4, stride = 1)
self.sigmoid = nn.Sigmoid()
def forward(self, x, text):
x = x.to(device)
text = text.to(device)
x = self.downsample(x)
text = self.fc1(text)
text = text.unsqueeze(2)
text = text.unsqueeze(3)
text1 = torch.cat((text, text, text, text), 2)
text = torch.cat((text1, text1, text1, text1), 3)
x = torch.cat((x, text), 1)
x = self.leakyrelu(self.batchnorm1(self.conv1(x)))
x = self.conv2(x)
x = torch.squeeze(x, 3)
x = torch.squeeze(x, 2)
x = self.sigmoid(x)
return x
class Conditioning_Augmentation_StageII(nn.Module):
def __init__(self):
super(Conditioning_Augmentation_StageII, self).__init__()
self.fc1 = nn.Linear(768, 256)
self.relu = nn.ReLU()
def forward(self, x):
x = x.to(device)
#print(x.shape)
y = self.relu(self.fc1(x))
u0 = y[:, :128]
logvar = y[:, 128:]
sigma0 = torch.exp(logvar/2)
epsilon = torch.randn((x.shape[0], 128)).to(device)
out = u0 + sigma0*epsilon
return out, u0, logvar
class DownSample2(nn.Module):
def __init__(self):
super(DownSample2, self).__init__()
self.conv1 = nn.Conv2d(3, 128, kernel_size=3, stride=1, padding=1, bias = False)
self.conv2 = nn.Conv2d(128, 256, kernel_size=4, stride=2, padding=1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(256)
self.conv3 = nn.Conv2d(256, 512, kernel_size=4, stride=2, padding=1, bias = False)
self.batchnorm3 = nn.BatchNorm2d(512)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = x.to(device)
x = self.relu(self.conv1(x))
x = self.relu(self.batchnorm2(self.conv2(x)))
x = self.relu(self.batchnorm3(self.conv3(x)))
return x
class ResidualBlock(nn.Module):
def __init__(self):
super(ResidualBlock, self).__init__()
self.conv1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm1 = nn.BatchNorm2d(512)
self.conv2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(512)
self.relu = nn.ReLU()
def forward(self, x):
x = x.to(device)
identity = x
x = self.relu(self.batchnorm1(self.conv1(x)))
x = self.batchnorm2(self.conv2(x))
x = x + identity
x = self.relu(x)
return x
class UpSampling2(nn.Module):
def __init__(self):
super(UpSampling2, self).__init__()
self.upsample = nn.Upsample(scale_factor=2, mode='nearest')
self.conv1 = nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm1 = nn.BatchNorm2d(256)
self.conv2 = nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm3 = nn.BatchNorm2d(64)
self.conv4 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm4 = nn.BatchNorm2d(32)
self.conv5 = nn.Conv2d(32, 3, kernel_size=3, stride=1, padding=1, bias = False)
self.relu = nn.ReLU()
def forward(self, x):
x = x.to(device)
x = self.relu(self.batchnorm1(self.conv1(self.upsample(x))))
x = self.relu(self.batchnorm2(self.conv2(self.upsample(x))))
x = self.relu(self.batchnorm3(self.conv3(self.upsample(x))))
x = self.relu(self.batchnorm4(self.conv4(self.upsample(x))))
x = self.conv5(x)
return x
class StageII_GAN_Gen(nn.Module):
def __init__(self, downsample, resblock, upsample, condaug2):
super(StageII_GAN_Gen, self).__init__()
self.downsample = downsample()
self.resblock = resblock()
self.upsample = upsample()
self.CA2 = condaug2()
self.conv = nn.Conv2d(640, 512, kernel_size=3, stride=1, padding=1, bias = False)
self.batchnorm = nn.BatchNorm2d(512)
self.relu = nn.ReLU(True)
self.tanh = nn.Tanh()
def forward(self, x, text):
x = x.to(device)
text = text.to(device)
text, u0, logvar = self.CA2(text)
text = text.unsqueeze(2)
text = text.unsqueeze(3)
text = text.repeat(1, 1, 16, 16)
x = self.downsample(x)
x = torch.cat((x, text), 1)
x = self.relu(self.batchnorm(self.conv(x)))
x = self.resblock(self.resblock(self.resblock(self.resblock(x))))
x = self.upsample(x)
x = self.tanh(x)
return x, u0, logvar
class DownSample3(nn.Module):
def __init__(self):
super(DownSample3, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.conv2 = nn.Conv2d(64, 128, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.batchnorm2 = nn.BatchNorm2d(128)
self.conv3 = nn.Conv2d(128, 256, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.batchnorm3 = nn.BatchNorm2d(256)
self.conv4 = nn.Conv2d(256, 512, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.batchnorm4 = nn.BatchNorm2d(512)
self.conv5 = nn.Conv2d(512, 1024, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.batchnorm5 = nn.BatchNorm2d(1024)
self.conv6 = nn.Conv2d(1024, 2048, kernel_size = 4, stride = 2, padding = 1, bias = False)
self.batchnorm6 = nn.BatchNorm2d(2048)
self.conv7 = nn.Conv2d(2048, 1024, kernel_size = 3, stride = 1, padding = 1, bias = False)
self.batchnorm7 = nn.BatchNorm2d(1024)
self.conv8 = nn.Conv2d(1024, 512, kernel_size = 3, stride = 1, padding = 1, bias = False)
self.batchnorm8 = nn.BatchNorm2d(512)
self.leakyrelu = nn.LeakyReLU(0.2)
def forward(self, x):
x = x.to(device)
x = self.leakyrelu(self.conv1(x))
x = self.leakyrelu(self.batchnorm2(self.conv2(x)))
x = self.leakyrelu(self.batchnorm3(self.conv3(x)))
x = self.leakyrelu(self.batchnorm4(self.conv4(x)))
x = self.leakyrelu(self.batchnorm5(self.conv5(x)))
x = self.leakyrelu(self.batchnorm6(self.conv6(x)))
x = self.leakyrelu(self.batchnorm7(self.conv7(x)))
x = self.leakyrelu(self.batchnorm8(self.conv8(x)))
return x
class StageII_GAN_Dis(nn.Module):
def __init__(self, downsample):
super(StageII_GAN_Dis, self).__init__()
self.fc0 = nn.Linear(768, 128)
self.downsample = downsample()
self.conv1 = nn.Conv2d(640, 512, kernel_size=3, stride=1, padding = 1, bias = False)
self.batchnorm1 = nn.BatchNorm2d(512)
self.leakyrelu = nn.LeakyReLU(0.2)
self.conv2 = nn.Conv2d(512, 1, kernel_size = 4, stride = 4)
self.sigmoid = nn.Sigmoid()
def forward(self, x, text):
x = x.to(device)
text = text.to(device)
x = self.downsample(x)
text = self.fc0(text)
text = text.unsqueeze(2)
text = text.unsqueeze(3)
text = text.repeat(1, 1, 4, 4)
x = torch.cat((x, text), 1)
x = self.leakyrelu(self.batchnorm1(self.conv1(x)))
x = self.sigmoid(self.conv2(x))
x = x.squeeze(3)
x = x.squeeze(2)
return x
|
[
"torch.nn.ReLU",
"torch.nn.Tanh",
"torch.nn.Conv2d",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.randn",
"torch.squeeze",
"torch.exp",
"torch.nn.Upsample",
"torch.nn.BatchNorm2d",
"torch.cuda.is_available",
"torch.nn.LeakyReLU",
"torch.nn.Linear",
"torch.reshape",
"torch.nn.Sigmoid"
] |
[((228, 253), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (251, 253), False, 'import torch\n'), ((874, 893), 'torch.nn.Linear', 'nn.Linear', (['(768)', '(256)'], {}), '(768, 256)\n', (883, 893), True, 'import torch.nn as nn\n'), ((914, 923), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (921, 923), True, 'import torch.nn as nn\n'), ((1089, 1110), 'torch.exp', 'torch.exp', (['(logvar / 2)'], {}), '(logvar / 2)\n', (1098, 1110), False, 'import torch\n'), ((1585, 1628), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (1596, 1628), True, 'import torch.nn as nn\n'), ((1650, 1724), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128 * 8)', '(64 * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(128 * 8, 64 * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (1659, 1724), True, 'import torch.nn as nn\n'), ((1749, 1771), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64 * 8)'], {}), '(64 * 8)\n', (1763, 1771), True, 'import torch.nn as nn\n'), ((1796, 1839), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (1807, 1839), True, 'import torch.nn as nn\n'), ((1861, 1934), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64 * 8)', '(32 * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(64 * 8, 32 * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (1870, 1934), True, 'import torch.nn as nn\n'), ((1959, 1981), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32 * 8)'], {}), '(32 * 8)\n', (1973, 1981), True, 'import torch.nn as nn\n'), ((2006, 2049), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (2017, 2049), True, 'import torch.nn as nn\n'), ((2071, 2144), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32 * 8)', '(16 * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(32 * 8, 16 * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (2080, 2144), True, 'import torch.nn as nn\n'), ((2169, 2191), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(16 * 8)'], {}), '(16 * 8)\n', (2183, 2191), True, 'import torch.nn as nn\n'), ((2216, 2259), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (2227, 2259), True, 'import torch.nn as nn\n'), ((2281, 2353), 'torch.nn.Conv2d', 'nn.Conv2d', (['(16 * 8)', '(8 * 8)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(16 * 8, 8 * 8, kernel_size=3, stride=1, padding=1, bias=False)\n', (2290, 2353), True, 'import torch.nn as nn\n'), ((2378, 2399), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(8 * 8)'], {}), '(8 * 8)\n', (2392, 2399), True, 'import torch.nn as nn\n'), ((2420, 2487), 'torch.nn.Conv2d', 'nn.Conv2d', (['(8 * 8)', '(3)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(8 * 8, 3, kernel_size=3, stride=1, padding=1, bias=False)\n', (2429, 2487), True, 'import torch.nn as nn\n'), ((2508, 2517), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (2515, 2517), True, 'import torch.nn as nn\n'), ((2538, 2547), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2545, 2547), True, 'import torch.nn as nn\n'), ((2703, 2723), 'torch.cat', 'torch.cat', (['(x, z)', '(1)'], {}), '((x, z), 1)\n', (2712, 2723), False, 'import torch\n'), ((2759, 2796), 'torch.reshape', 'torch.reshape', (['x', '(-1, 128 * 8, 4, 4)'], {}), '(x, (-1, 128 * 8, 4, 4))\n', (2772, 2796), False, 'import torch\n'), ((3263, 3327), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(3, 64, kernel_size=4, stride=2, padding=1, bias=False)\n', (3272, 3327), True, 'import torch.nn as nn\n'), ((3360, 3426), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(64, 128, kernel_size=4, stride=2, padding=1, bias=False)\n', (3369, 3426), True, 'import torch.nn as nn\n'), ((3455, 3474), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (3469, 3474), True, 'import torch.nn as nn\n'), ((3497, 3564), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(128, 256, kernel_size=4, stride=2, padding=1, bias=False)\n', (3506, 3564), True, 'import torch.nn as nn\n'), ((3593, 3612), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (3607, 3612), True, 'import torch.nn as nn\n'), ((3635, 3702), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(256, 512, kernel_size=4, stride=2, padding=1, bias=False)\n', (3644, 3702), True, 'import torch.nn as nn\n'), ((3731, 3750), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (3745, 3750), True, 'import torch.nn as nn\n'), ((3777, 3808), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (3789, 3808), True, 'import torch.nn as nn\n'), ((4246, 4265), 'torch.nn.Linear', 'nn.Linear', (['(768)', '(128)'], {}), '(768, 128)\n', (4255, 4265), True, 'import torch.nn as nn\n'), ((4326, 4382), 'torch.nn.Conv2d', 'nn.Conv2d', (['(640)', '(512)'], {'kernel_size': '(1)', 'stride': '(1)', 'bias': '(False)'}), '(640, 512, kernel_size=1, stride=1, bias=False)\n', (4335, 4382), True, 'import torch.nn as nn\n'), ((4411, 4430), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (4425, 4430), True, 'import torch.nn as nn\n'), ((4456, 4473), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (4468, 4473), True, 'import torch.nn as nn\n'), ((4496, 4538), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1)'], {'kernel_size': '(4)', 'stride': '(1)'}), '(512, 1, kernel_size=4, stride=1)\n', (4505, 4538), True, 'import torch.nn as nn\n'), ((4566, 4578), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4576, 4578), True, 'import torch.nn as nn\n'), ((4823, 4861), 'torch.cat', 'torch.cat', (['(text, text, text, text)', '(2)'], {}), '((text, text, text, text), 2)\n', (4832, 4861), False, 'import torch\n'), ((4877, 4919), 'torch.cat', 'torch.cat', (['(text1, text1, text1, text1)', '(3)'], {}), '((text1, text1, text1, text1), 3)\n', (4886, 4919), False, 'import torch\n'), ((4932, 4955), 'torch.cat', 'torch.cat', (['(x, text)', '(1)'], {}), '((x, text), 1)\n', (4941, 4955), False, 'import torch\n'), ((5054, 5073), 'torch.squeeze', 'torch.squeeze', (['x', '(3)'], {}), '(x, 3)\n', (5067, 5073), False, 'import torch\n'), ((5086, 5105), 'torch.squeeze', 'torch.squeeze', (['x', '(2)'], {}), '(x, 2)\n', (5099, 5105), False, 'import torch\n'), ((5315, 5334), 'torch.nn.Linear', 'nn.Linear', (['(768)', '(256)'], {}), '(768, 256)\n', (5324, 5334), True, 'import torch.nn as nn\n'), ((5355, 5364), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5362, 5364), True, 'import torch.nn as nn\n'), ((5554, 5575), 'torch.exp', 'torch.exp', (['(logvar / 2)'], {}), '(logvar / 2)\n', (5563, 5575), False, 'import torch\n'), ((5821, 5886), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(3, 128, kernel_size=3, stride=1, padding=1, bias=False)\n', (5830, 5886), True, 'import torch.nn as nn\n'), ((5919, 5986), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(128, 256, kernel_size=4, stride=2, padding=1, bias=False)\n', (5928, 5986), True, 'import torch.nn as nn\n'), ((6015, 6034), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (6029, 6034), True, 'import torch.nn as nn\n'), ((6057, 6124), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(256, 512, kernel_size=4, stride=2, padding=1, bias=False)\n', (6066, 6124), True, 'import torch.nn as nn\n'), ((6153, 6172), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (6167, 6172), True, 'import torch.nn as nn\n'), ((6194, 6215), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (6201, 6215), True, 'import torch.nn as nn\n'), ((6557, 6624), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(512, 512, kernel_size=3, stride=1, padding=1, bias=False)\n', (6566, 6624), True, 'import torch.nn as nn\n'), ((6653, 6672), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (6667, 6672), True, 'import torch.nn as nn\n'), ((6695, 6762), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(512, 512, kernel_size=3, stride=1, padding=1, bias=False)\n', (6704, 6762), True, 'import torch.nn as nn\n'), ((6791, 6810), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (6805, 6810), True, 'import torch.nn as nn\n'), ((6832, 6841), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6839, 6841), True, 'import torch.nn as nn\n'), ((7205, 7248), 'torch.nn.Upsample', 'nn.Upsample', ([], {'scale_factor': '(2)', 'mode': '"""nearest"""'}), "(scale_factor=2, mode='nearest')\n", (7216, 7248), True, 'import torch.nn as nn\n'), ((7270, 7337), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(256)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(512, 256, kernel_size=3, stride=1, padding=1, bias=False)\n', (7279, 7337), True, 'import torch.nn as nn\n'), ((7366, 7385), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (7380, 7385), True, 'import torch.nn as nn\n'), ((7408, 7475), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(128)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(256, 128, kernel_size=3, stride=1, padding=1, bias=False)\n', (7417, 7475), True, 'import torch.nn as nn\n'), ((7504, 7523), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (7518, 7523), True, 'import torch.nn as nn\n'), ((7546, 7612), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(64)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(128, 64, kernel_size=3, stride=1, padding=1, bias=False)\n', (7555, 7612), True, 'import torch.nn as nn\n'), ((7641, 7659), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(64)'], {}), '(64)\n', (7655, 7659), True, 'import torch.nn as nn\n'), ((7682, 7747), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(32)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(64, 32, kernel_size=3, stride=1, padding=1, bias=False)\n', (7691, 7747), True, 'import torch.nn as nn\n'), ((7776, 7794), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(32)'], {}), '(32)\n', (7790, 7794), True, 'import torch.nn as nn\n'), ((7817, 7881), 'torch.nn.Conv2d', 'nn.Conv2d', (['(32)', '(3)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(32, 3, kernel_size=3, stride=1, padding=1, bias=False)\n', (7826, 7881), True, 'import torch.nn as nn\n'), ((7904, 7913), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7911, 7913), True, 'import torch.nn as nn\n'), ((8596, 8663), 'torch.nn.Conv2d', 'nn.Conv2d', (['(640)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(640, 512, kernel_size=3, stride=1, padding=1, bias=False)\n', (8605, 8663), True, 'import torch.nn as nn\n'), ((8691, 8710), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (8705, 8710), True, 'import torch.nn as nn\n'), ((8731, 8744), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (8738, 8744), True, 'import torch.nn as nn\n'), ((8765, 8774), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (8772, 8774), True, 'import torch.nn as nn\n'), ((9056, 9079), 'torch.cat', 'torch.cat', (['(x, text)', '(1)'], {}), '((x, text), 1)\n', (9065, 9079), False, 'import torch\n'), ((9411, 9475), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(3, 64, kernel_size=4, stride=2, padding=1, bias=False)\n', (9420, 9475), True, 'import torch.nn as nn\n'), ((9514, 9580), 'torch.nn.Conv2d', 'nn.Conv2d', (['(64)', '(128)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(64, 128, kernel_size=4, stride=2, padding=1, bias=False)\n', (9523, 9580), True, 'import torch.nn as nn\n'), ((9615, 9634), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(128)'], {}), '(128)\n', (9629, 9634), True, 'import torch.nn as nn\n'), ((9657, 9724), 'torch.nn.Conv2d', 'nn.Conv2d', (['(128)', '(256)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(128, 256, kernel_size=4, stride=2, padding=1, bias=False)\n', (9666, 9724), True, 'import torch.nn as nn\n'), ((9759, 9778), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(256)'], {}), '(256)\n', (9773, 9778), True, 'import torch.nn as nn\n'), ((9801, 9868), 'torch.nn.Conv2d', 'nn.Conv2d', (['(256)', '(512)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(256, 512, kernel_size=4, stride=2, padding=1, bias=False)\n', (9810, 9868), True, 'import torch.nn as nn\n'), ((9903, 9922), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (9917, 9922), True, 'import torch.nn as nn\n'), ((9945, 10013), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1024)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(512, 1024, kernel_size=4, stride=2, padding=1, bias=False)\n', (9954, 10013), True, 'import torch.nn as nn\n'), ((10048, 10068), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (10062, 10068), True, 'import torch.nn as nn\n'), ((10091, 10160), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(2048)'], {'kernel_size': '(4)', 'stride': '(2)', 'padding': '(1)', 'bias': '(False)'}), '(1024, 2048, kernel_size=4, stride=2, padding=1, bias=False)\n', (10100, 10160), True, 'import torch.nn as nn\n'), ((10195, 10215), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(2048)'], {}), '(2048)\n', (10209, 10215), True, 'import torch.nn as nn\n'), ((10238, 10307), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(1024)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(2048, 1024, kernel_size=3, stride=1, padding=1, bias=False)\n', (10247, 10307), True, 'import torch.nn as nn\n'), ((10342, 10362), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {}), '(1024)\n', (10356, 10362), True, 'import torch.nn as nn\n'), ((10385, 10453), 'torch.nn.Conv2d', 'nn.Conv2d', (['(1024)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(1024, 512, kernel_size=3, stride=1, padding=1, bias=False)\n', (10394, 10453), True, 'import torch.nn as nn\n'), ((10488, 10507), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (10502, 10507), True, 'import torch.nn as nn\n'), ((10533, 10550), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (10545, 10550), True, 'import torch.nn as nn\n'), ((11224, 11243), 'torch.nn.Linear', 'nn.Linear', (['(768)', '(128)'], {}), '(768, 128)\n', (11233, 11243), True, 'import torch.nn as nn\n'), ((11304, 11371), 'torch.nn.Conv2d', 'nn.Conv2d', (['(640)', '(512)'], {'kernel_size': '(3)', 'stride': '(1)', 'padding': '(1)', 'bias': '(False)'}), '(640, 512, kernel_size=3, stride=1, padding=1, bias=False)\n', (11313, 11371), True, 'import torch.nn as nn\n'), ((11402, 11421), 'torch.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(512)'], {}), '(512)\n', (11416, 11421), True, 'import torch.nn as nn\n'), ((11447, 11464), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {}), '(0.2)\n', (11459, 11464), True, 'import torch.nn as nn\n'), ((11486, 11528), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(1)'], {'kernel_size': '(4)', 'stride': '(4)'}), '(512, 1, kernel_size=4, stride=4)\n', (11495, 11528), True, 'import torch.nn as nn\n'), ((11556, 11568), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (11566, 11568), True, 'import torch.nn as nn\n'), ((11847, 11870), 'torch.cat', 'torch.cat', (['(x, text)', '(1)'], {}), '((x, text), 1)\n', (11856, 11870), False, 'import torch\n'), ((1458, 1489), 'torch.nn.Linear', 'nn.Linear', (['(228)', '(4 * 4 * 128 * 8)'], {}), '(228, 4 * 4 * 128 * 8)\n', (1467, 1489), True, 'import torch.nn as nn\n'), ((1497, 1528), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(4 * 4 * 128 * 8)'], {}), '(4 * 4 * 128 * 8)\n', (1511, 1528), True, 'import torch.nn as nn\n'), ((1536, 1549), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (1543, 1549), True, 'import torch.nn as nn\n'), ((1127, 1157), 'torch.randn', 'torch.randn', (['(x.shape[0], 128)'], {}), '((x.shape[0], 128))\n', (1138, 1157), False, 'import torch\n'), ((2649, 2679), 'torch.randn', 'torch.randn', (['(x.shape[0], 100)'], {}), '((x.shape[0], 100))\n', (2660, 2679), False, 'import torch\n'), ((5592, 5622), 'torch.randn', 'torch.randn', (['(x.shape[0], 128)'], {}), '((x.shape[0], 128))\n', (5603, 5622), False, 'import torch\n')]
|
#!/share/apps/python/bin/python
import sys, os
import config as conf
import data as data
import module as module
type = conf.cps_type
assembly = sys.argv[1]
gtfFile = sys.argv[2]
chr = sys.argv[3]
outputdir = sys.argv[4]
#type = sys.argv[5]
if assembly == 'hg19':
tpseqAnno = data.hm_tpseqAll
tpseqIntr = conf.hm_tpseqInterval; tpseqThr = conf.hm_tpseqThreshold
elif assembly == 'mm9':
tpseq = data.ms_tpseqAll
tpseqIntr = conf.ms_tpseqInterval; tpseqThr = conf.ms_tpseqThreshold
#if type == 'all':
# inputdir = '/'.join(gtfFile.split('/')[:-1]) + '/pos/'
# tpseqAnno = os.listdir(inputdir)
# tpseqAnno = filter(lambda x: chr + '.cps.pos' in x, tpseqAnno)[0]
# tpseqAnno = inputdir + tpseqAnno
def flankingIntr(geneid, lastExon, trxToCheck, interval, sense):
trxToCheck = filter(lambda x: x.sense() == sense, trxToCheck)
if sense == '+':
trxToCheck = filter(lambda x: x.start() - lastExon.end() > 0 and (x.start() - lastExon.end()) + 100 < interval, trxToCheck) ##
trxToCheck = filter(lambda x: not len(x.exons()) == 1, trxToCheck)
# trxToCheck = filter(lambda x: not geneid == x.geneid(), trxToCheck) ##
if len(trxToCheck) > 0: interval = min(map(lambda x: (x.start() - lastExon.end()) + 100, trxToCheck)) ##
else: #elif sense == '-':
trxToCheck = filter(lambda x: lastExon.start() - x.end() > 0 and (lastExon.start() - x.end()) + 100 < interval, trxToCheck) ##
trxToCheck = filter(lambda x: not len(x.exons()) == 1, trxToCheck)
# trxToCheck = filter(lambda x: not geneid == x.geneid(), trxToCheck) ##
trxToCheck.reverse()
if len(trxToCheck) > 0: interval = min(map(lambda x: (lastExon.start() - x.end()) + 100, trxToCheck)) ##
return interval
def newTrx(trx, x):
chr = trx.chr(); sense = trx.sense(); exons = trx.exons()
nexons = []; nexon = ''
if sense == '+': #forward strand
start = trx.start(); end = x
if len(exons) == 1: #single-exon transcript
nexon = module.exon(chr, exons[-1].start(), x, sense)
nexons.append(nexon)
else: #multi-exon transcript
for i in xrange(1, len(exons)):
if exons[i].start() < x:
nexons.append(exons[i-1])
if i == len(exons) - 1:
nexon = module.exon(chr, exons[-1].start(), x, sense)
nexons.append(nexon)
else:
nexon = module.exon(chr, exons[i-1].start(), x, sense)
nexons.append(nexon)
break
else: #reverse strand
start = x; end = trx.end()
if len(exons) == 1: #single-exon transcript
nexon = module.exon(chr, x, exons[-1].end(), sense)
nexons.append(nexon)
else: #multi-exon transcript
for i in xrange(1, len(exons)):
if exons[-i-1].end() > x:
nexons.append(exons[-i])
if i == len(exons) - 1:
nexon = module.exon(chr, x, exons[0].end(), sense)
nexons.append(nexon)
else:
nexon = module.exon(chr, x, exons[-i].end(), sense)
nexons.append(nexon)
break
nexons.sort(module.cmp0)
# ntrxid = trx.trxid() + '_CPS.' + str(x)
ntrxid = trx.trxid().split('_CPS')[0] + '_CPS.' + str(x)
ntrx = module.transcript(chr, trx.geneid(), ntrxid, nexons[0].start(), nexons[-1].end(), nexons, trx.sense())
ntrx.setTpPos(x)
return ntrx
def filterOverTrxs(trxs):
ntrxs = []; trxNum = len(trxs)
for i in xrange(trxNum):
trx = trxs[i]; flag = True
# if trx.getTpPos() < 0:
if trx.getTpPos() < 0 or len(trx.exons()) == 1:
trxToCheck = trxs[max(0, i-20):i]
trxToCheck += trxs[i+1:min(trxNum, i+20)]
for trxT in trxToCheck:
if trxT.getTpPos() > 0 and module.overlappedTrxs(trx, trxT):
flag = False; break
del trxToCheck
if trx.getTpPos() < 0 and len(trx.exons()) == 1: flag = False
# if trx.trxid().find('RM') > 0: flag = True
if flag: ntrxs += [trx]
else:
if len(trx.exons()) == 1:
trxToCheck = trxs[max(0, i-20):i]
trxToCheck += trxs[i+1:min(trxNum, i+20)]
for trxT in trxToCheck:
if trxT.getTpPos() == trx.getTpPos() and trxT.exonNum() > 1:
flag = False; break
del trxToCheck
if flag: ntrxs += [trx]
else: ntrxs += [trx]
return ntrxs
def updating_cps(trxs, tpseqAnno, tpseqIntr, tpseqThr, type):
cSites = dict(); cSites['+'] = dict(); cSites['-'] = dict()
uSites = dict(); uSites['+'] = dict(); uSites['-'] = dict()
# if type == 'all':
# cSiteD = dict(); cSiteD[chr] = []
# tpseqAnno = open(tpseqAnno)
# for line in tpseqAnno:
# line = line.split('\t')
# if len(line) == 4: cSiteD[chr].append([int(line[1]), float(line[2]), line[3].strip()])
# else: cSiteD = module.readingAnno(tpseqAnno, 'polya')
cSiteD = module.readingAnno(tpseqAnno, 'polya')
for cSite in cSiteD[chr]: cSites[cSite[2]][cSite[0]] = ''
totalTrxNum = 0; correctedTrxNum = 0; ntotalTrxNum = 0
ntrxs = []; trxNum = len(trxs); trxs.sort(module.cmp0)
for i in xrange(trxNum):
trx = trxs[i]
exons = trx.exons(); sense = trx.sense()
interval = tpseqIntr #downstream interval
ntrx = []; trxToCheck = []; tpEnds = []; exonToCheck = exons
if sense == '+':
if len(exonToCheck) == 0: exonToCheck.append(module.exon(exons[-1].chr(), exons[-1].end()+1, exons[-1].end()+1, exons[-1].sense()))
firstExon = exonToCheck[0]; lastExon = exonToCheck[-1]
trxToCheck = filter(lambda x: x.sense() == sense, trxs[i+1:])
else: #elif sense == '-':
if len(exonToCheck) == 0: exonToCheck.append(module.exon(exons[0].chr(), exons[0].end()-1, exons[0].end()-1, exons[0].sense()))
firstExon = exonToCheck[-1]; lastExon = exonToCheck[0]
trxToCheck = filter(lambda x: x.sense() == sense, trxs[:i])
if len(trxToCheck) > 0: interval = flankingIntr(trx.geneid(), lastExon, trxToCheck, interval, sense)
tpEnds, utpEnds = module.getCPS(firstExon, lastExon, exonToCheck, cSiteD, interval, tpseqThr, type)
for tpEnd in tpEnds: uSites[lastExon.sense()][tpEnd] = ''
for utpEnd in utpEnds: uSites[lastExon.sense()][utpEnd] = ''
if len(tpEnds) > 0:
ntrx = map(lambda x: newTrx(trx, x), tpEnds)
tpEnds.sort()
j = -1
if sense == '+' and tpEnds[-1] < lastExon.start(): j = 1 ##
elif sense == '-' and tpEnds[0] > lastExon.end(): j = 1 ##
if j > 0:
ntrxid = trx.trxid() + '_RM'
ntrxs.append(module.transcript(trx.chr(), trx.geneid(), ntrxid, trx.start(), trx.end(), exons, sense))
if len(ntrx) > 0: ntrxs += ntrx; correctedTrxNum += 1
else: ntrxs += [trx]
# filter transcripts
ntrxs.sort(module.cmp0)
ntrxs = module.filterSameTrxs(ntrxs)
ntrxs = module.filterNoneTrxs(ntrxs)
ntrxs = module.checkProperTrxs(ntrxs)
# if type == 'all': ntrxs = filterOverTrxs(ntrxs)
ntrxs = filterOverTrxs(ntrxs)
totalTrxNum += len(trxs); ntotalTrxNum += len(ntrxs)
totalCpsNum = sum(map(lambda x: len(cSites[x].keys()), cSites.keys()))
assignCpsNum = sum(map(lambda x: len(uSites[x].keys()), uSites.keys()))
# printing output
# outputFile = open(outputdir + '/logs/transcripts_' + chr + '.tss.cps' + type + '.logs', 'w')
outputFile = open(outputdir + '/logs/transcripts_' + chr + '.cps.logs', 'w')
outputFile.write(chr + '\t' + str(totalTrxNum) + '\t' + str(correctedTrxNum) + '\t' + str(ntotalTrxNum) + '\t' + str(totalCpsNum) + '\t' + str(assignCpsNum))
outputFile.close()
newGtf = dict(); newGtf[chr] = ntrxs
# outputGtf = outputdir + '/transcripts_' + chr + '.tss.cps.' + type + '.gtf'
outputGtf = outputdir + '/transcripts_' + chr + '.tss.cps.gtf'
module.writeGtf(newGtf, outputGtf)
# if not type == 'all':
# nSiteD = dict(); nSiteD[chr] = []
# for cSite in cSiteD[chr]:
# if not cSite[0] in uSites[cSite[2]].keys(): nSiteD[chr].append(cSite)
# if not os.path.exists(outputdir + '/pos/'): os.makedirs(outputdir + '/pos/')
# outputFile2 = open(outputdir + '/pos/transcripts_' + chr + '.cps.pos', 'w')
# for nSite in nSiteD[chr]:
# outputFile2.write(chr + '\t' + str(nSite[0]) + '\t' + str(nSite[1]) + '\t' + str(nSite[2]) + '\n')
# outputFile2.close()
gtfFile = module.getGtf(gtfFile)
trxs = gtfFile[chr]
updating_cps(trxs, tpseqAnno, tpseqIntr, tpseqThr, type)
|
[
"module.overlappedTrxs",
"module.writeGtf",
"module.readingAnno",
"module.checkProperTrxs",
"module.getCPS",
"module.filterSameTrxs",
"module.getGtf",
"module.filterNoneTrxs"
] |
[((7738, 7760), 'module.getGtf', 'module.getGtf', (['gtfFile'], {}), '(gtfFile)\n', (7751, 7760), True, 'import module as module\n'), ((4467, 4505), 'module.readingAnno', 'module.readingAnno', (['tpseqAnno', '"""polya"""'], {}), "(tpseqAnno, 'polya')\n", (4485, 4505), True, 'import module as module\n'), ((6270, 6298), 'module.filterSameTrxs', 'module.filterSameTrxs', (['ntrxs'], {}), '(ntrxs)\n', (6291, 6298), True, 'import module as module\n'), ((6308, 6336), 'module.filterNoneTrxs', 'module.filterNoneTrxs', (['ntrxs'], {}), '(ntrxs)\n', (6329, 6336), True, 'import module as module\n'), ((6346, 6375), 'module.checkProperTrxs', 'module.checkProperTrxs', (['ntrxs'], {}), '(ntrxs)\n', (6368, 6375), True, 'import module as module\n'), ((7212, 7246), 'module.writeGtf', 'module.writeGtf', (['newGtf', 'outputGtf'], {}), '(newGtf, outputGtf)\n', (7227, 7246), True, 'import module as module\n'), ((5552, 5637), 'module.getCPS', 'module.getCPS', (['firstExon', 'lastExon', 'exonToCheck', 'cSiteD', 'interval', 'tpseqThr', 'type'], {}), '(firstExon, lastExon, exonToCheck, cSiteD, interval, tpseqThr,\n type)\n', (5565, 5637), True, 'import module as module\n'), ((3441, 3473), 'module.overlappedTrxs', 'module.overlappedTrxs', (['trx', 'trxT'], {}), '(trx, trxT)\n', (3462, 3473), True, 'import module as module\n')]
|
from rest_framework import serializers
from constants import help_text
from data import Organism
from interfaces.serializers.base import BaseSerializer
from interfaces.serializers.fields import SourceField, URLField
from interfaces.serializers.relationship import RelationshipSerializer, SourceRelationshipSerializer
class OrganismListSerializer(BaseSerializer):
model = Organism
# properties
name = serializers.CharField(required=True, max_length=200, help_text=help_text.organism_name)
ncbi_taxonomy = serializers.IntegerField(required=False, min_value=0, help_text=help_text.ncbi_taxonomy)
species = serializers.CharField(required=False, max_length=150, help_text=help_text.species)
strain = serializers.CharField(required=False, max_length=150, help_text=help_text.strain)
# write-only
refseq_accession = serializers.CharField(required=False, write_only=True, max_length=50,
help_text=help_text.refseq_accession)
refseq_ftp = serializers.CharField(required=False, write_only=True, max_length=250,
help_text=help_text.refseq_ftp)
genbank_accession = serializers.CharField(required=False, write_only=True, max_length=50,
help_text=help_text.genbank_accession)
genbank_ftp = serializers.CharField(required=False, write_only=True, max_length=250,
help_text=help_text.genbank_ftp)
ncbi_assembly = serializers.IntegerField(required=False, min_value=0, write_only=True,
help_text=help_text.ncbi_assembly)
assembly_accession = serializers.CharField(required=False, write_only=True, max_length=50,
help_text=help_text.assembly_accession)
# url
url = URLField(read_only=True,
view_name='organisms-detail',
lookup_field='protrend_id',
lookup_url_kwarg='protrend_id')
class OrganismDetailSerializer(OrganismListSerializer):
url = None
refseq_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.refseq_accession)
refseq_ftp = serializers.CharField(required=False, max_length=250, help_text=help_text.refseq_ftp)
genbank_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.genbank_accession)
genbank_ftp = serializers.CharField(required=False, max_length=250, help_text=help_text.genbank_ftp)
ncbi_assembly = serializers.IntegerField(required=False, min_value=0, help_text=help_text.ncbi_assembly)
assembly_accession = serializers.CharField(required=False, max_length=50, help_text=help_text.assembly_accession)
# relationships
data_source = SourceRelationshipSerializer(read_only=True,
child=SourceField(read_only=True))
regulator = RelationshipSerializer(read_only=True,
child=serializers.HyperlinkedRelatedField(
read_only=True,
view_name='regulators-detail',
lookup_field='protrend_id',
lookup_url_kwarg='protrend_id'))
gene = RelationshipSerializer(read_only=True,
child=serializers.HyperlinkedRelatedField(
read_only=True,
view_name='genes-detail',
lookup_field='protrend_id',
lookup_url_kwarg='protrend_id'))
tfbs = RelationshipSerializer(read_only=True,
child=serializers.HyperlinkedRelatedField(
read_only=True,
view_name='binding-sites-detail',
lookup_field='protrend_id',
lookup_url_kwarg='protrend_id'))
regulatory_interaction = RelationshipSerializer(read_only=True,
child=serializers.HyperlinkedRelatedField(
read_only=True,
view_name='interactions-detail',
lookup_field='protrend_id',
lookup_url_kwarg='protrend_id'))
|
[
"rest_framework.serializers.HyperlinkedRelatedField",
"interfaces.serializers.fields.URLField",
"rest_framework.serializers.IntegerField",
"rest_framework.serializers.CharField",
"interfaces.serializers.fields.SourceField"
] |
[((416, 508), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(True)', 'max_length': '(200)', 'help_text': 'help_text.organism_name'}), '(required=True, max_length=200, help_text=help_text.\n organism_name)\n', (437, 508), False, 'from rest_framework import serializers\n'), ((524, 617), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)', 'min_value': '(0)', 'help_text': 'help_text.ncbi_taxonomy'}), '(required=False, min_value=0, help_text=help_text.\n ncbi_taxonomy)\n', (548, 617), False, 'from rest_framework import serializers\n'), ((627, 714), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(150)', 'help_text': 'help_text.species'}), '(required=False, max_length=150, help_text=help_text.\n species)\n', (648, 714), False, 'from rest_framework import serializers\n'), ((723, 809), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(150)', 'help_text': 'help_text.strain'}), '(required=False, max_length=150, help_text=help_text.\n strain)\n', (744, 809), False, 'from rest_framework import serializers\n'), ((846, 957), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'write_only': '(True)', 'max_length': '(50)', 'help_text': 'help_text.refseq_accession'}), '(required=False, write_only=True, max_length=50,\n help_text=help_text.refseq_accession)\n', (867, 957), False, 'from rest_framework import serializers\n'), ((1016, 1122), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'write_only': '(True)', 'max_length': '(250)', 'help_text': 'help_text.refseq_ftp'}), '(required=False, write_only=True, max_length=250,\n help_text=help_text.refseq_ftp)\n', (1037, 1122), False, 'from rest_framework import serializers\n'), ((1182, 1294), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'write_only': '(True)', 'max_length': '(50)', 'help_text': 'help_text.genbank_accession'}), '(required=False, write_only=True, max_length=50,\n help_text=help_text.genbank_accession)\n', (1203, 1294), False, 'from rest_framework import serializers\n'), ((1355, 1462), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'write_only': '(True)', 'max_length': '(250)', 'help_text': 'help_text.genbank_ftp'}), '(required=False, write_only=True, max_length=250,\n help_text=help_text.genbank_ftp)\n', (1376, 1462), False, 'from rest_framework import serializers\n'), ((1519, 1628), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)', 'min_value': '(0)', 'write_only': '(True)', 'help_text': 'help_text.ncbi_assembly'}), '(required=False, min_value=0, write_only=True,\n help_text=help_text.ncbi_assembly)\n', (1543, 1628), False, 'from rest_framework import serializers\n'), ((1695, 1808), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'write_only': '(True)', 'max_length': '(50)', 'help_text': 'help_text.assembly_accession'}), '(required=False, write_only=True, max_length=50,\n help_text=help_text.assembly_accession)\n', (1716, 1808), False, 'from rest_framework import serializers\n'), ((1873, 1992), 'interfaces.serializers.fields.URLField', 'URLField', ([], {'read_only': '(True)', 'view_name': '"""organisms-detail"""', 'lookup_field': '"""protrend_id"""', 'lookup_url_kwarg': '"""protrend_id"""'}), "(read_only=True, view_name='organisms-detail', lookup_field=\n 'protrend_id', lookup_url_kwarg='protrend_id')\n", (1881, 1992), False, 'from interfaces.serializers.fields import SourceField, URLField\n'), ((2141, 2236), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(50)', 'help_text': 'help_text.refseq_accession'}), '(required=False, max_length=50, help_text=help_text.\n refseq_accession)\n', (2162, 2236), False, 'from rest_framework import serializers\n'), ((2249, 2339), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(250)', 'help_text': 'help_text.refseq_ftp'}), '(required=False, max_length=250, help_text=help_text.\n refseq_ftp)\n', (2270, 2339), False, 'from rest_framework import serializers\n'), ((2359, 2455), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(50)', 'help_text': 'help_text.genbank_accession'}), '(required=False, max_length=50, help_text=help_text.\n genbank_accession)\n', (2380, 2455), False, 'from rest_framework import serializers\n'), ((2469, 2560), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(250)', 'help_text': 'help_text.genbank_ftp'}), '(required=False, max_length=250, help_text=help_text.\n genbank_ftp)\n', (2490, 2560), False, 'from rest_framework import serializers\n'), ((2576, 2669), 'rest_framework.serializers.IntegerField', 'serializers.IntegerField', ([], {'required': '(False)', 'min_value': '(0)', 'help_text': 'help_text.ncbi_assembly'}), '(required=False, min_value=0, help_text=help_text.\n ncbi_assembly)\n', (2600, 2669), False, 'from rest_framework import serializers\n'), ((2690, 2787), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'required': '(False)', 'max_length': '(50)', 'help_text': 'help_text.assembly_accession'}), '(required=False, max_length=50, help_text=help_text.\n assembly_accession)\n', (2711, 2787), False, 'from rest_framework import serializers\n'), ((2920, 2947), 'interfaces.serializers.fields.SourceField', 'SourceField', ([], {'read_only': '(True)'}), '(read_only=True)\n', (2931, 2947), False, 'from interfaces.serializers.fields import SourceField, URLField\n'), ((3049, 3201), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'read_only': '(True)', 'view_name': '"""regulators-detail"""', 'lookup_field': '"""protrend_id"""', 'lookup_url_kwarg': '"""protrend_id"""'}), "(read_only=True, view_name=\n 'regulators-detail', lookup_field='protrend_id', lookup_url_kwarg=\n 'protrend_id')\n", (3084, 3201), False, 'from rest_framework import serializers\n'), ((3456, 3598), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'read_only': '(True)', 'view_name': '"""genes-detail"""', 'lookup_field': '"""protrend_id"""', 'lookup_url_kwarg': '"""protrend_id"""'}), "(read_only=True, view_name=\n 'genes-detail', lookup_field='protrend_id', lookup_url_kwarg='protrend_id')\n", (3491, 3598), False, 'from rest_framework import serializers\n'), ((3838, 3993), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'read_only': '(True)', 'view_name': '"""binding-sites-detail"""', 'lookup_field': '"""protrend_id"""', 'lookup_url_kwarg': '"""protrend_id"""'}), "(read_only=True, view_name=\n 'binding-sites-detail', lookup_field='protrend_id', lookup_url_kwarg=\n 'protrend_id')\n", (3873, 3993), False, 'from rest_framework import serializers\n'), ((4264, 4418), 'rest_framework.serializers.HyperlinkedRelatedField', 'serializers.HyperlinkedRelatedField', ([], {'read_only': '(True)', 'view_name': '"""interactions-detail"""', 'lookup_field': '"""protrend_id"""', 'lookup_url_kwarg': '"""protrend_id"""'}), "(read_only=True, view_name=\n 'interactions-detail', lookup_field='protrend_id', lookup_url_kwarg=\n 'protrend_id')\n", (4299, 4418), False, 'from rest_framework import serializers\n')]
|
from click.testing import CliRunner
from pathlib import Path
from botrecon import botrecon
import warnings
import re
runner = CliRunner()
path = str(Path('tests', 'data', 'test.csv'))
regex = r'(?:[0-9]{1,3}\.){3}[0-9]{1,3}'
def test_batchify_percent():
with warnings.catch_warnings():
warnings.filterwarnings(
action='ignore',
category=DeprecationWarning,
module=r'.*patsy'
)
# For some reason this line causes a warning,
# but it doesn't happen in any of the other tests
result_batchified = runner.invoke(botrecon, ['-b', 10, '%', path])
assert result_batchified.exit_code == 0
ips_batchified = re.findall(regex, str(result_batchified.stdout_bytes))
result_normal = runner.invoke(botrecon, [path])
ips_normal = re.findall(regex, str(result_normal.stdout_bytes))
assert ips_normal == ips_batchified
def test_batchify_percent_uneven():
result_batchified = runner.invoke(botrecon, ['-b', 6, '%', path])
assert result_batchified.exit_code == 0
ips_batchified = re.findall(regex, str(result_batchified.stdout_bytes))
result_normal = runner.invoke(botrecon, [path])
ips_normal = re.findall(regex, str(result_normal.stdout_bytes))
assert ips_normal == ips_batchified
def test_batchify_percent_too_large():
result_batchified = runner.invoke(botrecon, ['-b', 101, '%', path])
assert result_batchified.exit_code == 2
def test_batchify_percent_negative():
result_batchified = runner.invoke(botrecon, ['-b', -2, '%', path])
assert result_batchified.exit_code == 2
def test_batchify_batches():
result_batchified = runner.invoke(botrecon, ['-b', 10, 'batches', path])
assert result_batchified.exit_code == 0
ips_batchified = re.findall(regex, str(result_batchified.stdout_bytes))
result_normal = runner.invoke(botrecon, [path])
ips_normal = re.findall(regex, str(result_normal.stdout_bytes))
assert ips_normal == ips_batchified
def test_batchify_batches_too_large():
# test data has 5k rows
result_batchified = runner.invoke(botrecon, ['-b', 10000, 'batches', path])
assert result_batchified.exit_code == 2
def test_batchify_batches_negative():
result_batchified = runner.invoke(botrecon, ['-b', -10, 'batches', path])
assert result_batchified.exit_code == 2
def test_batchify_batches_float():
result1 = runner.invoke(botrecon, ['-b', 10.5, 'batches', path])
assert result1.exit_code == 0
result2 = runner.invoke(botrecon, ['-b', 10, 'batches', path])
assert result2.exit_code == 0
ips1 = re.findall(regex, str(result1.stdout_bytes))
ips2 = re.findall(regex, str(result2.stdout_bytes))
assert ips1 == ips2
|
[
"click.testing.CliRunner",
"warnings.filterwarnings",
"warnings.catch_warnings",
"pathlib.Path"
] |
[((128, 139), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (137, 139), False, 'from click.testing import CliRunner\n'), ((151, 184), 'pathlib.Path', 'Path', (['"""tests"""', '"""data"""', '"""test.csv"""'], {}), "('tests', 'data', 'test.csv')\n", (155, 184), False, 'from pathlib import Path\n'), ((267, 292), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), '()\n', (290, 292), False, 'import warnings\n'), ((302, 393), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""', 'category': 'DeprecationWarning', 'module': '""".*patsy"""'}), "(action='ignore', category=DeprecationWarning,\n module='.*patsy')\n", (325, 393), False, 'import warnings\n')]
|
# -*- coding: utf-8 -*-
# Copyright © 2014-2017 <NAME>
#
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Render tag clouds."""
from nikola.plugin_categories import Task
from nikola import utils
from . import engine
import lxml.html
import natsort
import os
_LOGGER = utils.get_logger('render_static_tag_cloud', utils.STDERR_HANDLER)
_DEFAULT_CONFIG = {
# Tag cloud's name (used as CSS class). {0} will be replaced
# by the language.
'name': 'tc-{0}',
# Filename for the HTML fragment. {0} will be replaced by the
# language.
'filename': 'tagcloud-{0}.inc.html',
# The taxonomy type to obtain the classification ("tags")
# from.
'taxonomy_type': 'tag',
# Filename for the CSS. {0} will be replaced by the language.
'style_filename': 'assets/css/tagcloud-{0}.css',
# Maximum number of levels to be generated
'max_number_of_levels': 10,
# Maximum number of tags in cloud. Negative values mean
# that all tags will appear.
'max_tags': -1,
# Tags which appear less often than this number will be
# ignored.
'minimal_number_of_appearances': 1,
# Colors defining a gradient out of which the tag font colors
# are taken. The colors are specified as RGP triples with each
# component being a floating point number between 0.0 and 1.0.
'colors': ((0.4, 0.4, 0.4), (0.4, 0.4, 1.0), (1.0, 1.0, 1.0)),
# Colors defining a gradient out of which the tag background
# colors are taken.
'background_colors': ((0.133, 0.133, 0.133), ),
# Colors defining a gradient out of which the tag border colors
# are taken.
'border_colors': ((0.2, 0.2, 0.2), ),
# Interval (min_value, max_value) for the font size
'font_sizes': (6, 20),
# If positive, will be multiplied by font size to yield the
# CSS border radius and the vertical margin. (The horizontal
# margin is set to zero.)
'round_factor': 0.0,
}
class StaticTagCloud(Task):
"""Render tag clouds for various taxonomies."""
name = "render_static_tag_cloud"
def _render_tag_cloud_html(self, fn, tags, level_weights, config, lang, url_type):
"""Create tag cloud HTML fragment."""
assert fn.startswith(self.site.config["OUTPUT_FOLDER"])
# Create fragment
html = engine.create_tag_cloud_html(config['name'], tags, level_weights)
# Determine location (for link rewriting)
url_part = fn[len(self.site.config["OUTPUT_FOLDER"]) + 1:]
src = os.path.normpath(os.sep + url_part)
src = "/".join(src.split(os.sep))
# Rewrite links
parser = lxml.html.HTMLParser(remove_blank_text=True)
doc = lxml.html.fragment_fromstring(html, parser)
self.site.rewrite_links(doc, src, lang, url_type)
html = (doc.text or '').encode('utf-8') + b''.join([lxml.html.tostring(child, encoding='utf-8', method='html') for child in doc.iterchildren()])
# Write result to disk
with open(fn, "wb") as html_file:
html_file.write(html)
def _render_tag_cloud_css(self, css_fn, tags, level_weights, config):
"""Create tag cloud CSS."""
assert css_fn.startswith(self.site.config["OUTPUT_FOLDER"])
css = engine.create_tag_cloud_css(config['name'], level_weights,
colors=config['colors'],
background_colors=config['background_colors'],
border_colors=config['border_colors'],
font_sizes=config['font_sizes'],
round_factor=config['round_factor'])
with open(css_fn, "wb") as css_file:
css_file.write(css.encode('utf-8'))
def _prepare_tag_cloud(self, lang, config):
"""Create tag cloud task."""
# Collect information
fn = os.path.join(self.site.config['OUTPUT_FOLDER'], config['filename'])
css_fn = os.path.join(self.site.config['OUTPUT_FOLDER'], config['style_filename'])
taxonomy_type = config['taxonomy_type']
posts_per_tag = self.site.posts_per_classification[taxonomy_type][lang]
taxonomy = self.site.taxonomy_plugins[taxonomy_type]
# Compose list of tags, their post count and links
tag_count_url_list = []
for tag in natsort.humansorted(list(posts_per_tag.keys())):
tag_count_url_list.append((
taxonomy.get_classification_friendly_name(tag, lang),
len([post for post in posts_per_tag[tag] if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]),
self.site.link(taxonomy_type, tag, lang)
))
# Get tag cloud data
tags, level_weights = engine.create_tag_cloud_data(tag_count_url_list, max_number_of_levels=config['max_number_of_levels'], max_tags=config['max_tags'], minimal_number_of_appearances=config['minimal_number_of_appearances'])
# Determine url type for rewriting. Must not be relative.
url_type = self.site.config['URL_TYPE']
if url_type == 'rel_path':
url_type = 'full_path'
# Create task for HTML fragment
task = {
'basename': self.name,
'name': fn,
'targets': [fn],
'actions': [(self._render_tag_cloud_html, [fn, tags, level_weights, config, lang, url_type])],
'clean': True,
'uptodate': [utils.config_changed({1: tags, 2: level_weights}, 'nikola.plugins.render_tag_cloud:tags'), utils.config_changed(config, 'nikola.plugins.render_tag_cloud:config')]
}
yield utils.apply_filters(task, self.site.config["FILTERS"])
# Create task for CSS
task = {
'basename': self.name,
'name': css_fn,
'targets': [css_fn],
'actions': [(self._render_tag_cloud_css, [css_fn, tags, level_weights, config])],
'clean': True,
'uptodate': [utils.config_changed({1: tags, 2: level_weights}, 'nikola.plugins.render_tag_cloud:tags'), utils.config_changed(config, 'nikola.plugins.render_tag_cloud:config')]
}
yield utils.apply_filters(task, self.site.config["FILTERS"])
def gen_tasks(self):
"""Generate tasks."""
self.site.scan_posts()
yield self.group_task()
# Create tag clouds
for name, config in self.site.config['RENDER_STATIC_TAG_CLOUDS'].items():
try:
# Generic complete config
generic_config = _DEFAULT_CONFIG.copy()
generic_config.update(config)
for lang in self.site.config['TRANSLATIONS'].keys():
# For a specific language, obtain the config by
# interpolation some strings with lang.
config = generic_config.copy()
config['name'] = config['name'].format(lang)
config['filename'] = config['filename'].format(lang)
config['style_filename'] = config['style_filename'].format(lang)
# Generate tasks
yield self._prepare_tag_cloud(lang, config)
except Exception as e:
_LOGGER.error("Error occured while creating tag cloud '{0}': {1}".format(name, e))
raise e
|
[
"nikola.utils.get_logger",
"os.path.normpath",
"os.path.join",
"nikola.utils.config_changed",
"nikola.utils.apply_filters"
] |
[((1290, 1355), 'nikola.utils.get_logger', 'utils.get_logger', (['"""render_static_tag_cloud"""', 'utils.STDERR_HANDLER'], {}), "('render_static_tag_cloud', utils.STDERR_HANDLER)\n", (1306, 1355), False, 'from nikola import utils\n'), ((3499, 3534), 'os.path.normpath', 'os.path.normpath', (['(os.sep + url_part)'], {}), '(os.sep + url_part)\n', (3515, 3534), False, 'import os\n'), ((4905, 4972), 'os.path.join', 'os.path.join', (["self.site.config['OUTPUT_FOLDER']", "config['filename']"], {}), "(self.site.config['OUTPUT_FOLDER'], config['filename'])\n", (4917, 4972), False, 'import os\n'), ((4990, 5063), 'os.path.join', 'os.path.join', (["self.site.config['OUTPUT_FOLDER']", "config['style_filename']"], {}), "(self.site.config['OUTPUT_FOLDER'], config['style_filename'])\n", (5002, 5063), False, 'import os\n'), ((6680, 6734), 'nikola.utils.apply_filters', 'utils.apply_filters', (['task', "self.site.config['FILTERS']"], {}), "(task, self.site.config['FILTERS'])\n", (6699, 6734), False, 'from nikola import utils\n'), ((7211, 7265), 'nikola.utils.apply_filters', 'utils.apply_filters', (['task', "self.site.config['FILTERS']"], {}), "(task, self.site.config['FILTERS'])\n", (7230, 7265), False, 'from nikola import utils\n'), ((6493, 6590), 'nikola.utils.config_changed', 'utils.config_changed', (['{(1): tags, (2): level_weights}', '"""nikola.plugins.render_tag_cloud:tags"""'], {}), "({(1): tags, (2): level_weights},\n 'nikola.plugins.render_tag_cloud:tags')\n", (6513, 6590), False, 'from nikola import utils\n'), ((6584, 6654), 'nikola.utils.config_changed', 'utils.config_changed', (['config', '"""nikola.plugins.render_tag_cloud:config"""'], {}), "(config, 'nikola.plugins.render_tag_cloud:config')\n", (6604, 6654), False, 'from nikola import utils\n'), ((7024, 7121), 'nikola.utils.config_changed', 'utils.config_changed', (['{(1): tags, (2): level_weights}', '"""nikola.plugins.render_tag_cloud:tags"""'], {}), "({(1): tags, (2): level_weights},\n 'nikola.plugins.render_tag_cloud:tags')\n", (7044, 7121), False, 'from nikola import utils\n'), ((7115, 7185), 'nikola.utils.config_changed', 'utils.config_changed', (['config', '"""nikola.plugins.render_tag_cloud:config"""'], {}), "(config, 'nikola.plugins.render_tag_cloud:config')\n", (7135, 7185), False, 'from nikola import utils\n')]
|
import pyeccodes.accessors as _
def load(h):
_.Template('grib1/mars_labeling.def').load(h)
h.add(_.Constant('GRIBEXSection1Problem', (80 - _.Get('section1Length'))))
h.add(_.Unsigned('number', 1))
h.alias('perturbationNumber', 'number')
h.add(_.Unsigned('ensembleSize', 1))
h.alias('totalNumber', 'ensembleSize')
h.add(_.Sprintf('quantile', "%s:%s", _.Get('number'), _.Get('ensembleSize')))
h.add(_.Unsigned('versionNumberOfExperimentalSuite', 1))
h.alias('powerOfTenUsedToScaleClimateWeight', 'versionNumberOfExperimentalSuite')
h.add(_.Unsigned('implementationDateOfModelCycle', 4))
h.alias('weightAppliedToClimateMonth1', 'implementationDateOfModelCycle')
h.add(_.Unsigned('numberOfReforecastYearsInModelClimate', 3))
h.alias('firstMonthUsedToBuildClimateMonth1', 'numberOfReforecastYearsInModelClimate')
h.add(_.Unsigned('numberOfDaysInClimateSamplingWindow', 3))
h.alias('lastMonthUsedToBuildClimateMonth1', 'numberOfDaysInClimateSamplingWindow')
h.add(_.Unsigned('sampleSizeOfModelClimate', 3))
h.alias('firstMonthUsedToBuildClimateMonth2', 'sampleSizeOfModelClimate')
h.add(_.Unsigned('versionOfModelClimate', 3))
h.alias('lastMonthUsedToBuildClimateMonth2', 'versionOfModelClimate')
h.add(_.Unsigned('efiOrder', 1))
h.add(_.Pad('padding_loc19_2', 11))
|
[
"pyeccodes.accessors.Unsigned",
"pyeccodes.accessors.Pad",
"pyeccodes.accessors.Template",
"pyeccodes.accessors.Get"
] |
[((187, 210), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""number"""', '(1)'], {}), "('number', 1)\n", (197, 210), True, 'import pyeccodes.accessors as _\n'), ((266, 295), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""ensembleSize"""', '(1)'], {}), "('ensembleSize', 1)\n", (276, 295), True, 'import pyeccodes.accessors as _\n'), ((432, 481), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""versionNumberOfExperimentalSuite"""', '(1)'], {}), "('versionNumberOfExperimentalSuite', 1)\n", (442, 481), True, 'import pyeccodes.accessors as _\n'), ((579, 626), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""implementationDateOfModelCycle"""', '(4)'], {}), "('implementationDateOfModelCycle', 4)\n", (589, 626), True, 'import pyeccodes.accessors as _\n'), ((716, 770), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""numberOfReforecastYearsInModelClimate"""', '(3)'], {}), "('numberOfReforecastYearsInModelClimate', 3)\n", (726, 770), True, 'import pyeccodes.accessors as _\n'), ((873, 925), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""numberOfDaysInClimateSamplingWindow"""', '(3)'], {}), "('numberOfDaysInClimateSamplingWindow', 3)\n", (883, 925), True, 'import pyeccodes.accessors as _\n'), ((1025, 1066), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""sampleSizeOfModelClimate"""', '(3)'], {}), "('sampleSizeOfModelClimate', 3)\n", (1035, 1066), True, 'import pyeccodes.accessors as _\n'), ((1156, 1194), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""versionOfModelClimate"""', '(3)'], {}), "('versionOfModelClimate', 3)\n", (1166, 1194), True, 'import pyeccodes.accessors as _\n'), ((1280, 1305), 'pyeccodes.accessors.Unsigned', '_.Unsigned', (['"""efiOrder"""', '(1)'], {}), "('efiOrder', 1)\n", (1290, 1305), True, 'import pyeccodes.accessors as _\n'), ((1317, 1345), 'pyeccodes.accessors.Pad', '_.Pad', (['"""padding_loc19_2"""', '(11)'], {}), "('padding_loc19_2', 11)\n", (1322, 1345), True, 'import pyeccodes.accessors as _\n'), ((52, 89), 'pyeccodes.accessors.Template', '_.Template', (['"""grib1/mars_labeling.def"""'], {}), "('grib1/mars_labeling.def')\n", (62, 89), True, 'import pyeccodes.accessors as _\n'), ((381, 396), 'pyeccodes.accessors.Get', '_.Get', (['"""number"""'], {}), "('number')\n", (386, 396), True, 'import pyeccodes.accessors as _\n'), ((398, 419), 'pyeccodes.accessors.Get', '_.Get', (['"""ensembleSize"""'], {}), "('ensembleSize')\n", (403, 419), True, 'import pyeccodes.accessors as _\n'), ((150, 173), 'pyeccodes.accessors.Get', '_.Get', (['"""section1Length"""'], {}), "('section1Length')\n", (155, 173), True, 'import pyeccodes.accessors as _\n')]
|
from railrl.launchers.launcher_util import run_experiment
import railrl.misc.hyperparameter as hyp
from railrl.launchers.experiments.murtaza.rfeatures_rl import state_td3bc_experiment
from railrl.launchers.arglauncher import run_variants
if __name__ == "__main__":
variant = dict(
env_id='SawyerPushNIPSEasy-v0',
algo_kwargs=dict(
batch_size=1024,
num_epochs=1000,
num_eval_steps_per_epoch=1000,
num_expl_steps_per_train_loop=1000,
num_trains_per_train_loop=1000,
min_num_steps_before_training=10000,
max_path_length=50,
),
td3_trainer_kwargs=dict(
discount=0.99,
),
td3_bc_trainer_kwargs=dict(
discount=0.99,
demo_path=None,
demo_off_policy_path=None,
bc_num_pretrain_steps=10000,
q_num_pretrain_steps=10000,
rl_weight=1.0,
bc_weight=0,
reward_scale=1.0,
target_update_period=2,
policy_update_period=2,
),
replay_buffer_kwargs=dict(
max_size=int(1e6),
fraction_goals_rollout_goals=0.2,
fraction_goals_env_goals=0.5,
),
qf_kwargs=dict(
hidden_sizes=[400, 300],
),
policy_kwargs=dict(
hidden_sizes=[400, 300],
),
save_video=False,
exploration_noise=.5,
td3_bc=True,
num_exps_per_instance=1,
region='us-west-2',
logger_variant=dict(
tensorboard=True,
),
)
search_space = {
'seedid': range(5),
}
sweeper = hyp.DeterministicHyperparameterSweeper(
search_space, default_parameters=variant,
)
variants = []
for variant in sweeper.iterate_hyperparameters():
variants.append(variant)
run_variants(state_td3bc_experiment, variants, run_id=0)
|
[
"railrl.launchers.arglauncher.run_variants",
"railrl.misc.hyperparameter.DeterministicHyperparameterSweeper"
] |
[((1684, 1769), 'railrl.misc.hyperparameter.DeterministicHyperparameterSweeper', 'hyp.DeterministicHyperparameterSweeper', (['search_space'], {'default_parameters': 'variant'}), '(search_space, default_parameters=variant\n )\n', (1722, 1769), True, 'import railrl.misc.hyperparameter as hyp\n'), ((1891, 1947), 'railrl.launchers.arglauncher.run_variants', 'run_variants', (['state_td3bc_experiment', 'variants'], {'run_id': '(0)'}), '(state_td3bc_experiment, variants, run_id=0)\n', (1903, 1947), False, 'from railrl.launchers.arglauncher import run_variants\n')]
|
from scipy.integrate import solve_ivp
import numpy as np
import matplotlib.pyplot as plt
# Milne-Simpson PC method
def milnePC(def_fn, xa, xb, ya, N):
f = def_fn # intakes function to method to approximate
h = (xb - xa) / N # creates step size based on input values of a, b, N
t = np.arange(xa, xb + h, h) # array initialized to hold mesh points t
y = np.zeros((N + 1,)) # array to hold Midpoint Method approximated y values
y[0] = ya # initial condition
# using RK4 to obtain the first 3 points
for i in range(0, N):
if i in range(0, 3):
k1 = h * f(t[i], y[i])
k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))
k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))
k4 = h * f(t[i] + h, y[i] + k3)
y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0
else:
y[i + 1] = y[i-3] + (4*h/3)*(2*f(t[i], y[i]) - f(t[i-1], y[i-1])
+ 2*f(t[i-2], y[i-2]))
y[i + 1] = y[i-1] + (h/3)*(f(t[i+1], y[i+1]) + 4*f(t[i], y[i])
+ f(t[i-1], y[i-1]))
return t, y
# Adams Fourth Order PC
def adamsPC(def_fn, xa, xb, ya, h):
f = def_fn # intakes function to method to approximate
N = int((xb - xa) / h) # creates step size based on input values of a, b, N
t = np.arange(xa, xb + h, h) # array intialized to hold mesh points t
y = np.zeros((N + 1,)) # array to hold Midpoint Method approximated y values
y[0] = ya # initial condition
# using RK4 to obtain the first 3 points
for i in range(0, N):
if i in range(0, 3):
k1 = h * f(t[i], y[i])
k2 = h * f(t[i] + (h / 2.0), y[i] + (k1 / 2.0))
k3 = h * f(t[i] + (h / 2.0), y[i] + (k2 / 2.0))
k4 = h * f(t[i] + h, y[i] + k3)
y[i + 1] = y[i] + (k1 + 2.0 * k2 + 2.0 * k3 + k4) / 6.0
else:
y[i + 1] = y[i] + (h/24.0) * (55.0 * f(t[i], y[i]) - 59.0 * f(t[i - 1], y[i - 1])
+ 37.0 * f(t[i - 2], y[i - 2]) - 9.0 * f(t[i - 3], y[i - 3]))
y[i + 1] = y[i] + (h/24.0) * (9.0 * f(t[i + 1], y[i + 1])
+ 19.0 * f(t[i],y[i]) - 5.0 * f(t[i - 1], y[i - 1]) + f(t[i - 2], y[i - 2]))
return t, y
if __name__ == "__main__":
d_f = lambda x, y: (2 - 2*x*y)/(x**2 + 1)
f = lambda x: (2*x + 1)/(x**2 + 1)
x_1 = np.arange(0, 1.1, 0.1)
x_2 = np.arange(0, 1.05, 0.05)
x_milne_1, result_milne_1 = milnePC(d_f, 0, 1, 1, 10)
x_milne_2, result_milne_2 = milnePC(d_f, 0, 1, 1, 20)
x_adam_1, result_adam_1 = adamsPC(d_f, 0, 1, 1, 0.1)
x_adam_2, result_adam_2 = adamsPC(d_f, 0, 1, 1, 0.05)
y_exact_1 = f(x_1)
y_exact_2 = f(x_2)
print(result_adam_1)
err_milne_1 = np.abs(y_exact_1 - result_milne_1)
err_adam_1 = np.abs(y_exact_1 - result_adam_1)
err_milne_2 = np.abs(y_exact_2 - result_milne_2)
err_adam_2 = np.abs(y_exact_2 - result_adam_2)
print(err_adam_1)
print(err_adam_2)
for i in range(len(err_adam_1)):
print(err_adam_1[i] / err_adam_2[i*2])
print(err_milne_1)
print(err_milne_2)
for i in range(len(err_milne_1)):
print(err_milne_1[i] / err_milne_2[i*2])
plt.figure(1)
plt.plot(x_1, err_adam_1, label='ABM4')
plt.plot(x_1, err_milne_1, label='Milne-Simpson')
#plt.plot(x_2, err_adam_2, label='h=0.05')
plt.xlabel('t')
plt.ylabel('Absolute Error')
plt.title('Stability Comparison when h = 0.1')
plt.legend()
plt.figure(2)
plt.plot(x_1, err_milne_1, label='h=0.1')
plt.plot(x_2, err_milne_2, label='h=0.05')
plt.xlabel('t')
plt.ylabel('Absolute Error')
plt.title('Milne-Simpson Predictor-Corrector')
plt.legend()
plt.show()
|
[
"matplotlib.pyplot.title",
"numpy.abs",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.legend",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.arange",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((301, 325), 'numpy.arange', 'np.arange', (['xa', '(xb + h)', 'h'], {}), '(xa, xb + h, h)\n', (310, 325), True, 'import numpy as np\n'), ((378, 396), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (386, 396), True, 'import numpy as np\n'), ((1381, 1405), 'numpy.arange', 'np.arange', (['xa', '(xb + h)', 'h'], {}), '(xa, xb + h, h)\n', (1390, 1405), True, 'import numpy as np\n'), ((1457, 1475), 'numpy.zeros', 'np.zeros', (['(N + 1,)'], {}), '((N + 1,))\n', (1465, 1475), True, 'import numpy as np\n'), ((2468, 2490), 'numpy.arange', 'np.arange', (['(0)', '(1.1)', '(0.1)'], {}), '(0, 1.1, 0.1)\n', (2477, 2490), True, 'import numpy as np\n'), ((2501, 2525), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {}), '(0, 1.05, 0.05)\n', (2510, 2525), True, 'import numpy as np\n'), ((2851, 2885), 'numpy.abs', 'np.abs', (['(y_exact_1 - result_milne_1)'], {}), '(y_exact_1 - result_milne_1)\n', (2857, 2885), True, 'import numpy as np\n'), ((2903, 2936), 'numpy.abs', 'np.abs', (['(y_exact_1 - result_adam_1)'], {}), '(y_exact_1 - result_adam_1)\n', (2909, 2936), True, 'import numpy as np\n'), ((2955, 2989), 'numpy.abs', 'np.abs', (['(y_exact_2 - result_milne_2)'], {}), '(y_exact_2 - result_milne_2)\n', (2961, 2989), True, 'import numpy as np\n'), ((3007, 3040), 'numpy.abs', 'np.abs', (['(y_exact_2 - result_adam_2)'], {}), '(y_exact_2 - result_adam_2)\n', (3013, 3040), True, 'import numpy as np\n'), ((3312, 3325), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3322, 3325), True, 'import matplotlib.pyplot as plt\n'), ((3331, 3370), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_adam_1'], {'label': '"""ABM4"""'}), "(x_1, err_adam_1, label='ABM4')\n", (3339, 3370), True, 'import matplotlib.pyplot as plt\n'), ((3375, 3424), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_milne_1'], {'label': '"""Milne-Simpson"""'}), "(x_1, err_milne_1, label='Milne-Simpson')\n", (3383, 3424), True, 'import matplotlib.pyplot as plt\n'), ((3477, 3492), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (3487, 3492), True, 'import matplotlib.pyplot as plt\n'), ((3497, 3525), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Error"""'], {}), "('Absolute Error')\n", (3507, 3525), True, 'import matplotlib.pyplot as plt\n'), ((3530, 3576), 'matplotlib.pyplot.title', 'plt.title', (['"""Stability Comparison when h = 0.1"""'], {}), "('Stability Comparison when h = 0.1')\n", (3539, 3576), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3593), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3591, 3593), True, 'import matplotlib.pyplot as plt\n'), ((3599, 3612), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (3609, 3612), True, 'import matplotlib.pyplot as plt\n'), ((3618, 3659), 'matplotlib.pyplot.plot', 'plt.plot', (['x_1', 'err_milne_1'], {'label': '"""h=0.1"""'}), "(x_1, err_milne_1, label='h=0.1')\n", (3626, 3659), True, 'import matplotlib.pyplot as plt\n'), ((3664, 3706), 'matplotlib.pyplot.plot', 'plt.plot', (['x_2', 'err_milne_2'], {'label': '"""h=0.05"""'}), "(x_2, err_milne_2, label='h=0.05')\n", (3672, 3706), True, 'import matplotlib.pyplot as plt\n'), ((3711, 3726), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""t"""'], {}), "('t')\n", (3721, 3726), True, 'import matplotlib.pyplot as plt\n'), ((3731, 3759), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Absolute Error"""'], {}), "('Absolute Error')\n", (3741, 3759), True, 'import matplotlib.pyplot as plt\n'), ((3764, 3810), 'matplotlib.pyplot.title', 'plt.title', (['"""Milne-Simpson Predictor-Corrector"""'], {}), "('Milne-Simpson Predictor-Corrector')\n", (3773, 3810), True, 'import matplotlib.pyplot as plt\n'), ((3815, 3827), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3825, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3833, 3843), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3841, 3843), True, 'import matplotlib.pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 30 10:38:02 2021
@author: Oli
"""
#### Load
from model_interface.wham import WHAM
from Core_functionality.AFTs.agent_class import AFT
from Core_functionality.AFTs.arable_afts import Swidden, SOSH, MOSH, Intense_arable
from Core_functionality.AFTs.livestock_afts import Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p
from Core_functionality.AFTs.forestry_afts import Agroforestry, Logger, Managed_forestry, Abandoned_forestry
from Core_functionality.AFTs.nonex_afts import Hunter_gatherer, Recreationalist, SLM, Conservationist
from Core_functionality.AFTs.land_system_class import land_system
from Core_functionality.AFTs.land_systems import Cropland, Pasture, Rangeland, Forestry, Urban, Unoccupied, Nonex
from Core_functionality.top_down_processes.arson import arson
from Core_functionality.top_down_processes.background_ignitions import background_rate
from Core_functionality.top_down_processes.fire_constraints import fuel_ct, dominant_afr_ct
from Core_functionality.Trees.Transfer_tree import define_tree_links, predict_from_tree, update_pars, predict_from_tree_fast
from Core_functionality.prediction_tools.regression_families import regression_link, regression_transformation
from Core_functionality.Trees.parallel_predict import make_boot_frame, make_boot_frame_AFT, parallel_predict, combine_bootstrap
from dask.distributed import Client
from copy import deepcopy
#################################################
### Instantiate
#################################################
all_afts = [Swidden, SOSH, MOSH, Intense_arable,
Pastoralist, Ext_LF_r, Int_LF_r, Ext_LF_p, Int_LF_p,
Agroforestry, Logger, Managed_forestry, Abandoned_forestry,
Hunter_gatherer, Recreationalist, SLM, Conservationist]
parameters = {
'xlen': 192,
'ylen': 144,
'AFTs': all_afts,
'LS' : [Cropland, Pasture, Rangeland, Forestry, Urban, Unoccupied, Nonex],
'Observers': {'background_rate': background_rate,
'arson': arson,
'fuel_constraint': fuel_ct,
'dominant_afr_constraint': dominant_afr_ct},
'AFT_pars': Core_pars,
'Maps' : Map_data,
'Fire_seasonality': Seasonality,
'Fire_types': {'cfp': 'Vegetation', 'crb': 'Arable', 'hg': 'Vegetation',
'pasture': 'Pasture', 'pyrome': 'Vegetation'},
'Constraint_pars': {'Soil_threshold': 0.1325,
'Dominant_afr_threshold': 0.5,
'Rangeland_stocking_contstraint': True,
'R_s_c_Positive' : False,
'HG_Market_constraint': 7800,
'Arson_threshold': 0.5},
'timestep': 0,
'end_run' : 1,
'reporters': ['Managed_fire', 'Background_ignitions', 'Arson'],
'theta' : 0.1,
'bootstrap': False,
'Seasonality': False
}
### instantiate
test = WHAM(parameters)
### setup
test.setup()
### go
test.go()
|
[
"model_interface.wham.WHAM"
] |
[((3028, 3044), 'model_interface.wham.WHAM', 'WHAM', (['parameters'], {}), '(parameters)\n', (3032, 3044), False, 'from model_interface.wham import WHAM\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Train or evaluate a single classifier with its given set of hyperparameters.
Created on Wed Sep 29 14:23:48 2021
@author: mkalcher, magmueller, shagemann
"""
import argparse
import pickle
from sklearn.dummy import DummyClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC, SVC
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import (
accuracy_score,
cohen_kappa_score,
balanced_accuracy_score,
classification_report,
)
from code.util import (
KNN_K,
MAX_ITER_LOGISTIC,
MAX_ITER_LINEAR_SVC,
ALPHA_SGD,
MAX_ITER_SGD,
)
def load_args():
# setting up CLI
parser = argparse.ArgumentParser(description="Classifier")
parser.add_argument("input_file", help="path to the input pickle file")
parser.add_argument(
"-s",
"--seed",
type=int,
help="seed for the random number generator",
default=None,
)
parser.add_argument(
"-e",
"--export_file",
help="export the trained classifier to the given location",
default=None,
)
parser.add_argument(
"-i",
"--import_file",
help="import a trained classifier from the given location",
default=None,
)
parser.add_argument(
"-m", "--majority", action="store_true", help="majority class classifier"
)
parser.add_argument(
"-f", "--frequency", action="store_true", help="label frequency classifier"
)
parser.add_argument("-v", "--svm", action="store_true", help="SVM classifier")
parser.add_argument("--SGDClassifier", action="store_true", help="SGD classifier")
parser.add_argument(
"--LogisticRegression", action="store_true", help="LogisticRegression"
)
parser.add_argument("--LinearSVC", action="store_true", help="LinearSVC")
parser.add_argument("--MultinomialNB", action="store_true", help="MultinomialNB")
parser.add_argument(
"--knn",
action="store_true",
help="k nearest neighbor classifier with the specified value of k (in util.py",
)
parser.add_argument(
"-a", "--accuracy", action="store_true", help="evaluate using accuracy"
)
parser.add_argument(
"-k", "--kappa", action="store_true", help="evaluate using Cohen's kappa"
)
parser.add_argument(
"--balanced_accuracy",
action="store_true",
help="evaluate using balanced_accuracy",
)
parser.add_argument(
"--classification_report",
action="store_true",
help="evaluate using classification_report",
)
parser.add_argument(
"--verbose", action="store_true", help="print information during training",
)
parser.add_argument(
"--small", type=int, help="not use all data but just subset", default=None
)
parser.add_argument(
"--balanced_data_set",
action="store_true",
help="arg for classifier, use balanced data",
)
args = parser.parse_args()
return args
def load_dataset(args):
"""load a pickle file and reduce samples"""
# load data
with open(args.input_file, "rb") as f_in:
data = pickle.load(f_in)
# use less data to safe time for testing
if args.small is not None:
# if limit is given
max_length = len(data["features"])
limit = min(args.small, max_length)
# go through data and limit it
for key, value in data.items():
data[key] = value[:limit]
return data
def create_classifier(args, data):
"""Load or create a classifier with given args and sklearn methods."""
# use balanced data in classifier
balanced = "balanced" if args.balanced_data_set else None
verbose = True if args.verbose else False
if args.import_file is not None:
# import a pre-trained classifier
with open(args.import_file, "rb") as f_in:
classifier = pickle.load(f_in)
else: # manually set up a classifier
if args.majority:
# majority vote classifier
classifier = DummyClassifier(
strategy="most_frequent", random_state=args.seed
)
elif args.frequency:
# label frequency classifier
classifier = DummyClassifier(strategy="stratified", random_state=args.seed)
elif args.svm:
classifier = make_pipeline(
StandardScaler(), SVC(probability=True, verbose=verbose)
)
elif args.knn:
print(" {0} nearest neighbor classifier".format(KNN_K))
standardizer = StandardScaler()
knn_classifier = KNeighborsClassifier(KNN_K, n_jobs=-1)
classifier = make_pipeline(standardizer, knn_classifier)
elif args.SGDClassifier:
# standardizer = StandardScaler()
classifier = SGDClassifier(
class_weight=balanced,
random_state=args.seed,
n_jobs=-1,
verbose=verbose,
alpha=ALPHA_SGD,
max_iter=MAX_ITER_SGD,
)
elif args.MultinomialNB:
classifier = MultinomialNB()
elif args.LogisticRegression:
standardizer = StandardScaler()
classifier = LogisticRegression(
class_weight=balanced,
n_jobs=-1,
random_state=args.seed,
verbose=verbose,
max_iter=MAX_ITER_LOGISTIC,
)
elif args.LinearSVC:
classifier = LinearSVC(
class_weight=balanced,
random_state=args.seed,
verbose=verbose,
max_iter=MAX_ITER_LINEAR_SVC,
)
try:
classifier.fit(data["features"], data["labels"].ravel())
except:
raise UnboundLocalError("Import an classifier or choose one.")
return classifier
def evaluate_classifier(args, data, prediction):
print('\n')
# collect all evaluation metrics
evaluation_metrics = []
if args.accuracy:
evaluation_metrics.append(("accuracy", accuracy_score))
if args.kappa:
evaluation_metrics.append(("Cohen's kappa", cohen_kappa_score))
if args.balanced_accuracy:
evaluation_metrics.append(("balanced accuracy", balanced_accuracy_score))
# compute and print them
print('\n')
for metric_name, metric in evaluation_metrics:
print(" {0}: {1}".format(metric_name, metric(data["labels"], prediction)))
if args.classification_report:
categories = ["Flop", "Viral"]
print(
classification_report(data["labels"], prediction, target_names=categories)
)
def export_classifier(args, classifier):
# export the trained classifier if the user wants us to do so
if args.export_file is not None:
# pdb.set_trace()
with open(args.export_file, "wb") as f_out:
pickle.dump(classifier, f_out)
if __name__ == "__main__":
args = load_args()
data = load_dataset(args)
classifier = create_classifier(args, data)
# now classify the given data
prediction = classifier.predict(data["features"])
evaluate_classifier(args, data, prediction)
export_classifier(args, classifier)
|
[
"sklearn.pipeline.make_pipeline",
"sklearn.dummy.DummyClassifier",
"pickle.dump",
"sklearn.preprocessing.StandardScaler",
"argparse.ArgumentParser",
"sklearn.linear_model.SGDClassifier",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.metrics.classification_report",
"sklearn.neighbors.KNeighborsClassifier",
"pickle.load",
"sklearn.linear_model.LogisticRegression",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC"
] |
[((916, 965), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Classifier"""'}), "(description='Classifier')\n", (939, 965), False, 'import argparse\n'), ((3442, 3459), 'pickle.load', 'pickle.load', (['f_in'], {}), '(f_in)\n', (3453, 3459), False, 'import pickle\n'), ((4199, 4216), 'pickle.load', 'pickle.load', (['f_in'], {}), '(f_in)\n', (4210, 4216), False, 'import pickle\n'), ((4351, 4416), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""most_frequent"""', 'random_state': 'args.seed'}), "(strategy='most_frequent', random_state=args.seed)\n", (4366, 4416), False, 'from sklearn.dummy import DummyClassifier\n'), ((6912, 6986), 'sklearn.metrics.classification_report', 'classification_report', (["data['labels']", 'prediction'], {'target_names': 'categories'}), "(data['labels'], prediction, target_names=categories)\n", (6933, 6986), False, 'from sklearn.metrics import accuracy_score, cohen_kappa_score, balanced_accuracy_score, classification_report\n'), ((7233, 7263), 'pickle.dump', 'pickle.dump', (['classifier', 'f_out'], {}), '(classifier, f_out)\n', (7244, 7263), False, 'import pickle\n'), ((4542, 4604), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""stratified"""', 'random_state': 'args.seed'}), "(strategy='stratified', random_state=args.seed)\n", (4557, 4604), False, 'from sklearn.dummy import DummyClassifier\n'), ((4684, 4700), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4698, 4700), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4702, 4740), 'sklearn.svm.SVC', 'SVC', ([], {'probability': '(True)', 'verbose': 'verbose'}), '(probability=True, verbose=verbose)\n', (4705, 4740), False, 'from sklearn.svm import LinearSVC, SVC\n'), ((4876, 4892), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4890, 4892), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4922, 4960), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', (['KNN_K'], {'n_jobs': '(-1)'}), '(KNN_K, n_jobs=-1)\n', (4942, 4960), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((4986, 5029), 'sklearn.pipeline.make_pipeline', 'make_pipeline', (['standardizer', 'knn_classifier'], {}), '(standardizer, knn_classifier)\n', (4999, 5029), False, 'from sklearn.pipeline import make_pipeline\n'), ((5134, 5266), 'sklearn.linear_model.SGDClassifier', 'SGDClassifier', ([], {'class_weight': 'balanced', 'random_state': 'args.seed', 'n_jobs': '(-1)', 'verbose': 'verbose', 'alpha': 'ALPHA_SGD', 'max_iter': 'MAX_ITER_SGD'}), '(class_weight=balanced, random_state=args.seed, n_jobs=-1,\n verbose=verbose, alpha=ALPHA_SGD, max_iter=MAX_ITER_SGD)\n', (5147, 5266), False, 'from sklearn.linear_model import SGDClassifier\n'), ((5432, 5447), 'sklearn.naive_bayes.MultinomialNB', 'MultinomialNB', ([], {}), '()\n', (5445, 5447), False, 'from sklearn.naive_bayes import MultinomialNB\n'), ((5513, 5529), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5527, 5529), False, 'from sklearn.preprocessing import StandardScaler\n'), ((5555, 5680), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'class_weight': 'balanced', 'n_jobs': '(-1)', 'random_state': 'args.seed', 'verbose': 'verbose', 'max_iter': 'MAX_ITER_LOGISTIC'}), '(class_weight=balanced, n_jobs=-1, random_state=args.seed,\n verbose=verbose, max_iter=MAX_ITER_LOGISTIC)\n', (5573, 5680), False, 'from sklearn.linear_model import LogisticRegression\n'), ((5826, 5933), 'sklearn.svm.LinearSVC', 'LinearSVC', ([], {'class_weight': 'balanced', 'random_state': 'args.seed', 'verbose': 'verbose', 'max_iter': 'MAX_ITER_LINEAR_SVC'}), '(class_weight=balanced, random_state=args.seed, verbose=verbose,\n max_iter=MAX_ITER_LINEAR_SVC)\n', (5835, 5933), False, 'from sklearn.svm import LinearSVC, SVC\n')]
|
import os
from dotenv import load_dotenv
basedir = os.path.abspath(os.path.dirname(__file__))
load_dotenv(os.path.join(basedir, '.env.flask'))
def env_to_bool(value, default=False):
if value is None:
return default
val = value.lower()
if val in ['false', 'f', 'no', 'n', '1']:
return False
elif val in ['true', 't', 'yes', 'y', '0']:
return True
return default
def env_to_int(value, default):
try:
return int(value)
except Error:
return default
return default
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
FRONTEND_ROOT = os.environ.get('FRONTEND_ROOT') or 'http://localhost:5005'
DB_USERNAME = os.environ.get('DB_USERNAME') or 'darkchess'
DB_PASSWORD = os.environ.get('DB_PASSWORD') or '<PASSWORD>chess'
# Currently both host and port are both rolled up into the `DB_HOST`
# variable.
DB_HOST = os.environ.get('DB_HOST') or 'localhost'
DB_NAME = os.environ.get('DB_NAME') or 'darkchess'
DB_SSL = env_to_bool(os.environ.get('DB_SSL'), False)
DB_SSL_CA_LOC = os.environ.get('DB_SSL_CA_LOC') or None
DB_SSL_CLIENT_CERT_LOC = os.environ.get('DB_SSL_CLIENT_CERT_LOC') or None
DB_SSL_CLIENT_KEY_LOC = os.environ.get('DB_SSL_CLIENT_KEY_LOC') or None
if DB_SSL:
DB_SSL_STRING = f'?ssl_ca={DB_SSL_CA_LOC}&ssl_key={DB_SSL_CLIENT_KEY_LOC}&ssl_cert={DB_SSL_CLIENT_CERT_LOC}'
else:
DB_SSL_STRING=''
# DB_SSL_REQUIRED = os.environ.get('DB_SSL_REQUIRED') or None
# DB_SSL_STRING = '?sslmode=require' if DB_SSL_REQUIRED else ''
### database ###
DATABASE_URIS = {
'MYSQL' : f'mysql+mysqldb://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}{DB_SSL_STRING}',
'POSTGRES': f'postgresql+psycopg2://{DB_USERNAME}:{DB_PASSWORD}@{DB_HOST}/{DB_NAME}{DB_SSL_STRING}',
'SQLITE' : 'sqlite:///' + os.path.join(basedir, 'app.db')
}
CHOSEN_DATABASE = os.environ.get('CHOSEN_DATABASE') or 'SQLITE'
SQLALCHEMY_TRACK_MODIFICATIONS = False
SQLALCHEMY_DATABASE_URI = DATABASE_URIS[CHOSEN_DATABASE]
MAIL_SERVER = os.environ.get('MAIL_SERVER')
MAIL_PORT = env_to_int(os.environ.get('MAIL_PORT'), 25)
MAIL_USE_TLS = env_to_bool(os.environ.get('MAIL_USE_TLS'), False)
MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
ERROR_REPORT_EMAIL = os.environ.get('ERROR_REPORT_EMAIL')
TOKEN_LIFESPAN_MINUTES = env_to_int(os.environ.get('TOKEN_LIFESPAN_MINUTES'), 120)
BETA_KEYS_REQUIRED = env_to_bool(os.environ.get('BETA_KEYS_REQUIRED'), False)
|
[
"os.environ.get",
"os.path.dirname",
"os.path.join"
] |
[((68, 93), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (83, 93), False, 'import os\n'), ((107, 142), 'os.path.join', 'os.path.join', (['basedir', '""".env.flask"""'], {}), "(basedir, '.env.flask')\n", (119, 142), False, 'import os\n'), ((511, 539), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (525, 539), False, 'import os\n'), ((1951, 1980), 'os.environ.get', 'os.environ.get', (['"""MAIL_SERVER"""'], {}), "('MAIL_SERVER')\n", (1965, 1980), False, 'import os\n'), ((2122, 2153), 'os.environ.get', 'os.environ.get', (['"""MAIL_USERNAME"""'], {}), "('MAIL_USERNAME')\n", (2136, 2153), False, 'import os\n'), ((2171, 2202), 'os.environ.get', 'os.environ.get', (['"""MAIL_PASSWORD"""'], {}), "('MAIL_PASSWORD')\n", (2185, 2202), False, 'import os\n'), ((2225, 2261), 'os.environ.get', 'os.environ.get', (['"""ERROR_REPORT_EMAIL"""'], {}), "('ERROR_REPORT_EMAIL')\n", (2239, 2261), False, 'import os\n'), ((558, 589), 'os.environ.get', 'os.environ.get', (['"""FRONTEND_ROOT"""'], {}), "('FRONTEND_ROOT')\n", (572, 589), False, 'import os\n'), ((633, 662), 'os.environ.get', 'os.environ.get', (['"""DB_USERNAME"""'], {}), "('DB_USERNAME')\n", (647, 662), False, 'import os\n'), ((693, 722), 'os.environ.get', 'os.environ.get', (['"""DB_PASSWORD"""'], {}), "('DB_PASSWORD')\n", (707, 722), False, 'import os\n'), ((839, 864), 'os.environ.get', 'os.environ.get', (['"""DB_HOST"""'], {}), "('DB_HOST')\n", (853, 864), False, 'import os\n'), ((891, 916), 'os.environ.get', 'os.environ.get', (['"""DB_NAME"""'], {}), "('DB_NAME')\n", (905, 916), False, 'import os\n'), ((955, 979), 'os.environ.get', 'os.environ.get', (['"""DB_SSL"""'], {}), "('DB_SSL')\n", (969, 979), False, 'import os\n'), ((1005, 1036), 'os.environ.get', 'os.environ.get', (['"""DB_SSL_CA_LOC"""'], {}), "('DB_SSL_CA_LOC')\n", (1019, 1036), False, 'import os\n'), ((1071, 1111), 'os.environ.get', 'os.environ.get', (['"""DB_SSL_CLIENT_CERT_LOC"""'], {}), "('DB_SSL_CLIENT_CERT_LOC')\n", (1085, 1111), False, 'import os\n'), ((1145, 1184), 'os.environ.get', 'os.environ.get', (['"""DB_SSL_CLIENT_KEY_LOC"""'], {}), "('DB_SSL_CLIENT_KEY_LOC')\n", (1159, 1184), False, 'import os\n'), ((1791, 1824), 'os.environ.get', 'os.environ.get', (['"""CHOSEN_DATABASE"""'], {}), "('CHOSEN_DATABASE')\n", (1805, 1824), False, 'import os\n'), ((2005, 2032), 'os.environ.get', 'os.environ.get', (['"""MAIL_PORT"""'], {}), "('MAIL_PORT')\n", (2019, 2032), False, 'import os\n'), ((2066, 2096), 'os.environ.get', 'os.environ.get', (['"""MAIL_USE_TLS"""'], {}), "('MAIL_USE_TLS')\n", (2080, 2096), False, 'import os\n'), ((2300, 2340), 'os.environ.get', 'os.environ.get', (['"""TOKEN_LIFESPAN_MINUTES"""'], {}), "('TOKEN_LIFESPAN_MINUTES')\n", (2314, 2340), False, 'import os\n'), ((2382, 2418), 'os.environ.get', 'os.environ.get', (['"""BETA_KEYS_REQUIRED"""'], {}), "('BETA_KEYS_REQUIRED')\n", (2396, 2418), False, 'import os\n'), ((1737, 1768), 'os.path.join', 'os.path.join', (['basedir', '"""app.db"""'], {}), "(basedir, 'app.db')\n", (1749, 1768), False, 'import os\n')]
|
import os
APP_ROOT = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../../../'))
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = APP_ROOT + '/media/upload'
STATIC_ROOT = APP_ROOT + '/resources'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/upload/'
STATIC_URL = '/static/'
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
PIPELINE = True
PIPELINE_VERSION = True
PIPELINE_VERSION_REMOVED_OLD = True
PIPELINE_CSS_COMPRESSOR = ()
PIPELINE_JS_COMPRESSOR = ()
#PIPELINE SETTINGS ARE IN <site_name>.settings.media file
|
[
"os.path.abspath"
] |
[((66, 91), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (81, 91), False, 'import os\n')]
|
import requests
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager, \
PermissionsMixin
def find_region():
ip_to_region_dict = {
"US": "US-East"
}
ip_data = requests.get("https://ipinfo.io/ip", verify=False)
ip = ip_data.text.split('\n')[0]
geo_ip_data = requests.get("https://json.geoiplookup.io/" + ip, verify=False)
geo_dict = geo_ip_data.json()
return ip_to_region_dict[geo_dict["country_code"]]
class UserManager(BaseUserManager):
def create_user(self, email, password=None, **new_params):
"""Create new user, set and save the password."""
# pass email and any new params to new model 'user'.
user = self.model(email=email, **new_params)
user.set_password(password)
# Add support for multiple databases.
user.save(using=self.db)
return user
def create_superuser(self, email, password):
"""Create new superuser."""
user = self.create_user(email, password)
user.is_staff = True
user.is_superuser = True
user.save(using=self.db)
return user
def create_user_with_ip_to_aws_region(self, email, password, **new_params):
"""Create user with ip to region translation."""
user = self.create_user(email, password, **new_params)
user.is_staff = True
user.region = find_region()
user.save(using=self.db)
return user
class User(AbstractBaseUser, PermissionsMixin):
"""Custom user model to support email only records."""
# One-to-one relationship with emails and their users
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
region = models.CharField(max_length=63, default='NOREGION')
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=True)
is_superuser = models.BooleanField(default=True)
# instantiate UserManager object
objects = UserManager()
USERNAME_FIELD = "email"
|
[
"django.db.models.CharField",
"django.db.models.BooleanField",
"requests.get",
"django.db.models.EmailField"
] |
[((234, 284), 'requests.get', 'requests.get', (['"""https://ipinfo.io/ip"""'], {'verify': '(False)'}), "('https://ipinfo.io/ip', verify=False)\n", (246, 284), False, 'import requests\n'), ((340, 403), 'requests.get', 'requests.get', (["('https://json.geoiplookup.io/' + ip)"], {'verify': '(False)'}), "('https://json.geoiplookup.io/' + ip, verify=False)\n", (352, 403), False, 'import requests\n'), ((1654, 1700), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(255)', 'unique': '(True)'}), '(max_length=255, unique=True)\n', (1671, 1700), False, 'from django.db import models\n'), ((1712, 1744), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (1728, 1744), False, 'from django.db import models\n'), ((1758, 1809), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(63)', 'default': '"""NOREGION"""'}), "(max_length=63, default='NOREGION')\n", (1774, 1809), False, 'from django.db import models\n'), ((1826, 1859), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1845, 1859), False, 'from django.db import models\n'), ((1875, 1908), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1894, 1908), False, 'from django.db import models\n'), ((1928, 1961), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (1947, 1961), False, 'from django.db import models\n')]
|
from threading import Semaphore, Barrier
from time import sleep
class H2O:
def __init__(self):
self._h2o = Barrier(3)
self._atom_h = Semaphore(2)
self._atom_o = Semaphore(1)
pass
def hydrogen(self, releaseHydrogen: 'Callable[[], None]') -> None:
self._atom_h.acquire()
# use _h2o barrier to make we have 2 H and 1 O
self._h2o.wait()
# releaseHydrogen() outputs "H". Do not change or remove this line.
releaseHydrogen()
self._atom_h.release()
def oxygen(self, releaseOxygen: 'Callable[[], None]') -> None:
self._atom_o.acquire()
# use _h2o barrier to make we have 2 H and 1 O
self._h2o.wait()
# releaseOxygen() outputs "O". Do not change or remove this line.
releaseOxygen()
self._atom_o.release()
## Time Complexity : O ( 1 )
#
# The overhead is the semaphore and barrier with constant resource lock
## Space Complexity : O( 1 )
#
# The overhead is the variable for semaphore and barrier of constant size
|
[
"threading.Semaphore",
"threading.Barrier"
] |
[((136, 146), 'threading.Barrier', 'Barrier', (['(3)'], {}), '(3)\n', (143, 146), False, 'from threading import Semaphore, Barrier\n'), ((173, 185), 'threading.Semaphore', 'Semaphore', (['(2)'], {}), '(2)\n', (182, 185), False, 'from threading import Semaphore, Barrier\n'), ((212, 224), 'threading.Semaphore', 'Semaphore', (['(1)'], {}), '(1)\n', (221, 224), False, 'from threading import Semaphore, Barrier\n')]
|
import os
import subprocess
from bsm.util import safe_rmdir
from bsm.util import expand_path
from bsm.logger import get_logger
_logger = get_logger()
class GitError(Exception):
pass
class GitNotFoundError(GitError):
pass
class GitUnknownCommandError(GitError):
pass
class GitEmptyUrlError(GitError):
pass
COMMAND_MAP = {
'clone': [None, 'clone', '{url}', '{path}'],
'checkout': ['{path}', 'checkout', '{branch}'],
'ls-remote-branches': [None, 'ls-remote', '--refs', '--heads', '{url}'],
'ls-remote-tags': [None, 'ls-remote', '--refs', '--tags', '{url}'],
'ls-branches': ['{path}', 'for-each-ref', '--format=%(refname:short)', 'refs/heads'],
'ls-tags': ['{path}', 'for-each-ref', '--format=%(refname:short)', 'refs/tags'],
}
def _git_cmd(cwd, exe, *args):
full_cmd = [exe] + list(args)
_logger.debug('Run git command: {0}'.format(full_cmd))
try:
p = subprocess.Popen(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd)
out, err = p.communicate()
ret = p.returncode
except Exception as e:
raise GitError('Exception while running git command "{0}": {1}'.format(full_cmd, e))
if ret != 0:
raise GitError('Git command "{0}" failed with exit code {1}: {2}'.format(' '.join(full_cmd), ret, err))
return out.decode()
def _find_git(git_temp=None):
try:
_git_cmd(None, 'git', 'version')
_logger.debug('Use system git')
return 'git'
except Exception as e:
pass
if git_temp is not None:
try:
_git_cmd(None, expand_path(git_temp), 'version')
_logger.debug('Use temporary git from {0}'.format(git_temp))
return git_temp
except Exception as e:
pass
_logger.error('Git command not found')
raise GitNotFoundError('Can not find git command')
class Git(object):
def __init__(self, path=None, git_temp=None):
self.__path = path
self.__git_exec = _find_git(git_temp)
def __run_cmd(self, command, **kwargs):
if command not in COMMAND_MAP:
_logger.error('Do not known how to run: {0}'.format(command))
raise GitUnknownCommandError('Do not known how to run: {0}'.format(command))
params = kwargs.copy()
if self.__path is not None:
params['path'] = self.__path
cwd = COMMAND_MAP[command][0]
if cwd is not None:
cwd = cwd.format(**params)
git_args = [v.format(**params) for v in COMMAND_MAP[command][1:]]
return _git_cmd(cwd, self.__git_exec, *git_args)
def clone(self, url):
self.__run_cmd('clone', url=url)
def checkout(self, branch):
self.__run_cmd('checkout', branch=branch)
def clear_git_info(self):
if self.__path is not None:
safe_rmdir(os.path.join(self.__path, '.git'))
def __parse_ref_list(self, out):
return [i.strip() for i in out.splitlines()]
def ls_branches(self):
out = self.__run_cmd('ls-branches')
return self.__parse_ref_list(out)
def ls_tags(self):
out = self.__run_cmd('ls-tags')
return self.__parse_ref_list(out)
def __parse_remote_list(self, out):
refs = []
for line in out.splitlines():
name = line.strip().split()[1]
name_short = name.split('/')[2]
refs.append(name_short)
return refs
def __ls_remote(self, url, ls_type):
if not url:
raise GitEmptyUrlError('Git url not specified')
out = self.__run_cmd('ls-remote-'+ls_type, url=url)
return self.__parse_remote_list(out)
def ls_remote_branches(self, url):
return self.__ls_remote(url, 'branches')
def ls_remote_tags(self, url):
return self.__ls_remote(url, 'tags')
|
[
"subprocess.Popen",
"os.path.join",
"bsm.logger.get_logger",
"bsm.util.expand_path"
] |
[((139, 151), 'bsm.logger.get_logger', 'get_logger', ([], {}), '()\n', (149, 151), False, 'from bsm.logger import get_logger\n'), ((921, 1008), 'subprocess.Popen', 'subprocess.Popen', (['full_cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'cwd'}), '(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n cwd=cwd)\n', (937, 1008), False, 'import subprocess\n'), ((1594, 1615), 'bsm.util.expand_path', 'expand_path', (['git_temp'], {}), '(git_temp)\n', (1605, 1615), False, 'from bsm.util import expand_path\n'), ((2854, 2887), 'os.path.join', 'os.path.join', (['self.__path', '""".git"""'], {}), "(self.__path, '.git')\n", (2866, 2887), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
# pragma pylint: disable=unused-argument, no-self-use
# (c) Copyright IBM Corp. 2010, 2018. All Rights Reserved.
""" Resilient functions component to run an Umbrella investigate Query - Latest Malicious Domains for an IP against a
Cisco Umbrella server """
# Set up:
# Destination: a Queue named "umbrella_investigate".
# Manual Action: Execute a REST query against a Cisco Umbrella server.
import json
import logging
from datetime import datetime
from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError
from fn_cisco_umbrella_inv.util.resilient_inv import ResilientInv
from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none
class FunctionComponent(ResilientComponent):
"""Component that implements Resilient function 'umbrella_ip_latest_malicious_domains' of
package fn_cisco_umbrella_inv.
The Function does a Cisco Umbrella Investigate query lookup takes the following parameters:
umbinv_ipaddr
An example of a set of query parameter might look like the following:
umbinv_ipaddr = "172.16.31.10"
The Investigate Query will executes a REST call against the Cisco Umbrella Investigate server and returns a result
in JSON format similar to the following.
{'ip_address': '192.168.3.11',
'query_execution_time': '2018-05-02 16:22:14',
'latest_malicious_domains': [u'textspeier.de']
}
"""
def __init__(self, opts):
"""constructor provides access to the configuration options"""
super(FunctionComponent, self).__init__(opts)
self.options = opts.get("fn_cisco_umbrella_inv", {})
validate_opts(self)
@handler("reload")
def _reload(self, event, opts):
"""Configuration options have changed, save new values"""
self.options = opts.get("fn_cisco_umbrella_inv", {})
@function("umbrella_ip_latest_malicious_domains")
def _umbrella_ip_latest_malicious_domains_function(self, event, *args, **kwargs):
"""Function: Resilient Function : Cisco Umbrella Investigate for Latest Malicious Domains for an IP."""
try:
# Get the function parameters:
umbinv_ipaddr = kwargs.get("umbinv_ipaddr") # text
log = logging.getLogger(__name__)
log.info("umbinv_ipaddr: %s", umbinv_ipaddr)
if is_none(umbinv_ipaddr):
raise ValueError("Required parameter 'umbinv_ipaddr' not set")
yield StatusMessage("Starting...")
ipaddr = None
process_result = {}
params = {"ipaddr": umbinv_ipaddr.strip()}
validate_params(params)
process_params(params, process_result)
if "_ipaddr" not in process_result:
raise ValueError("Parameter 'ipaddr' was not processed correctly")
else:
ipaddr = process_result.pop("_ipaddr")
api_token = self.options.get("api_token")
base_url = self.options.get("base_url")
rinv = ResilientInv(api_token, base_url)
yield StatusMessage("Running Cisco Investigate query...")
rtn = rinv.latest_domains(ipaddr)
query_execution_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if len(rtn) == 0:
log.debug(json.dumps(rtn))
yield StatusMessage("No Results returned for ip address '{}'.".format(ipaddr))
results = {}
else:
# Add "query_execution_time" and "ip_address" to result to facilitate post-processing.
results = {"latest_malicious_domains": json.loads(json.dumps(rtn)), "ip_address": ipaddr,
"query_execution_time": query_execution_time}
yield StatusMessage("Returning 'latest_malicious_domains' results for ip address '{}'.".format(ipaddr))
yield StatusMessage("Done...")
log.debug(json.dumps(results))
# Produce a FunctionResult with the results
yield FunctionResult(results)
except Exception:
log.exception("Exception in Resilient Function.")
yield FunctionError()
|
[
"resilient_circuits.function",
"resilient_circuits.handler",
"resilient_circuits.StatusMessage",
"json.dumps",
"resilient_circuits.FunctionError",
"fn_cisco_umbrella_inv.util.helpers.process_params",
"fn_cisco_umbrella_inv.util.helpers.is_none",
"fn_cisco_umbrella_inv.util.helpers.validate_params",
"resilient_circuits.FunctionResult",
"fn_cisco_umbrella_inv.util.resilient_inv.ResilientInv",
"datetime.datetime.now",
"fn_cisco_umbrella_inv.util.helpers.validate_opts",
"logging.getLogger"
] |
[((1758, 1775), 'resilient_circuits.handler', 'handler', (['"""reload"""'], {}), "('reload')\n", (1765, 1775), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((1945, 1993), 'resilient_circuits.function', 'function', (['"""umbrella_ip_latest_malicious_domains"""'], {}), "('umbrella_ip_latest_malicious_domains')\n", (1953, 1993), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((1732, 1751), 'fn_cisco_umbrella_inv.util.helpers.validate_opts', 'validate_opts', (['self'], {}), '(self)\n', (1745, 1751), False, 'from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none\n'), ((2331, 2358), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2348, 2358), False, 'import logging\n'), ((2432, 2454), 'fn_cisco_umbrella_inv.util.helpers.is_none', 'is_none', (['umbinv_ipaddr'], {}), '(umbinv_ipaddr)\n', (2439, 2454), False, 'from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none\n'), ((2709, 2732), 'fn_cisco_umbrella_inv.util.helpers.validate_params', 'validate_params', (['params'], {}), '(params)\n', (2724, 2732), False, 'from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none\n'), ((2745, 2783), 'fn_cisco_umbrella_inv.util.helpers.process_params', 'process_params', (['params', 'process_result'], {}), '(params, process_result)\n', (2759, 2783), False, 'from fn_cisco_umbrella_inv.util.helpers import validate_opts, validate_params, process_params, is_none\n'), ((3117, 3150), 'fn_cisco_umbrella_inv.util.resilient_inv.ResilientInv', 'ResilientInv', (['api_token', 'base_url'], {}), '(api_token, base_url)\n', (3129, 3150), False, 'from fn_cisco_umbrella_inv.util.resilient_inv import ResilientInv\n'), ((2554, 2582), 'resilient_circuits.StatusMessage', 'StatusMessage', (['"""Starting..."""'], {}), "('Starting...')\n", (2567, 2582), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((3170, 3221), 'resilient_circuits.StatusMessage', 'StatusMessage', (['"""Running Cisco Investigate query..."""'], {}), "('Running Cisco Investigate query...')\n", (3183, 3221), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((3985, 4009), 'resilient_circuits.StatusMessage', 'StatusMessage', (['"""Done..."""'], {}), "('Done...')\n", (3998, 4009), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((4033, 4052), 'json.dumps', 'json.dumps', (['results'], {}), '(results)\n', (4043, 4052), False, 'import json\n'), ((4128, 4151), 'resilient_circuits.FunctionResult', 'FunctionResult', (['results'], {}), '(results)\n', (4142, 4151), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((3303, 3317), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (3315, 3317), False, 'from datetime import datetime\n'), ((3404, 3419), 'json.dumps', 'json.dumps', (['rtn'], {}), '(rtn)\n', (3414, 3419), False, 'import json\n'), ((4258, 4273), 'resilient_circuits.FunctionError', 'FunctionError', ([], {}), '()\n', (4271, 4273), False, 'from resilient_circuits import ResilientComponent, function, handler, StatusMessage, FunctionResult, FunctionError\n'), ((3733, 3748), 'json.dumps', 'json.dumps', (['rtn'], {}), '(rtn)\n', (3743, 3748), False, 'import json\n')]
|
import logging
import tempfile
import validators
from pytube import YouTube
# Global variables are reused across execution contexts (if available)
logging.basicConfig(
format='%(asctime)s %(name)-25s %(levelname)-8s %(message)s',
level=logging.INFO)
logging.getLogger('boto3').setLevel(logging.ERROR)
logging.getLogger('botocore').setLevel(logging.ERROR)
LOG = logging.getLogger()
class YoutubeToS3:
def __init__(self, url, bucket_name):
self.url = url
self.bucket_name = bucket_name
self.base_dir = tempfile.mkdtemp()
def run(self):
streams = YouTube(self.url).streams
streams.filter(only_audio=True)
pass
class ProcessingError(Exception):
def __init__(self, http_return_code, message):
self.http_return_code = http_return_code
self.message = message
def _get_url(event):
try:
url = event['body']['url']
except (KeyError, TypeError):
raise ProcessingError(400, '"url" is required in request body.')
if not validators.url(url):
raise ProcessingError(400, 'Invalid "url" in request body.')
return url
def _create_response(status_code, text):
return {
"statusCode": status_code,
"body": text
}
def _get_bucket_name():
return 'TODO'
def lambda_handler(event, context):
LOG.info('Entering handler')
try:
url = _get_url(event)
bucket_name = _get_bucket_name()
except ProcessingError as p:
LOG.warning('Exiting with status {} - {}'.format(p.http_return_code, p.message))
return _create_response(p.http_return_code, p.message)
YoutubeToS3(url=url, bucket_name=bucket_name).run()
# yt = YouTube(url)
# print(yt.streams.filter(only_audio=True).all())
return _create_response(200, 'ok')
|
[
"logging.basicConfig",
"pytube.YouTube",
"validators.url",
"tempfile.mkdtemp",
"logging.getLogger"
] |
[((150, 256), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(name)-25s %(levelname)-8s %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s %(name)-25s %(levelname)-8s %(message)s', level=logging.INFO)\n", (169, 256), False, 'import logging\n'), ((372, 391), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (389, 391), False, 'import logging\n'), ((261, 287), 'logging.getLogger', 'logging.getLogger', (['"""boto3"""'], {}), "('boto3')\n", (278, 287), False, 'import logging\n'), ((312, 341), 'logging.getLogger', 'logging.getLogger', (['"""botocore"""'], {}), "('botocore')\n", (329, 341), False, 'import logging\n'), ((541, 559), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (557, 559), False, 'import tempfile\n'), ((1029, 1048), 'validators.url', 'validators.url', (['url'], {}), '(url)\n', (1043, 1048), False, 'import validators\n'), ((598, 615), 'pytube.YouTube', 'YouTube', (['self.url'], {}), '(self.url)\n', (605, 615), False, 'from pytube import YouTube\n')]
|
import json
from django.contrib.auth import get_user_model
from channels import Group
from .faucets.models import CoinSpawn, Faucet, Session
from .serializers import CoinSpawnSerializer
def ws_connect(message):
message.reply_channel.send({"accept": True})
Group('cryptoquest').add(message.reply_channel)
spawns = CoinSpawn.objects.filter(state='spawned')
serializer = CoinSpawnSerializer(spawns, many=True)
message.reply_channel.send({
'text': json.dumps({
'type': 'spawn_list',
'data': {
'spawns': serializer.data
}
})
});
def ws_disconnect(message):
Group('cryptoquest').discard(message.reply_channel)
def ws_message(message):
print(message['text'])
event_info = json.loads(message['text'])
event_type = event_info['type']
event_data = event_info['data']
if event_type == 'shoot':
coin_spawn = CoinSpawn.objects.select_for_update().get(pk=event_data['spawn_id'])
if coin_spawn.state != 'spawned':
return
coin_spawn.health -= 1
if coin_spawn.health == 0:
coin_spawn.captured_by = get_user_model().objects.get(pk=event_data['user_id'])
coin_spawn.save()
elif event_type == 'spawn_add':
CoinSpawn.objects.create(
faucet=Faucet.objects.get(pk=7),
amount=1,
state='spawned',
)
elif event_type == 'session_start':
Session.objects.create(
user_id=event_data['user_id'],
location_id=1
)
elif event_type == 'shitcoin':
CoinSpawn.objects.filter(faucet__coin__name='Shitcoin').delete()
CoinSpawn.objects.create(
faucet_id=6,
amount=1,
type='boss',
state='spawned',
health=9
)
elif event_type == 'balance_update':
Group('cryptoquest').send({
'text': json.dumps({
'type': 'balance_updated',
'data': {}
})
})
|
[
"channels.Group",
"django.contrib.auth.get_user_model",
"json.loads",
"json.dumps"
] |
[((779, 806), 'json.loads', 'json.loads', (["message['text']"], {}), "(message['text'])\n", (789, 806), False, 'import json\n'), ((270, 290), 'channels.Group', 'Group', (['"""cryptoquest"""'], {}), "('cryptoquest')\n", (275, 290), False, 'from channels import Group\n'), ((478, 549), 'json.dumps', 'json.dumps', (["{'type': 'spawn_list', 'data': {'spawns': serializer.data}}"], {}), "({'type': 'spawn_list', 'data': {'spawns': serializer.data}})\n", (488, 549), False, 'import json\n'), ((656, 676), 'channels.Group', 'Group', (['"""cryptoquest"""'], {}), "('cryptoquest')\n", (661, 676), False, 'from channels import Group\n'), ((1163, 1179), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1177, 1179), False, 'from django.contrib.auth import get_user_model\n'), ((1894, 1914), 'channels.Group', 'Group', (['"""cryptoquest"""'], {}), "('cryptoquest')\n", (1899, 1914), False, 'from channels import Group\n'), ((1942, 1993), 'json.dumps', 'json.dumps', (["{'type': 'balance_updated', 'data': {}}"], {}), "({'type': 'balance_updated', 'data': {}})\n", (1952, 1993), False, 'import json\n')]
|
from pydantic import BaseModel
from fastapi import APIRouter
from fastapi.responses import JSONResponse
import pymongo
import jwt
from config import db, SECRET_KEY
router = APIRouter(prefix='/api/admin')
account_collection = db.get_collection('accounts')
coin_collection = db.get_collection('coins')
class Dashboard(BaseModel):
token: str
@router.post('/dashboard')
async def dashboard(dashboard: Dashboard):
try:
payload = jwt.decode(dashboard.token, SECRET_KEY, algorithms=['HS256'])
account = account_collection.find_one({'_id': payload['_id']})
if account:
if account['type'] == 'admin' or account['type'] == 'co-owner' or account['type'] == 'owner' or account['type'] == 'developer':
accounts = account_collection.find({})
accounts = account_collection.find({}).sort(key_or_list='updatedAt', direction=pymongo.DESCENDING)
accounts = [{'username': account_['username'], 'updatedAt': account_['updatedAt'].isoformat()} for account_ in accounts]
return JSONResponse({'message': 'successfully found dashboard data', 'success': True, 'recentUsers': accounts[:5]})
else:
return JSONResponse({'message': 'account not permitted to use this page, incident will be reported', 'success': False}, status_code=403)
else:
return JSONResponse({'message': 'cant find account', 'success': False}, status_code=404)
except jwt.exceptions.DecodeError:
return JSONResponse({'message': 'invalid token', 'success': False}, status_code=401)
except jwt.exceptions.ExpiredSignatureError:
return JSONResponse({'message': 'token expired', 'success': False}, status_code=401)
except Exception as e:
return JSONResponse(
{'message': 'unknown error', 'error': str(e), 'success': False}, status_code=500
)
|
[
"jwt.decode",
"config.db.get_collection",
"fastapi.responses.JSONResponse",
"fastapi.APIRouter"
] |
[((175, 205), 'fastapi.APIRouter', 'APIRouter', ([], {'prefix': '"""/api/admin"""'}), "(prefix='/api/admin')\n", (184, 205), False, 'from fastapi import APIRouter\n'), ((227, 256), 'config.db.get_collection', 'db.get_collection', (['"""accounts"""'], {}), "('accounts')\n", (244, 256), False, 'from config import db, SECRET_KEY\n'), ((275, 301), 'config.db.get_collection', 'db.get_collection', (['"""coins"""'], {}), "('coins')\n", (292, 301), False, 'from config import db, SECRET_KEY\n'), ((444, 505), 'jwt.decode', 'jwt.decode', (['dashboard.token', 'SECRET_KEY'], {'algorithms': "['HS256']"}), "(dashboard.token, SECRET_KEY, algorithms=['HS256'])\n", (454, 505), False, 'import jwt\n'), ((1393, 1478), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'message': 'cant find account', 'success': False}"], {'status_code': '(404)'}), "({'message': 'cant find account', 'success': False},\n status_code=404)\n", (1405, 1478), False, 'from fastapi.responses import JSONResponse\n'), ((1530, 1607), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'message': 'invalid token', 'success': False}"], {'status_code': '(401)'}), "({'message': 'invalid token', 'success': False}, status_code=401)\n", (1542, 1607), False, 'from fastapi.responses import JSONResponse\n'), ((1673, 1750), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'message': 'token expired', 'success': False}"], {'status_code': '(401)'}), "({'message': 'token expired', 'success': False}, status_code=401)\n", (1685, 1750), False, 'from fastapi.responses import JSONResponse\n'), ((1070, 1183), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'message': 'successfully found dashboard data', 'success': True,\n 'recentUsers': accounts[:5]}"], {}), "({'message': 'successfully found dashboard data', 'success': \n True, 'recentUsers': accounts[:5]})\n", (1082, 1183), False, 'from fastapi.responses import JSONResponse\n'), ((1221, 1358), 'fastapi.responses.JSONResponse', 'JSONResponse', (["{'message':\n 'account not permitted to use this page, incident will be reported',\n 'success': False}"], {'status_code': '(403)'}), "({'message':\n 'account not permitted to use this page, incident will be reported',\n 'success': False}, status_code=403)\n", (1233, 1358), False, 'from fastapi.responses import JSONResponse\n')]
|
import sys
import traceback
from functools import reduce
from datetime import datetime
import sqlparse
import pprint
from django.db import models
from django.db import connection
from django.db.utils import OperationalError, ProgrammingError
from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value
import logging
from psycopg2.errors import UndefinedColumn, UndefinedTable, UndefinedObject
logger = logging.getLogger("django")
dblogger = logging.getLogger("database")
pp = pprint.PrettyPrinter(indent=4)
CASCADE = models.CASCADE
SET_NULL = models.SET_NULL
foreign_key_id = "_id"
def get_field_name(model_manager,field):
if field.db_column == None:
if isinstance(field,models.ForeignKey):
return f'{field.name}{foreign_key_id}'
else:
return f'{field.name}'
else:
return f'{field.db_column}'
def get_view_field_name(model_manager,field):
if isinstance(field,models.ForeignKey):
return (
f'{field.name}{foreign_key_id}'
f'{model_manager.material_view_field_id}')
else:
return f'{field.name}{model_manager.material_view_field_id}'
class PreparedStatement():
def __init__(self,
model,
view_name=None,
get_args=None,
where_args=None,
order_by=None,
limit=None,
fields=None,
aliases=None,
ignore=None,
extra_view_fields=None,
active_bit = None,
start_time = None,
stop_time = None,
JSON=False,
*args, **kwargs
):
self.model = model
self.get_args = get_args
cursor = connection.cursor()
if view_name == None:
view_name = model.__name__
if order_by == None:
if hasattr(model._meta,'ordering'):
order_by = model._meta.ordering
self.view_name = view_name
self.prepare_sql = self.create_prepare_sql(
view_name,
model,
get_args,
where_args,
order_by,
limit,
fields,
aliases,
ignore,
extra_view_fields,
active_bit,
start_time,
stop_time,
JSON,
)
try:
cursor.execute(self.prepare_sql)
except:
logging.warning(f"Failed to read prepared SQL for {self}")
cursor.close()
def create_prepare_sql(
self,
view_name=None,
model=None,
get_args=None,
where_args=None,
order_by=None,
limit=None,
fields=None,
aliases=None,
ignore=None,
extra_view_fields=None,
active_bit = None,
start_time = None,
stop_time = None,
JSON=False,
):
finder = []
if get_args:
for arg in get_args:
finder.append(f'{get_args[arg]}')
view_args = '(' + ', '.join(finder) + ')'
else:
view_args = ''
self.view_sql, self.view_subquery_sql = self.create_view_statement(
model, get_args, where_args,
order_by, limit, fields, aliases, ignore,
active_bit,start_time,stop_time,
extra_view_fields,JSON
)
return (f'PREPARE\n {view_name}{view_args}\n'
f'AS\n{self.view_sql};')
def create_view_statement(
self,
model,
get_args=None,
where_args=None,
order_by=None,
limit=None,
fields=None,
aliases=None,
ignore=None,
active_bit = None,
start_time = None,
stop_time = None,
extra_view_fields=None,
JSON=None,
):
sql = ['SELECT\n']
if ignore == None:
ignore = []
if aliases == None:
aliases = {}
if fields == None:
selected_fields = model._meta.get_fields()
else:
selected_fields = [model._meta.get_field(field_name) for field_name in fields]
select=[]
for field in selected_fields:
field_name = get_field_name(self, field)
if hasattr(field,'source') or hasattr(field,'dirty_bit'):
if not hasattr(field,'dirty_bit') and field_name not in ignore:
try:
alias = aliases[field_name]
except:
alias = field_name
select.append(f' "u"."{field_name}" AS "{alias}"')
if hasattr(field,'active_bit') and active_bit==None:
active_bit = field_name
if hasattr(field,'start_time') and start_time==None:
start_time = field_name
if hasattr(field,'stop_time') and stop_time==None:
stop_time = field_name
if extra_view_fields:
for field in extra_view_fields:
select.append(
f' {extra_view_fields[field]} AS "{field}"')
sql.append(',\n'.join(select) + f'\nFROM (\n')
sql.append(
self._create_lazy_refresh_select(
model, get_args, where_args,
order_by, limit, aliases, ignore,
active_bit,start_time,stop_time
))
sql.append(f') "u"\n')
finder = None
if where_args:
finder = []
for case in where_args:
finder.append(f' "u"."{case[0]}" {case[1]} {case[2]}')
elif get_args or active_bit or start_time or stop_time:
finder = []
if get_args:
count = 1
for arg in get_args:
finder.append(f' "u"."{arg}" = ${count}')
count += 1
if isinstance(active_bit,str):
finder.append(f' "u"."{active_bit}" = TRUE')
if isinstance(start_time,str):
finder.append(f' "u"."{start_time}" <= now()')
if isinstance(stop_time,str):
finder.append(f' "u"."{stop_time}" > now()')
if finder:
sql.append('WHERE\n' + '\n AND'.join(finder) + '\n')
if order_by:
ordering_fields = []
for ordering_field_name in order_by:
if ordering_field_name[0] == '-':
field = model._meta.get_field(ordering_field_name[1:])
field_name = get_field_name(self,field)
ordering_fields.append(f'"u"."{field_name}" DESC')
else:
field = model._meta.get_field(ordering_field_name)
field_name = get_field_name(self,field)
ordering_fields.append(f'"u"."{field_name}" ASC')
sql.append('ORDER BY\n'+ ','.join(ordering_fields) + '\n')
if limit:
sql.append(f'LIMIT {limit}')
sql = ''.join(sql)
if JSON:
if limit == 1:
view_sql = (f'SELECT row_to_json(t)::TEXT\n'
f'FROM\n(\n{sql}\n) as t')
view_subquery_sql = (f'SELECT json_strip_nulls(row_to_json(t))\n'
f'FROM\n(\n{sql}\n) as t')
else:
view_sql = (f'SELECT json_agg(json_strip_nulls(row_to_json(t)))::TEXT\n'
f'FROM\n(\n{sql}\n) as t')
view_subquery_sql = (f'SELECT json_agg(json_strip_nulls(row_to_json(t)))\n'
f'FROM\n(\n{sql}\n) as t')
else:
view_sql = sql
view_subquery_sql = sql
return view_sql, view_subquery_sql
def _create_lazy_refresh_select(
self,
model,
get_args=None,
where_args=None,
order_by=None,
limit=None,
aliases=None,
ignore=None,
active_bit = None,
start_time = None,
stop_time = None,
):
table_name = model._meta.db_table
view_name = f'{model.__name__}_lazy_refresh_view'
sql = [' SELECT\n',]
fresh_fields = []
stale_fields = []
dirty_bit = None
for field in model._meta.get_fields():
field_name = get_field_name(self,field)
if hasattr(field,'source') or hasattr(field,'dirty_bit'):
if hasattr(field,'dirty_bit'):
dirty_bit = field_name
else:
fresh_fields.append(f' {field_name}')
stale_fields.append(f' t.{field_name}')
sql.append(',\n'.join(fresh_fields) + f'\n FROM\n {table_name}\n')
finder = []
if where_args == None:
if get_args:
count = 1
for arg in get_args:
finder.append(f' {arg} = ${count}')
count += 1
finder.append(f' {dirty_bit} = FALSE')
id_parameters = [f'"v"."{model.primary_foreign_key}"']
if hasattr(model,'unique_select_fields'):
for uniqueid_field in model.unique_select_fields:
i = model._meta.get_field(uniqueid_field)
i_name = get_field_name(self,i)
id_parameters.append(f'"v"."{i_name}"')
id_parameters_clause = ', '.join(id_parameters)
sql.append(' WHERE\n' + '\n AND'.join(finder) + '\n UNION ALL\n')
sql.append(' SELECT\n' + ',\n'.join(stale_fields) + '\n')
sql.append(f' FROM\n "{table_name}" "v"\n')
sql.append(f' CROSS JOIN\n {model.refresh_function_name}'
f'({id_parameters_clause}) "t"')
finder = []
if where_args == None:
if get_args:
count = 1
for arg in get_args:
finder.append(f' "v"."{arg}" = ${count}')
count += 1
finder.append(f' "v"."{dirty_bit}" = TRUE')
sql.append('\n WHERE \n' + '\n AND'.join(finder))
return ''.join(sql)
def disable(self,cursor):
try:
cursor.execute(f'DEALLOCATE {self.view_name};')
except OperationalError as e:
pass
def enable(self,cursor):
cursor.execute(self.prepare_sql)
def execute(
self,
cursor,
id=None,
):
if id:
cursor.execute(f'EXECUTE {self.view_name}(%s);', [id])
else:
cursor.execute(f'EXECUTE {self.view_name};')
def fetch(
self,
cursor
):
return cursor.fetchone()
class MaterializedViewManager(models.Manager):
material_view_field_id = "_mview"
def prepare(self,cursor):
if hasattr(self.model,'primary_foreign_key'):
primary_foreign_key = self.model.primary_foreign_key
else:
primary_foreign_key = None
self.get_view, self.get_view_name = self._create_get_view(
primary_foreign_key)
self.init_view, self.init_function_name, \
self.init_function_return_table = self._create_init_view()
self.refresh_view, self.model.refresh_function_name, \
self.refresh_function_return_table = self._create_refresh_view(
self.model.primary_foreign_key)
cursor.execute(f'DROP FUNCTION IF EXISTS {self.init_function_name};')
cursor.execute(
f'DROP FUNCTION IF EXISTS {self.init_function_name}(_id INT);')
cursor.execute(f'DROP VIEW IF EXISTS {self.get_view_name};')
try:
dblogger.info(f"Preparing get_view sql statements for model {self.model.__name__}")
cursor.execute(self.get_view)
dblogger.info(f"- Prepared!")
except UndefinedColumn as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed get_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing get_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedTable as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed get_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing get_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedObject as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed get_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing get_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except ProgrammingError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed get_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing get_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
except OperationalError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed get_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing get_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
try:
dblogger.info(f"Preparing init_view sql statements for model {self.model.__name__}")
cursor.execute(self.init_view)
dblogger.info(f"- Prepared!")
except UndefinedColumn as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed init_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing init_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedTable as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed init_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing init_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedObject as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed init_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing init_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except ProgrammingError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed init_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing init_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
except OperationalError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed init_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing init_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
try:
dblogger.info(f"Preparing refresh_view sql statements for model {self.model.__name__}")
cursor.execute(self.refresh_view)
dblogger.info(f"- Prepared!")
except UndefinedColumn as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed refresh_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing refresh_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedTable as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed refresh_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing refresh_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except UndefinedObject as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed refresh_view prepare statements for model {self.model.__name__} with {type.__name__}. Did you create tables?")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing refresh_view statements for model {self.model.__name__}')
logger.error(f'- Did you create tables? Ignoring and continuing')
except ProgrammingError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed refresh_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing refresh_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
except OperationalError as e:
type, value, tb = sys.exc_info()
dblogger.error(f"Failed refresh_view prepare statements for model {self.model.__name__} with {type.__name__}!")
dblogger.error(f'- Specifically, {value}')
dblogger.error("- Please review the most recent stack entries:\n" + "".join(traceback.format_list(traceback.extract_tb(tb, limit=5))))
logger.error(f'Caught Database error {value} while preparing refresh_view statements for model {self.model.__name__}')
logger.error(f'- Ignoring and continuing')
def clear(self):
cursor = connection.cursor()
cursor.execute(f'DELETE FROM "{self.model._meta.db_table}";')
cursor.close()
def load(self):
cursor = connection.cursor()
cursor.execute(f'SELECT * FROM {self.init_function_name}();')
cursor.close()
def refresh(self):
cursor = connection.cursor()
cursor.execute(f'DELETE FROM "{self.model._meta.db_table}"')
cursor.execute(f'SELECT * FROM {self.init_function_name}();')
cursor.close()
def _create_get_view(self,source_model):
"""This creates the root materialized view query. The output here
goes directly into the fields of the materialized view tables. These
fields come from joins of other tables. Joins are the cause of
database query slowdown, as each join means traversing more tables and
indexes. Storing those fields into the materialized view table means
you don't have to do the joins when you read the table instead of doing
a query.
"""
get_view_name = f'get_{self.model.__name__}_view'
sql = (
f'CREATE OR REPLACE VIEW\n {get_view_name}\n'
f'AS\n{self._create_get_sql(source_model)};')
return sql, get_view_name
def get_view(self):
if hasattr(self.model,'primary_foreign_key'):
primary_foreign_key = self.model.primary_foreign_key
else:
primary_foreign_key = None
get_view_name = f'get_{self.model.__name__}_view'
sql = (
f'CREATE OR REPLACE VIEW\n {get_view_name}\n'
f'AS\n{self._create_get_sql(primary_foreign_key)};')
return sqlparse.format(
sql,
reindent=True,
)
def _create_get_sql(self,source_model):
"""This creates the actual SELECT query statement used to build the
materialized view. Each materialized view field has a 'source' variable
the supplies the field. The 'source' variable can be a query string or
a query expression.
There are three types of special fields that are used in the output
select view to filter out invalid data. This includes an Active bit
field, a Start Time field, and a Stop Time field. Each of these fields
are calculated based on a set of input fields. These input fields
are given conditions that are used to calculate the output field.
Active bit fields by default are required to be TRUE or NULL to be
active. If a '-' is specified in front of the input field, then
the input field should be FALSE instead of TRUE. If there is an '!'
at the end of the input field, then the field can't be null. If the
input field is something besides a boolean, then any not-null value
is TRUE.
materializedviews.ActiveBitField(
active_conditions = [
'model__field', # Field on model must be TRUE or NULL
'-field', # Field on current model must be FALSE or NULL
'field!', # Field on current model must be TRUE and NOT NULL
'-field!', # Field on current model must be FALSE and NOT NULL
]
DateTime start fields calculate when a model start time is valid
based on a list of input fields. All fields must meet the minimum start
time for field to be active.
materializedviews.StartTimeField(
start_conditions=[
'field', # DateTimefield on current model to indicate start time
'model__field', # DateTimefield on model to indicate start time
]
DateTime stop fields calculate when a model expires based on a list of
input fields. If any current time is past any expired time, then the
model is invalid.
materializedviews.StopTimeField(
start_conditions=[
'field', # DateTimefield on current model to indicate start time
'model__field', # DateTimefield on model to indicate start time
]
"""
query_fields = {}
for field in self.model._meta.get_fields():
field_name = get_view_field_name(self,field)
if hasattr(field,'source'):
if field.source:
if isinstance(field.source, str):
query_fields[f'{field_name}'] = F(field.source)
else:
query_fields[f'{field_name}'] = field.source
# Process ActiveBitField
if hasattr(field,'active_conditions'):
queries = []
for condition_field in field.active_conditions:
if condition_field[0] == '-':
condition_field = condition_field[1:]
if condition_field[-1] == '!':
condition_field = condition_field[:-1]
bitfield = getattr(self.model,
condition_field)
if isinstance(bitfield,models.BooleanField):
q = {condition_field:False}
queries.append(Q(**q))
else:
q = {condition_field+'__isnull':True}
queries.append(Q(**q))
else:
q1 = {condition_field+'__isnull':True}
q2 = {condition_field:False}
queries.append(Q(Q(**q1)|Q(**q2)))
else:
if condition_field[-1] == '!':
condition_field = condition_field[:-1]
bitfield = getattr(self.model,
condition_field)
if isinstance(bitfield,models.BooleanField):
q = {condition_field:True}
queries.append(Q(**q))
else:
q = {condition_field+'__isnull':False}
queries.append(Q(**q))
else:
q1 = {condition_field+'__isnull':True}
q2 = {condition_field:True}
queries.append(Q(Q(**q1)|Q(**q2)))
reduced = reduce((lambda x, y: x & y),queries)
added_field = ExpressionWrapper(
Q(reduced),output_field=models.BooleanField())
query_fields[f'{field_name}'] = added_field
# Process StartTimeField
if hasattr(field,'start_conditions'):
queries = []
for condition_field in field.start_conditions:
t = {condition_field:None}
q = Case(
When(
Q(**t),
then=ExpressionWrapper(
Value("'-Infinity'"),models.DateTimeField())
),
default=F(condition_field),
output_field=models.DateTimeField()
)
queries.append(q)
added_field = Func(*queries,function='GREATEST')
query_fields[f'{field_name}'] = added_field
# Process StopTimeField
if hasattr(field,'stop_conditions'):
queries = []
for condition_field in field.stop_conditions:
t = {condition_field:None}
q = Case(
When(
Q(**t),
then=ExpressionWrapper(
Value("'Infinity'"),
models.DateTimeField())
),
default=F(condition_field),
output_field=models.DateTimeField()
)
queries.append(q)
added_field = Func(*queries,function='LEAST')
query_fields[f'{field_name}'] = added_field
q = self.model._meta.get_field(
source_model).remote_field.model.objects.values(**query_fields)
return q.query.__str__()
def get_sql(self):
if hasattr(self.model,'primary_foreign_key'):
primary_foreign_key = self.model.primary_foreign_key
else:
primary_foreign_key = None
sql = self._create_get_sql(primary_foreign_key)
return sqlparse.format(
sql,
reindent=True,
)
def _create_insert_sql(self,table_name,view_name):
sql = [f'INSERT INTO\n "{table_name}"\n (\n']
source_fields = []
destination_fields = []
for field in self.model._meta.get_fields():
field_name = get_field_name(self,field)
active_field = False
view_field_name = get_view_field_name(self, field)
if hasattr(field,'source'):
if field.source:
active_field = True
if hasattr(field,'active_bit') \
or hasattr(field,'start_time') \
or hasattr(field,'stop_time'):
active_field = True
if active_field == True:
destination_fields.append(f' {field_name}')
source_fields.append(
f' "t"."{view_field_name}" "{field_name}"')
if hasattr(field,'dirty_bit'):
destination_fields.append(f' {field_name}')
source_fields.append(' FALSE')
sql.append(',\n'.join(destination_fields) +
'\n )\n SELECT\n' + ',\n'.join(source_fields))
sql.append(f'\n FROM\n {view_name} "t"')
return ''.join(sql)
def get_insert_sql(self):
get_view_name = f'get_{self.model.__name__}_view'
table_name = self.model._meta.db_table
sql = self._create_insert_sql(table_name,get_view_name)
return sqlparse.format(
sql,
reindent=True,
)
def _create_init_view(self):
init_view_name = f'init_{self.model.__name__}_view'
get_view_name = f'get_{self.model.__name__}_view'
table_name = self.model._meta.db_table
return f'CREATE OR REPLACE FUNCTION\n {init_view_name}()\n' \
f'RETURNS\n "{table_name}"\nSECURITY DEFINER\n' \
f'LANGUAGE sql AS\n$$\n' \
f'{self._create_insert_sql(table_name,get_view_name)}\n' \
f' RETURNING "{table_name}".*;\n$$;', init_view_name, table_name
def get_init_view(self):
sql, init_view_name, table_name = self._create_init_view()
return sqlparse.format(
sql,
reindent=True,
)
def _create_update_sql(self,get_view_name,key,key_field):
table_name = self.model._meta.db_table
sql = [f'UPDATE\n "{table_name}"\nSET\n']
fields = []
for field in self.model._meta.get_fields():
field_name = get_field_name(self,field)
active_field = False
if hasattr(field,'source'):
if field.source:
active_field = True
if hasattr(field,'active_bit') \
or hasattr(field,'start_time') \
or hasattr(field,'stop_time'):
active_field = True
if active_field == True:
fields.append(
f' {field_name} = t.{field_name}'
f'{self.material_view_field_id}')
if hasattr(field,'dirty_bit'):
fields.append(f' {field_name} = FALSE')
sql.append(',\n'.join(fields) + f'\nFROM (\n')
where = [f'"{table_name}"."{key_field}" = {key}']
if hasattr(self.model,'unique_select_fields'):
for wherefield in self.model.unique_select_fields:
w = self.model._meta.get_field(wherefield)
w_name = get_field_name(self,w)
where.append(
f'"{table_name}"."{w_name}" = t.{w_name}'
f'{self.material_view_field_id}')
where_clause = '\n AND '.join(where)
id_parameters = [
f'"u"."{key_field}{self.material_view_field_id}"'
f' = {key}']
if hasattr(self.model,'unique_select_fields'):
for uniqueid_field in self.model.unique_select_fields:
i = self.model._meta.get_field(uniqueid_field)
i_name = get_field_name(self,i)
id_parameters.append(
f'"u"."{i_name}{self.material_view_field_id}"'
f' = {i_name}{key}'
)
id_parameters_clause = '\n AND '.join(id_parameters)
sql.append(f' SELECT\n *\n FROM\n {get_view_name} AS "u"\n '
f'WHERE\n {id_parameters_clause}\n'
f') t\nWHERE\n {where_clause}')
return ''.join(sql)
def _create_refresh_view(self, key_field):
refresh_view_name = f'refresh_{self.model.__name__}_view'
get_view_name = f'get_{self.model.__name__}_view'
table_name = self.model._meta.db_table
id_parameters = [f'{foreign_key_id} int']
if hasattr(self.model,'unique_select_fields'):
for uniqueid_field in self.model.unique_select_fields:
i = self.model._meta.get_field(uniqueid_field)
i_name = get_field_name(self,i)
id_parameters.append(
f'{i_name}{foreign_key_id} int')
id_parameters_clause = ',\n '.join(id_parameters)
return (f'CREATE OR REPLACE FUNCTION\n'
f' {refresh_view_name}(\n {id_parameters_clause}\n )\n'
f'RETURNS\n "{table_name}"\nSECURITY DEFINER\n'
f'LANGUAGE sql AS\n$$\n'
f'{self._create_update_sql(get_view_name,foreign_key_id,key_field)}'
f'\nRETURNING'
f' "{table_name}".*;\n$$;', refresh_view_name, table_name)
def get_refresh_view(self):
sql, refresh_view_name, table_name = self._create_refresh_view(self.model.primary_foreign_key)
return sql
class MaterializedViewModel(models.Model):
objects = MaterializedViewManager()
class Meta:
abstract = True
|
[
"sqlparse.format",
"logging.warning",
"django.db.models.Value",
"django.db.models.Q",
"django.db.connection.cursor",
"django.db.models.BooleanField",
"pprint.PrettyPrinter",
"django.db.models.F",
"sys.exc_info",
"functools.reduce",
"django.db.models.DateTimeField",
"traceback.extract_tb",
"django.db.models.Func",
"logging.getLogger"
] |
[((423, 450), 'logging.getLogger', 'logging.getLogger', (['"""django"""'], {}), "('django')\n", (440, 450), False, 'import logging\n'), ((462, 491), 'logging.getLogger', 'logging.getLogger', (['"""database"""'], {}), "('database')\n", (479, 491), False, 'import logging\n'), ((497, 527), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (517, 527), False, 'import pprint\n'), ((1642, 1661), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (1659, 1661), False, 'from django.db import connection\n'), ((21320, 21339), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (21337, 21339), False, 'from django.db import connection\n'), ((21470, 21489), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (21487, 21489), False, 'from django.db import connection\n'), ((21623, 21642), 'django.db.connection.cursor', 'connection.cursor', ([], {}), '()\n', (21640, 21642), False, 'from django.db import connection\n'), ((22969, 23004), 'sqlparse.format', 'sqlparse.format', (['sql'], {'reindent': '(True)'}), '(sql, reindent=True)\n', (22984, 23004), False, 'import sqlparse\n'), ((30018, 30053), 'sqlparse.format', 'sqlparse.format', (['sql'], {'reindent': '(True)'}), '(sql, reindent=True)\n', (30033, 30053), False, 'import sqlparse\n'), ((31509, 31544), 'sqlparse.format', 'sqlparse.format', (['sql'], {'reindent': '(True)'}), '(sql, reindent=True)\n', (31524, 31544), False, 'import sqlparse\n'), ((32210, 32245), 'sqlparse.format', 'sqlparse.format', (['sql'], {'reindent': '(True)'}), '(sql, reindent=True)\n', (32225, 32245), False, 'import sqlparse\n'), ((2413, 2471), 'logging.warning', 'logging.warning', (['f"""Failed to read prepared SQL for {self}"""'], {}), "(f'Failed to read prepared SQL for {self}')\n", (2428, 2471), False, 'import logging\n'), ((11762, 11776), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (11774, 11776), False, 'import sys\n'), ((12370, 12384), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12382, 12384), False, 'import sys\n'), ((12979, 12993), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (12991, 12993), False, 'import sys\n'), ((13589, 13603), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (13601, 13603), False, 'import sys\n'), ((14176, 14190), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (14188, 14190), False, 'import sys\n'), ((14958, 14972), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (14970, 14972), False, 'import sys\n'), ((15591, 15605), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (15603, 15605), False, 'import sys\n'), ((16225, 16239), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (16237, 16239), False, 'import sys\n'), ((16860, 16874), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (16872, 16874), False, 'import sys\n'), ((17449, 17463), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (17461, 17463), False, 'import sys\n'), ((18239, 18253), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (18251, 18253), False, 'import sys\n'), ((18878, 18892), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (18890, 18892), False, 'import sys\n'), ((19518, 19532), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (19530, 19532), False, 'import sys\n'), ((20159, 20173), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (20171, 20173), False, 'import sys\n'), ((20754, 20768), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (20766, 20768), False, 'import sys\n'), ((27814, 27849), 'functools.reduce', 'reduce', (['(lambda x, y: x & y)', 'queries'], {}), '(lambda x, y: x & y, queries)\n', (27820, 27849), False, 'from functools import reduce\n'), ((28707, 28742), 'django.db.models.Func', 'Func', (['*queries'], {'function': '"""GREATEST"""'}), "(*queries, function='GREATEST')\n", (28711, 28742), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((29511, 29543), 'django.db.models.Func', 'Func', (['*queries'], {'function': '"""LEAST"""'}), "(*queries, function='LEAST')\n", (29515, 29543), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27920, 27930), 'django.db.models.Q', 'Q', (['reduced'], {}), '(reduced)\n', (27921, 27930), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((25771, 25786), 'django.db.models.F', 'F', (['field.source'], {}), '(field.source)\n', (25772, 25786), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27944, 27965), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (27963, 27965), False, 'from django.db import models\n'), ((28341, 28347), 'django.db.models.Q', 'Q', ([], {}), '(**t)\n', (28342, 28347), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((28537, 28555), 'django.db.models.F', 'F', (['condition_field'], {}), '(condition_field)\n', (28538, 28555), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((28594, 28616), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (28614, 28616), False, 'from django.db import models\n'), ((29113, 29119), 'django.db.models.Q', 'Q', ([], {}), '(**t)\n', (29114, 29119), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((29341, 29359), 'django.db.models.F', 'F', (['condition_field'], {}), '(condition_field)\n', (29342, 29359), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((29398, 29420), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (29418, 29420), False, 'from django.db import models\n'), ((12062, 12095), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (12082, 12095), False, 'import traceback\n'), ((12670, 12703), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (12690, 12703), False, 'import traceback\n'), ((13279, 13312), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (13299, 13312), False, 'import traceback\n'), ((13889, 13922), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (13909, 13922), False, 'import traceback\n'), ((14476, 14509), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (14496, 14509), False, 'import traceback\n'), ((15282, 15315), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (15302, 15315), False, 'import traceback\n'), ((15915, 15948), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (15935, 15948), False, 'import traceback\n'), ((16549, 16582), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (16569, 16582), False, 'import traceback\n'), ((17161, 17194), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (17181, 17194), False, 'import traceback\n'), ((17750, 17783), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (17770, 17783), False, 'import traceback\n'), ((18566, 18599), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (18586, 18599), False, 'import traceback\n'), ((19205, 19238), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (19225, 19238), False, 'import traceback\n'), ((19845, 19878), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (19865, 19878), False, 'import traceback\n'), ((20463, 20496), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (20483, 20496), False, 'import traceback\n'), ((21058, 21091), 'traceback.extract_tb', 'traceback.extract_tb', (['tb'], {'limit': '(5)'}), '(tb, limit=5)\n', (21078, 21091), False, 'import traceback\n'), ((26585, 26591), 'django.db.models.Q', 'Q', ([], {}), '(**q)\n', (26586, 26591), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((26744, 26750), 'django.db.models.Q', 'Q', ([], {}), '(**q)\n', (26745, 26750), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27404, 27410), 'django.db.models.Q', 'Q', ([], {}), '(**q)\n', (27405, 27410), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27564, 27570), 'django.db.models.Q', 'Q', ([], {}), '(**q)\n', (27565, 27570), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((28433, 28453), 'django.db.models.Value', 'Value', (['"""\'-Infinity\'"""'], {}), '("\'-Infinity\'")\n', (28438, 28453), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((28454, 28476), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (28474, 28476), False, 'from django.db import models\n'), ((29205, 29224), 'django.db.models.Value', 'Value', (['"""\'Infinity\'"""'], {}), '("\'Infinity\'")\n', (29210, 29224), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((29258, 29280), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (29278, 29280), False, 'from django.db import models\n'), ((26951, 26958), 'django.db.models.Q', 'Q', ([], {}), '(**q1)\n', (26952, 26958), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((26959, 26966), 'django.db.models.Q', 'Q', ([], {}), '(**q2)\n', (26960, 26966), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27770, 27777), 'django.db.models.Q', 'Q', ([], {}), '(**q1)\n', (27771, 27777), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n'), ((27778, 27785), 'django.db.models.Q', 'Q', ([], {}), '(**q2)\n', (27779, 27785), False, 'from django.db.models import Q, F, ExpressionWrapper, Func, Case, When, Value\n')]
|
# -*- coding: utf-8 -*-
"""
Handling IDs in a more secure way
"""
import uuid
def getUUID():
return str(uuid.uuid4())
def getUUIDfromString(string):
return str(uuid.uuid5(uuid.NAMESPACE_URL, string))
|
[
"uuid.uuid4",
"uuid.uuid5"
] |
[((112, 124), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (122, 124), False, 'import uuid\n'), ((174, 212), 'uuid.uuid5', 'uuid.uuid5', (['uuid.NAMESPACE_URL', 'string'], {}), '(uuid.NAMESPACE_URL, string)\n', (184, 212), False, 'import uuid\n')]
|
from pipeline.utils import *
from pipeline.Step2.Evaluate_paddle import accuracy as accuracy_paddle
from pipeline.Step2.Evaluate_torch import accuracy as accuracy_torch
from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_paddle
from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_torch
import paddle
import torch
import torch_py
import paddle_py
def interleave_paddle(x, size):
s = list(x.shape)
return x.reshape([-1, size] + s[1:]).transpose([1, 0, 2, 3, 4]).reshape([-1] + s[1:])
def de_interleave_paddle(x, size):
s = list(x.shape)
return x.reshape([size, -1] + s[1:]).transpose([1, 0, 2]).reshape([-1] + s[1:])
def interleave_torch(x, size):
s = list(x.shape)
return x.reshape([-1, size] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def de_interleave_torch(x, size):
s = list(x.shape)
return x.reshape([size, -1] + s[1:]).transpose(0, 1).reshape([-1] + s[1:])
def main_old():
model_name = 'resnext'
save_name = 'Train_Alignment'
epoch_num = 10
model_paddle, model_torch = gen_model(model_name=model_name)
model_paddle, model_torch = update_model(model_paddle, model_torch)
top1_paddle, top5_paddle = AverageMeter_paddle(), AverageMeter_paddle()
top1_torch, top5_torch = AverageMeter_torch(), AverageMeter_torch()
reprod_log_paddle = ReprodLogger()
reprod_log_torch = ReprodLogger()
args = get_args()
labeled_trainloader, unlabeled_trainloader, test_loader = gen_dataloader_paddle(args)
if args.world_size > 1:
labeled_epoch = 0
unlabeled_epoch = 0
labeled_trainloader.sampler.set_epoch(labeled_epoch)
unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
labeled_iter = iter(labeled_trainloader)
unlabeled_iter = iter(unlabeled_trainloader)
model_paddle.train()
model_torch.train()
for epoch in range(epoch_num):
try:
inputs_x, targets_x = labeled_iter.next()
except:
if args.world_size > 1:
labeled_epoch += 1
labeled_trainloader.sampler.set_epoch(labeled_epoch)
labeled_iter = iter(labeled_trainloader)
inputs_x, targets_x = labeled_iter.next()
try:
(inputs_u_w, inputs_u_s), _ = unlabeled_iter.next()
except:
if args.world_size > 1:
unlabeled_epoch += 1
unlabeled_trainloader.sampler.set_epoch(unlabeled_epoch)
unlabeled_iter = iter(unlabeled_trainloader)
(inputs_u_w, inputs_u_s), _ = unlabeled_iter.next()
batch_size = inputs_x.shape[0]
inputs_paddle = interleave_paddle(
paddle.concat((inputs_x, inputs_u_w, inputs_u_s)), 2 * args.mu + 1)
inputs_torch = data_paddle_2_torch(inputs_paddle)
logits_paddle, logits_torch = gen_res(model_paddle, inputs_paddle, model_torch, inputs_torch)
# 计算loss_paddle
logits_paddle = de_interleave_paddle(logits_paddle, 2 * args.mu + 1)
logits_x_paddle = logits_paddle[:batch_size]
logits_u_w_paddle, logits_u_s_paddle = logits_paddle[batch_size:].chunk(2)
del logits_paddle
Lx = F.cross_entropy(logits_x_paddle, targets_x, reduction='mean')
pseudo_label = F.softmax(logits_u_w_paddle.detach() / args.T, axis=-1)
max_probs, targets_u = paddle.max(pseudo_label, axis=-1), paddle.argmax(pseudo_label, axis=-1)
mask = paddle.greater_equal(max_probs, paddle.to_tensor(args.threshold)).astype(paddle.float32)
Lu = (F.cross_entropy(logits_u_s_paddle, targets_u,
reduction='none') * mask).mean()
loss_paddle = Lx + args.lambda_u * Lu
# loss_paddle 计算结束
# 计算 loss_torch
logits_torch = de_interleave_torch(logits_torch, 2 * args.mu + 1)
logits_x_torch = logits_torch[:batch_size]
logits_u_w_torch, logits_u_s_torch = logits_torch[batch_size:].chunk(2)
del logits_torch
Lx = torch.nn.functional.cross_entropy(logits_x_torch, data_paddle_2_torch(targets_x), reduction='mean')
pseudo_label = torch.nn.functional.softmax(logits_u_w_torch.detach() / args.T, axis=-1)
max_probs, targets_u = paddle.max(pseudo_label, axis=-1), paddle.argmax(pseudo_label, axis=-1)
mask = paddle.greater_equal(max_probs, args.threshold).float()
Lu = (torch.nn.functional.cross_entropy(logits_u_s_torch, targets_u,
reduction='none') * mask).mean()
loss_torch = Lx + args.lambda_u * Lu
# 计算 loss_torch 结束
loss_paddle.backward()
loss_torch.backward()
test_res_paddle = paddle_py.test(args, test_loader, model_paddle)
test_res_torch = torch_py.test(args, test_loader, model_torch)
reprod_log_paddle.add(f"epoch_{epoch + 1}_top1", np.array(test_res_paddle))
reprod_log_torch.add(f"epoch_{epoch + 1}_top5", np.array(test_res_torch))
reprod_log_paddle.save(f"./{save_name}_paddle.npy")
reprod_log_torch.save(f"./{save_name}_torch.npy")
gen_check(save_name)
if __name__ == '__main__':
reprod_log_paddle = ReprodLogger()
reprod_log_torch = ReprodLogger()
save_name = 'accuracy'
reprod_log_torch.add(f"label_num:40", np.array(93.60))
reprod_log_torch.add(f"label_num:250", np.array(95.31))
reprod_log_torch.add(f"label_num:4000", np.array(95.77))
model_40_path = '/Users/yangruizhi/Desktop/PR_list/FixMatch-Paddle/pipeline/model_params/model_best@40.pdparams'
model_250_path = '/Users/yangruizhi/Desktop/PR_list/FixMatch-Paddle/pipeline/model_params/model_best@250.pdparams'
model_4000_path = '/Users/yangruizhi/Desktop/PR_list/FixMatch-Paddle/pipeline/model_params/model_best@4000.pdparams'
model_40_acc = paddle.load(model_40_path)['best_acc']
model_250_acc = paddle.load(model_250_path)['best_acc']
model_4000_acc = paddle.load(model_4000_path)['best_acc']
reprod_log_paddle.add(f"label_num:40", np.array(model_40_acc))
reprod_log_paddle.add(f"label_num:250", np.array(model_250_acc))
reprod_log_paddle.add(f"label_num:4000", np.array(model_4000_acc))
reprod_log_paddle.save(f"./{save_name}_paddle.npy")
reprod_log_torch.save(f"./{save_name}_torch.npy")
gen_check(save_name)
|
[
"torch_py.test",
"paddle.concat",
"paddle.load",
"paddle.argmax",
"torch.nn.functional.cross_entropy",
"paddle.greater_equal",
"pipeline.Step2.Evaluate_paddle.AverageMeter",
"paddle.max",
"paddle.to_tensor",
"paddle_py.test"
] |
[((1215, 1236), 'pipeline.Step2.Evaluate_paddle.AverageMeter', 'AverageMeter_paddle', ([], {}), '()\n', (1234, 1236), True, 'from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_paddle\n'), ((1238, 1259), 'pipeline.Step2.Evaluate_paddle.AverageMeter', 'AverageMeter_paddle', ([], {}), '()\n', (1257, 1259), True, 'from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_paddle\n'), ((1289, 1309), 'pipeline.Step2.Evaluate_paddle.AverageMeter', 'AverageMeter_torch', ([], {}), '()\n', (1307, 1309), True, 'from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_torch\n'), ((1311, 1331), 'pipeline.Step2.Evaluate_paddle.AverageMeter', 'AverageMeter_torch', ([], {}), '()\n', (1329, 1331), True, 'from pipeline.Step2.Evaluate_paddle import AverageMeter as AverageMeter_torch\n'), ((4710, 4757), 'paddle_py.test', 'paddle_py.test', (['args', 'test_loader', 'model_paddle'], {}), '(args, test_loader, model_paddle)\n', (4724, 4757), False, 'import paddle_py\n'), ((4783, 4828), 'torch_py.test', 'torch_py.test', (['args', 'test_loader', 'model_torch'], {}), '(args, test_loader, model_torch)\n', (4796, 4828), False, 'import torch_py\n'), ((5824, 5850), 'paddle.load', 'paddle.load', (['model_40_path'], {}), '(model_40_path)\n', (5835, 5850), False, 'import paddle\n'), ((5883, 5910), 'paddle.load', 'paddle.load', (['model_250_path'], {}), '(model_250_path)\n', (5894, 5910), False, 'import paddle\n'), ((5944, 5972), 'paddle.load', 'paddle.load', (['model_4000_path'], {}), '(model_4000_path)\n', (5955, 5972), False, 'import paddle\n'), ((2692, 2741), 'paddle.concat', 'paddle.concat', (['(inputs_x, inputs_u_w, inputs_u_s)'], {}), '((inputs_x, inputs_u_w, inputs_u_s))\n', (2705, 2741), False, 'import paddle\n'), ((3374, 3407), 'paddle.max', 'paddle.max', (['pseudo_label'], {'axis': '(-1)'}), '(pseudo_label, axis=-1)\n', (3384, 3407), False, 'import paddle\n'), ((3409, 3445), 'paddle.argmax', 'paddle.argmax', (['pseudo_label'], {'axis': '(-1)'}), '(pseudo_label, axis=-1)\n', (3422, 3445), False, 'import paddle\n'), ((4247, 4280), 'paddle.max', 'paddle.max', (['pseudo_label'], {'axis': '(-1)'}), '(pseudo_label, axis=-1)\n', (4257, 4280), False, 'import paddle\n'), ((4282, 4318), 'paddle.argmax', 'paddle.argmax', (['pseudo_label'], {'axis': '(-1)'}), '(pseudo_label, axis=-1)\n', (4295, 4318), False, 'import paddle\n'), ((4334, 4381), 'paddle.greater_equal', 'paddle.greater_equal', (['max_probs', 'args.threshold'], {}), '(max_probs, args.threshold)\n', (4354, 4381), False, 'import paddle\n'), ((3493, 3525), 'paddle.to_tensor', 'paddle.to_tensor', (['args.threshold'], {}), '(args.threshold)\n', (3509, 3525), False, 'import paddle\n'), ((4405, 4490), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['logits_u_s_torch', 'targets_u'], {'reduction': '"""none"""'}), "(logits_u_s_torch, targets_u, reduction='none'\n )\n", (4438, 4490), False, 'import torch\n')]
|
import base64
import itertools
import re
import eml_parser
from bs4 import BeautifulSoup
CONTAINS_CID = re.compile(r'(?:src="cid:[^"]+")|(?:href="cid:[^"]+")')
CID = re.compile(r"^cid:(.+)$")
def substitute_xml(content, contents):
if isinstance(content, bytes):
content = base64.b64decode(content).decode("utf-8", "ignore")
soup = BeautifulSoup(content, "lxml")
if CONTAINS_CID.search(content) is None:
return soup
# Fill in the tag contents where a tag links to a content with a cid
for tag, attr in itertools.chain(
((tag, "src") for tag in soup.find_all(src=CID)),
((tag, "href") for tag in soup.find_all(href=CID)),
):
cid = CID.match(tag.attrs[attr]).group(1)
inner_content = contents.get(f"<{cid}>")
if inner_content is None:
continue
inner_content = substitute_xml(inner_content, contents)
tag.append(inner_content)
return soup
def substitute_outer(content, contents):
if isinstance(content, bytes):
content = base64.b64decode(content).decode("utf-8", "ignore")
# Check if the content contains a cid - if not we assume it is a string here
if CONTAINS_CID.search(content) is None:
return content
# From here on we assume the content is valid XML
return str(substitute_xml(content, contents))
async def fetch_html_content(page):
snapshot = await page._client.send("Page.captureSnapshot")
eml = eml_parser.EmlParser(
include_raw_body=True, parse_attachments=True, include_attachment_data=True
).decode_email_bytes(snapshot["data"].encode("utf-8", "ignore"))
# Find the contents of all attachments and body parts
contents = dict(
itertools.chain(
(
(attachment["content_header"]["content-id"][0], attachment["raw"])
for attachment in eml.get("attachment", [])
if "content-id" in attachment["content_header"]
),
(
(body["content_header"]["content-id"][0], body["content"])
for body in eml.get("body", [])
if "content-id" in body["content_header"]
),
)
)
# Find the root document
root_content = next(
itertools.chain(
(
body["content"]
for body in eml.get("body", [])
if body["content_header"]["content-location"][0]
== eml["header"]["header"]["snapshot-content-location"][0]
),
(
attachment["raw"]
for attachment in eml.get("attachment", [])
if attachment["content_header"]["content-location"][0]
== eml["header"]["header"]["snapshot-content-location"][0]
),
)
)
return substitute_outer(root_content, contents)
|
[
"bs4.BeautifulSoup",
"eml_parser.EmlParser",
"base64.b64decode",
"re.compile"
] |
[((107, 161), 're.compile', 're.compile', (['"""(?:src="cid:[^"]+")|(?:href="cid:[^"]+")"""'], {}), '(\'(?:src="cid:[^"]+")|(?:href="cid:[^"]+")\')\n', (117, 161), False, 'import re\n'), ((169, 193), 're.compile', 're.compile', (['"""^cid:(.+)$"""'], {}), "('^cid:(.+)$')\n", (179, 193), False, 'import re\n'), ((353, 383), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""lxml"""'], {}), "(content, 'lxml')\n", (366, 383), False, 'from bs4 import BeautifulSoup\n'), ((1475, 1576), 'eml_parser.EmlParser', 'eml_parser.EmlParser', ([], {'include_raw_body': '(True)', 'parse_attachments': '(True)', 'include_attachment_data': '(True)'}), '(include_raw_body=True, parse_attachments=True,\n include_attachment_data=True)\n', (1495, 1576), False, 'import eml_parser\n'), ((289, 314), 'base64.b64decode', 'base64.b64decode', (['content'], {}), '(content)\n', (305, 314), False, 'import base64\n'), ((1056, 1081), 'base64.b64decode', 'base64.b64decode', (['content'], {}), '(content)\n', (1072, 1081), False, 'import base64\n')]
|
#!/usr/bin/env python3
from os import environ
from common.helpers import read_xml, overwrite_file
from hdfs.helpers import process
if __name__ == '__main__':
conf_dir = environ.get( "CONF_DIR" ) if environ.get( "CONF_DIR" ) else "/opt/hbase/conf"
filename = "hbase-site.xml"
print( f"using configuration: {conf_dir}/{filename}" )
xml = read_xml( conf_dir, filename )
processed_core_site = process( xml )
if processed_core_site is not None:
overwrite_file( conf_dir, filename, processed_core_site )
else:
print( f"using default {filename} from docker images" )
print( "to learn more about the HDFS configs please visit the GitHub repo: https://github.com/magi-platform/magi" )
|
[
"os.environ.get",
"hdfs.helpers.process",
"common.helpers.overwrite_file",
"common.helpers.read_xml"
] |
[((355, 383), 'common.helpers.read_xml', 'read_xml', (['conf_dir', 'filename'], {}), '(conf_dir, filename)\n', (363, 383), False, 'from common.helpers import read_xml, overwrite_file\n'), ((413, 425), 'hdfs.helpers.process', 'process', (['xml'], {}), '(xml)\n', (420, 425), False, 'from hdfs.helpers import process\n'), ((205, 228), 'os.environ.get', 'environ.get', (['"""CONF_DIR"""'], {}), "('CONF_DIR')\n", (216, 228), False, 'from os import environ\n'), ((176, 199), 'os.environ.get', 'environ.get', (['"""CONF_DIR"""'], {}), "('CONF_DIR')\n", (187, 199), False, 'from os import environ\n'), ((477, 532), 'common.helpers.overwrite_file', 'overwrite_file', (['conf_dir', 'filename', 'processed_core_site'], {}), '(conf_dir, filename, processed_core_site)\n', (491, 532), False, 'from common.helpers import read_xml, overwrite_file\n')]
|