code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 16 11:52:12 2019
@author: z5095790
"""
import numpy as np
import copy
import pickle
import os
from keras.models import load_model
class Node:
    """A node of a binary decision tree; used for both internal nodes and leaves."""

    def __init__(self, col=-1, value=None, parentID=None, ID=None,
                 branch=None, results=None, numSamples=0, memory=None, leaf=0):
        # Split attribute: feature column index and split value.
        self.col = col
        self.value = value
        # Tree linkage: parent identifier, this node's identifier, branch taken.
        self.parentID = parentID
        self.ID = ID
        self.branch = branch
        self.results = results
        self.numSamples = numSamples  # count of training samples routed through this node
        self.memory = memory          # samples stored at this leaf
        self.leaf = leaf              # 0 for internal nodes, 1 for leaves
def load_weight_bias_path(path):
    """Load a saved Keras model and return its weights and biases per dense layer.

    The model is read from *path*; parameters are pulled from layers 0, 2 and 4
    (the indices used here imply non-parametric layers sit in between).
    Returns a tuple ``(weights, biases)``, each a list of three arrays.
    """
    model = load_model(path)
    weights = []
    biases = []
    for layer_index in (0, 2, 4):
        w, b = model.layers[layer_index].get_weights()
        weights.append(w)
        biases.append(b)
    return (weights, biases)
def countLeaves(tree, showID=False):
    """Count the leaf nodes in a flat list of tree nodes.

    Parameters
    ----------
    tree : sequence
        Flat list of ``Node`` objects; leaves are marked by ``node.leaf == 1``.
    showID : bool, optional
        When True, also return the IDs of the leaves (in list order).

    Returns
    -------
    int, or ``(int, list)`` when ``showID`` is True.
    """
    # Single comprehension replaces the manual count-and-append loop.
    leafID_list = [node.ID for node in tree if node.leaf == 1]
    if showID:
        return (len(leafID_list), leafID_list)
    return len(leafID_list)
def activationByLayer(hidden_nodes, activations):
    """Split a flat activation vector into per-layer lists.

    ``hidden_nodes`` gives the unit count of each hidden layer;
    ``activations`` is the concatenation of every layer's activation values.
    Returns a list with one list of activations per hidden layer.
    """
    per_layer = []
    offset = 0
    for layer_size in hidden_nodes:
        per_layer.append(list(activations[offset:offset + layer_size]))
        offset += layer_size
    return per_layer
def transformWeight(hidden_nodes, activations, weight, bias):
    """Collapse the layer-wise linear maps of the network into direct
    input-to-layer maps for a fixed activation pattern.

    For each hidden layer, the rows of the following weight matrix that
    read from deactivated units are zeroed out, then the effective weight
    and bias from the input to each subsequent layer are accumulated by
    matrix composition. Inputs are deep-copied and never mutated.
    """
    weight_input_to_layers = copy.deepcopy(weight)
    bias_input_to_layers = copy.deepcopy(bias)
    masked_weight = copy.deepcopy(weight)
    for layer, pattern in enumerate(activations):
        # Zero the outgoing rows of every unit that is switched off.
        for unit in range(hidden_nodes[layer]):
            if pattern[unit] == 0:
                masked_weight[layer + 1][unit, :] = 0
        # Compose the accumulated map with the masked next-layer map.
        weight_input_to_layers[layer + 1] = np.matmul(
            weight_input_to_layers[layer], masked_weight[layer + 1])
        bias_input_to_layers[layer + 1] = np.matmul(
            bias_input_to_layers[layer],
            masked_weight[layer + 1]) + bias_input_to_layers[layer + 1]
    return (weight_input_to_layers, bias_input_to_layers)
def extractRules(tree,hidden_nodes,num_input,num_output,weight,bias):
    """Build a human-readable rule and a numeric constraint matrix for each
    leaf of the decision tree.

    For every leaf, the activation pattern stored in the leaf is used to
    linearize the network (via transformWeight); the resulting per-layer
    linear inequalities are rendered both as text (rule_list_txt) and as a
    matrix of coefficients (rule_list, one row per constraint, columns:
    input weights, bias, and a trailing +1/-1 inequality-direction flag).

    Returns (rule_list_txt, rule_list, num_constraints).
    """
    leaves_list = []
    rule_list = []
    rule_list_txt = []
    num_leaves, leaves_list = countLeaves(tree, showID = True)
    num_hidden_layers = len(hidden_nodes)
    # create a list of names for input and output vectors
    input_name_array = []
    for i in range(0,num_input):
        input_name_array.append('X_'+str(i))
    output_name_array = []
    for i in range(0,num_output):
        output_name_array.append('Y_'+str(i))
    # generate rules for each leaf
    num_constraints = np.zeros([num_leaves,1])
    for i in range(0,num_leaves):
        leafResult = tree[leaves_list[i]].results
        leafResultByLayer = activationByLayer(hidden_nodes,leafResult)
        weight_input_to_layers, bias_input_to_layers = transformWeight(hidden_nodes,leafResultByLayer,weight,bias)
        # rules for activating hidden layers
        rule_txt = 'IF:\n\n'
        # one row per hidden-unit constraint (tree depth) plus one output row
        rule = np.zeros([tree[leaves_list[i]].col+1,num_input+2])
        startCol = 0
        for j in range(0,num_hidden_layers):
            for m in range(0,hidden_nodes[j]):
                # stop once the number of constraints used by this leaf is reached
                if startCol == tree[leaves_list[i]].col:
                    break
                else:
                    for k in range(0,num_input):
                        if k == 0:
                            rule_txt = rule_txt + '(' + str(weight_input_to_layers[j][k,m]) + input_name_array[k] + ')'
                        else:
                            rule_txt = rule_txt + ' + (' + str(weight_input_to_layers[j][k,m]) + input_name_array[k] + ')'
                        rule[startCol,k] = weight_input_to_layers[j][k,m]
                    # active unit -> strict '>' constraint; inactive -> '<='
                    if leafResultByLayer[j][m] == 1:
                        rule_txt = rule_txt + ' > ' + str(-bias_input_to_layers[j][m]) + "\n"
                        rule[startCol,-1] = 1
                    else:
                        rule_txt = rule_txt + ' <= ' + str(-bias_input_to_layers[j][m]) + "\n"
                        rule[startCol,-1] = -1
                    rule[startCol,num_input] = bias_input_to_layers[j][m]
                    startCol += 1
            rule_txt = rule_txt + "THEN hidden layer " + str(j) + " activation is: " + str(leafResultByLayer[j]) + "\n\n"
        # rules for decision at output
        for m in range(0,num_output):
            if num_output == 1:
                rule_txt = rule_txt + 'IF:\n'
            else:
                # NOTE(review): indexes output_name_array with j (last hidden-layer
                # index from the loop above), not m, and `result` is never used —
                # looks like dead/buggy code for the multi-output case; confirm.
                result = '\t\t' + output_name_array[j] + ' = softmax('
            for k in range(0,num_input):
                if k == 0:
                    rule_txt = rule_txt + '(' + str(weight_input_to_layers[-1][k,m]) + input_name_array[k] + ')'
                else:
                    rule_txt = rule_txt + ' + (' + str(weight_input_to_layers[-1][k,m]) + input_name_array[k] + ')'
                rule[-1,k] = weight_input_to_layers[-1][k,m]
            rule_txt = rule_txt + ' + (' + str(bias_input_to_layers[-1][m]) + ') > ' + str(0) + "\n"
            rule_txt = rule_txt + 'THEN: class = 1, OTHERWISE class = 0.'
            rule[-1,num_input] = bias_input_to_layers[-1][m]
        rule_list_txt.append(rule_txt)
        rule_list.append(rule)
        # all rows except the output row count as constraints
        num_constraints[i] = len(rule)-1
    return (rule_list_txt, rule_list, num_constraints)
if __name__ == '__main__':
    # Network architecture and dataset dimensions for this run.
    hidden_nodes = [5,5]
    num_input = 2
    num_output = 4
    Tree_Directory = "./NumericalData/wall-following-2/Saved_Trees/"
    Model_Directory = "./NumericalData/wall-following-2/Model/"
    listdir_PrunedTrees = os.listdir(Tree_Directory)
    listdir_PrunedTrees = listdir_PrunedTrees[0:100]
    # NOTE(review): os.listdir order is platform-dependent; this pairs the
    # i-th tree file with the i-th model file by listing position — confirm
    # the filenames sort consistently in both directories.
    listdir_DNNmodel = os.listdir(Model_Directory)
    #load tree, weight, bias
    total_num_constraints = None
    for i in range(0, 10):
        weight, bias = load_weight_bias_path(Model_Directory + listdir_DNNmodel[i])
        with open(Tree_Directory + listdir_PrunedTrees[i], 'rb') as f:
            tree = pickle.load(f)
        rule_list_txt, rule_list, num_constraints = extractRules(tree,hidden_nodes,num_input,num_output,weight,bias)
        # Stack per-tree constraint counts into one column vector.
        if total_num_constraints is None:
            total_num_constraints = num_constraints
        else:
            total_num_constraints = np.vstack([total_num_constraints,num_constraints])
        print("Tree %d extracted." %i)
|
[
"keras.models.load_model",
"copy.deepcopy",
"numpy.zeros",
"pickle.load",
"numpy.matmul",
"os.listdir",
"numpy.vstack"
] |
[((852, 868), 'keras.models.load_model', 'load_model', (['path'], {}), '(path)\n', (862, 868), False, 'from keras.models import load_model\n'), ((2177, 2198), 'copy.deepcopy', 'copy.deepcopy', (['weight'], {}), '(weight)\n', (2190, 2198), False, 'import copy\n'), ((2227, 2246), 'copy.deepcopy', 'copy.deepcopy', (['bias'], {}), '(bias)\n', (2240, 2246), False, 'import copy\n'), ((2277, 2298), 'copy.deepcopy', 'copy.deepcopy', (['weight'], {}), '(weight)\n', (2290, 2298), False, 'import copy\n'), ((3421, 3446), 'numpy.zeros', 'np.zeros', (['[num_leaves, 1]'], {}), '([num_leaves, 1])\n', (3429, 3446), True, 'import numpy as np\n'), ((6675, 6701), 'os.listdir', 'os.listdir', (['Tree_Directory'], {}), '(Tree_Directory)\n', (6685, 6701), False, 'import os\n'), ((6780, 6807), 'os.listdir', 'os.listdir', (['Model_Directory'], {}), '(Model_Directory)\n', (6790, 6807), False, 'import os\n'), ((2529, 2596), 'numpy.matmul', 'np.matmul', (['weight_input_to_layers[i]', 'weight_layer_activated[i + 1]'], {}), '(weight_input_to_layers[i], weight_layer_activated[i + 1])\n', (2538, 2596), True, 'import numpy as np\n'), ((3832, 3887), 'numpy.zeros', 'np.zeros', (['[tree[leaves_list[i]].col + 1, num_input + 2]'], {}), '([tree[leaves_list[i]].col + 1, num_input + 2])\n', (3840, 3887), True, 'import numpy as np\n'), ((2631, 2696), 'numpy.matmul', 'np.matmul', (['bias_input_to_layers[i]', 'weight_layer_activated[i + 1]'], {}), '(bias_input_to_layers[i], weight_layer_activated[i + 1])\n', (2640, 2696), True, 'import numpy as np\n'), ((7079, 7093), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7090, 7093), False, 'import pickle\n'), ((7374, 7425), 'numpy.vstack', 'np.vstack', (['[total_num_constraints, num_constraints]'], {}), '([total_num_constraints, num_constraints])\n', (7383, 7425), True, 'import numpy as np\n')]
|
import unittest
import uuid
from datetime import datetime
import pytz
import lusid
import lusid.models as models
from lusidfeature import lusid_feature
from utilities import InstrumentLoader
from utilities import TestDataUtilities
class Portfolios(unittest.TestCase):
    """Integration tests for LUSID portfolios: creation, portfolio/transaction
    properties, adding transactions, and listing scopes/portfolios.

    Requires a configured LUSID API client (see TestDataUtilities.api_client).
    """

    @classmethod
    def setUpClass(cls):
        # create a configured API client
        api_client = TestDataUtilities.api_client()
        cls.scopes_api = lusid.ScopesApi(api_client)
        cls.portfolios_api = lusid.PortfoliosApi(api_client)
        cls.transaction_portfolios_api = lusid.TransactionPortfoliosApi(api_client)
        cls.property_definitions_api = lusid.PropertyDefinitionsApi(api_client)
        cls.instruments_api = lusid.InstrumentsApi(api_client)
        # Seed test instruments once for the whole test class.
        instrument_loader = InstrumentLoader(cls.instruments_api)
        cls.instrument_ids = instrument_loader.load_instruments()
        cls.test_data_utilities = TestDataUtilities(cls.transaction_portfolios_api)

    @lusid_feature("F8")
    def test_create_portfolio(self):
        guid = str(uuid.uuid4())
        # details of the new portfolio to be created, created here with the minimum set of mandatory fields
        request = models.CreateTransactionPortfolioRequest(
            # descriptive name for the portfolio
            display_name="portfolio-{0}".format(guid),
            # unique portfolio code, portfolio codes must be unique across scopes
            code="id-{0}".format(guid),
            base_currency="GBP")
        # create the portfolio in LUSID in the specified scope
        result = self.transaction_portfolios_api.create_portfolio(
            scope=TestDataUtilities.tutorials_scope,
            create_transaction_portfolio_request=request)
        self.assertEqual(result.id.code, request.code)

    @lusid_feature("F9")
    def test_create_portfolio_with_properties(self):
        guid = str(uuid.uuid4())
        property_name = "fund-style-{0}".format(guid)
        data_type_id = models.ResourceId("system", "string")
        # property definition
        property_definition = models.CreatePropertyDefinitionRequest(
            domain="Portfolio",
            scope=TestDataUtilities.tutorials_scope,
            code=property_name,
            value_required=False,
            display_name="Fund Style",
            life_time="Perpetual",
            data_type_id=data_type_id
        )
        # create the property definition
        property_definition_result = self.property_definitions_api.create_property_definition(
            create_property_definition_request=property_definition)
        # property value
        property_value = "Active"
        portfolio_property = models.ModelProperty(key=property_definition_result.key,
                                             value=models.PropertyValue(label_value=property_value))
        # details of the portfolio to be created
        request = models.CreateTransactionPortfolioRequest(display_name="portfolio-{0}".format(guid),
                                                 code="id-{0}".format(guid),
                                                 base_currency="GBP",
                                                 # set the property value when creating the portfolio
                                                 properties={
                                                     property_definition_result.key: portfolio_property
                                                 })
        # create the portfolio
        portfolio = self.transaction_portfolios_api.create_portfolio(
            scope=TestDataUtilities.tutorials_scope,
            create_transaction_portfolio_request=request)
        portfolio_code = portfolio.id.code
        self.assertEqual(portfolio_code, request.code)
        # Fetch the properties back and verify the one we set round-trips.
        portfolio_properties = self.portfolios_api.get_portfolio_properties(TestDataUtilities.tutorials_scope,
                                                                            portfolio_code)
        self.assertEqual(len(portfolio_properties.properties), 1)
        self.assertEqual(portfolio_properties.properties[property_definition_result.key].value.label_value, property_value)

    @lusid_feature("F10")
    def test_add_transaction_to_portfolio(self):
        # effective date of the portfolio, this is the date the portfolio was created and became live. All dates/times
        # must be supplied in UTC
        effective_date = datetime(2018, 1, 1, tzinfo=pytz.utc)
        # create the portfolio
        portfolio_id = self.test_data_utilities.create_transaction_portfolio(TestDataUtilities.tutorials_scope)
        # details of the transaction to be added
        transaction = models.TransactionRequest(
            # unique transaction id
            transaction_id=str(uuid.uuid4()),
            # transaction type, configured during system setup
            type="Buy",
            instrument_identifiers={TestDataUtilities.lusid_luid_identifier: self.instrument_ids[0]},
            transaction_date=effective_date,
            settlement_date=effective_date,
            units=100,
            transaction_price=models.TransactionPrice(12.3),
            total_consideration=models.CurrencyAndAmount(1230, "GBP"),
            source="Client"
        )
        # add the transaction
        self.transaction_portfolios_api.upsert_transactions(
            TestDataUtilities.tutorials_scope,
            portfolio_id,
            transaction_request=[transaction])
        # get the trades
        trades = self.transaction_portfolios_api.get_transactions(TestDataUtilities.tutorials_scope, portfolio_id)
        self.assertEqual(len(trades.values), 1)
        self.assertEqual(trades.values[0].transaction_id, transaction.transaction_id)

    @lusid_feature("F11")
    def test_add_transaction_to_portfolio_with_property(self):
        guid = str(uuid.uuid4())
        property_name = "traderId-{0}".format(guid)
        # details of the property to be created
        property_definition = models.CreatePropertyDefinitionRequest(
            # The domain the property is to be applied to
            domain="Transaction",
            # the scope the property will be created in
            scope=TestDataUtilities.tutorials_scope,
            life_time="Perpetual",
            # when the property value is set it will be valid forever and cannot be changed.
            # properties whose values can change over time should be created with LifeTimeEnum.TIMEVARIANT
            code=property_name,
            value_required=False,
            display_name="<NAME>",
            data_type_id=models.ResourceId("system", "string")
        )
        # create the property definition
        property_definition_result = self.property_definitions_api.create_property_definition(
            create_property_definition_request=property_definition)
        # effective date for which portfolio is created
        effective_date = datetime(2018, 1, 1, tzinfo=pytz.utc)
        # create the portfolio
        portfolio_id = self.test_data_utilities.create_transaction_portfolio(TestDataUtilities.tutorials_scope)
        property_value_as_string = "A Trader"
        property_value = models.PropertyValue(property_value_as_string)
        # details of the transaction to be added
        transaction = models.TransactionRequest(
            transaction_id=str(uuid.uuid4()),
            type="Buy",
            instrument_identifiers={TestDataUtilities.lusid_luid_identifier: self.instrument_ids[0]},
            transaction_date=effective_date,
            settlement_date=effective_date,
            units=100,
            transaction_price=models.TransactionPrice(12.3),
            total_consideration=models.CurrencyAndAmount(1230, "GBP"),
            source="Client",
            # add the property to the transaction
            properties={property_definition_result.key: models.PerpetualProperty(property_definition_result.key, property_value)}
        )
        # add the transaction
        self.transaction_portfolios_api.upsert_transactions(
            TestDataUtilities.tutorials_scope,
            portfolio_id,
            transaction_request=[transaction])
        # get the trades
        trades = self.transaction_portfolios_api.get_transactions(TestDataUtilities.tutorials_scope, portfolio_id)
        self.assertEqual(len(trades.values), 1)
        self.assertEqual(trades.values[0].transaction_id, transaction.transaction_id)
        self.assertEqual(trades.values[0].properties[property_definition_result.key].value.label_value, property_value_as_string)

    @lusid_feature("F12")
    def test_list_scopes(self):
        # Get the list of scopes across all entities
        scopes = self.scopes_api.list_scopes()
        self.assertGreater(len(scopes.values), 0)

    @lusid_feature("F13")
    def test_list_portfolios(self):
        # This defines the scope that the portfolios will be retrieved from
        scope = TestDataUtilities.tutorials_scope + str(uuid.uuid4())
        for _ in range(10):
            self.test_data_utilities.create_transaction_portfolio(scope)
        # Retrieve the list of portfolios
        portfolios = self.portfolios_api.list_portfolios_for_scope(scope)
        self.assertEqual(len(portfolios.values), 10)
|
[
"lusid.models.TransactionPrice",
"uuid.uuid4",
"lusid.models.PerpetualProperty",
"lusid.models.ResourceId",
"utilities.InstrumentLoader",
"lusid.TransactionPortfoliosApi",
"lusid.ScopesApi",
"utilities.TestDataUtilities",
"lusid.models.CreatePropertyDefinitionRequest",
"datetime.datetime",
"lusid.PropertyDefinitionsApi",
"lusid.models.CurrencyAndAmount",
"utilities.TestDataUtilities.api_client",
"lusid.models.PropertyValue",
"lusidfeature.lusid_feature",
"lusid.InstrumentsApi",
"lusid.PortfoliosApi"
] |
[((974, 993), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F8"""'], {}), "('F8')\n", (987, 993), False, 'from lusidfeature import lusid_feature\n'), ((1798, 1817), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F9"""'], {}), "('F9')\n", (1811, 1817), False, 'from lusidfeature import lusid_feature\n'), ((4249, 4269), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F10"""'], {}), "('F10')\n", (4262, 4269), False, 'from lusidfeature import lusid_feature\n'), ((5834, 5854), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F11"""'], {}), "('F11')\n", (5847, 5854), False, 'from lusidfeature import lusid_feature\n'), ((8693, 8713), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F12"""'], {}), "('F12')\n", (8706, 8713), False, 'from lusidfeature import lusid_feature\n'), ((8903, 8923), 'lusidfeature.lusid_feature', 'lusid_feature', (['"""F13"""'], {}), "('F13')\n", (8916, 8923), False, 'from lusidfeature import lusid_feature\n'), ((377, 407), 'utilities.TestDataUtilities.api_client', 'TestDataUtilities.api_client', ([], {}), '()\n', (405, 407), False, 'from utilities import TestDataUtilities\n'), ((434, 461), 'lusid.ScopesApi', 'lusid.ScopesApi', (['api_client'], {}), '(api_client)\n', (449, 461), False, 'import lusid\n'), ((491, 522), 'lusid.PortfoliosApi', 'lusid.PortfoliosApi', (['api_client'], {}), '(api_client)\n', (510, 522), False, 'import lusid\n'), ((564, 606), 'lusid.TransactionPortfoliosApi', 'lusid.TransactionPortfoliosApi', (['api_client'], {}), '(api_client)\n', (594, 606), False, 'import lusid\n'), ((646, 686), 'lusid.PropertyDefinitionsApi', 'lusid.PropertyDefinitionsApi', (['api_client'], {}), '(api_client)\n', (674, 686), False, 'import lusid\n'), ((717, 749), 'lusid.InstrumentsApi', 'lusid.InstrumentsApi', (['api_client'], {}), '(api_client)\n', (737, 749), False, 'import lusid\n'), ((779, 816), 'utilities.InstrumentLoader', 'InstrumentLoader', (['cls.instruments_api'], {}), '(cls.instruments_api)\n', (795, 816), False, 'from 
utilities import InstrumentLoader\n'), ((918, 967), 'utilities.TestDataUtilities', 'TestDataUtilities', (['cls.transaction_portfolios_api'], {}), '(cls.transaction_portfolios_api)\n', (935, 967), False, 'from utilities import TestDataUtilities\n'), ((1981, 2018), 'lusid.models.ResourceId', 'models.ResourceId', (['"""system"""', '"""string"""'], {}), "('system', 'string')\n", (1998, 2018), True, 'import lusid.models as models\n'), ((2082, 2315), 'lusid.models.CreatePropertyDefinitionRequest', 'models.CreatePropertyDefinitionRequest', ([], {'domain': '"""Portfolio"""', 'scope': 'TestDataUtilities.tutorials_scope', 'code': 'property_name', 'value_required': '(False)', 'display_name': '"""Fund Style"""', 'life_time': '"""Perpetual"""', 'data_type_id': 'data_type_id'}), "(domain='Portfolio', scope=\n TestDataUtilities.tutorials_scope, code=property_name, value_required=\n False, display_name='Fund Style', life_time='Perpetual', data_type_id=\n data_type_id)\n", (2120, 2315), True, 'import lusid.models as models\n'), ((4498, 4535), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(2018, 1, 1, tzinfo=pytz.utc)\n', (4506, 4535), False, 'from datetime import datetime\n'), ((7027, 7064), 'datetime.datetime', 'datetime', (['(2018)', '(1)', '(1)'], {'tzinfo': 'pytz.utc'}), '(2018, 1, 1, tzinfo=pytz.utc)\n', (7035, 7064), False, 'from datetime import datetime\n'), ((7281, 7327), 'lusid.models.PropertyValue', 'models.PropertyValue', (['property_value_as_string'], {}), '(property_value_as_string)\n', (7301, 7327), True, 'import lusid.models as models\n'), ((1050, 1062), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1060, 1062), False, 'import uuid\n'), ((1890, 1902), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1900, 1902), False, 'import uuid\n'), ((5937, 5949), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5947, 5949), False, 'import uuid\n'), ((2805, 2853), 'lusid.models.PropertyValue', 'models.PropertyValue', ([], {'label_value': 
'property_value'}), '(label_value=property_value)\n', (2825, 2853), True, 'import lusid.models as models\n'), ((5196, 5225), 'lusid.models.TransactionPrice', 'models.TransactionPrice', (['(12.3)'], {}), '(12.3)\n', (5219, 5225), True, 'import lusid.models as models\n'), ((5259, 5296), 'lusid.models.CurrencyAndAmount', 'models.CurrencyAndAmount', (['(1230)', '"""GBP"""'], {}), "(1230, 'GBP')\n", (5283, 5296), True, 'import lusid.models as models\n'), ((6690, 6727), 'lusid.models.ResourceId', 'models.ResourceId', (['"""system"""', '"""string"""'], {}), "('system', 'string')\n", (6707, 6727), True, 'import lusid.models as models\n'), ((7743, 7772), 'lusid.models.TransactionPrice', 'models.TransactionPrice', (['(12.3)'], {}), '(12.3)\n', (7766, 7772), True, 'import lusid.models as models\n'), ((7806, 7843), 'lusid.models.CurrencyAndAmount', 'models.CurrencyAndAmount', (['(1230)', '"""GBP"""'], {}), "(1230, 'GBP')\n", (7830, 7843), True, 'import lusid.models as models\n'), ((9092, 9104), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (9102, 9104), False, 'import uuid\n'), ((4849, 4861), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4859, 4861), False, 'import uuid\n'), ((7460, 7472), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (7470, 7472), False, 'import uuid\n'), ((7981, 8053), 'lusid.models.PerpetualProperty', 'models.PerpetualProperty', (['property_definition_result.key', 'property_value'], {}), '(property_definition_result.key, property_value)\n', (8005, 8053), True, 'import lusid.models as models\n')]
|
from sqlalchemy import testing
from sqlalchemy.testing import (
fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL)
from sqlalchemy import (
exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func,
and_, asc, desc, inspect, literal_column, cast, exists)
from sqlalchemy.orm import (
configure_mappers, Session, mapper, create_session, relationship,
column_property, joinedload_all, contains_eager, contains_alias,
joinedload, clear_mappers, backref, relation, aliased)
from sqlalchemy.sql import table, column
from sqlalchemy.engine import default
import sqlalchemy as sa
from sqlalchemy.testing.schema import Column
from test.orm import _fixtures
from sqlalchemy.orm.util import join
class QueryTest(_fixtures.FixtureTest):
    """Base fixture class: maps the standard User/Address/Order/Item/etc.
    test schema once for all subclasses."""

    run_setup_mappers = 'once'
    run_inserts = 'once'
    run_deletes = None

    @classmethod
    def setup_mappers(cls):
        # Unpack every fixture class and table used by the mappings below.
        Node, composite_pk_table, users, Keyword, items, Dingaling, \
            order_items, item_keywords, Item, User, dingalings, \
            Address, keywords, CompositePk, nodes, Order, orders, \
            addresses = cls.classes.Node, \
            cls.tables.composite_pk_table, cls.tables.users, \
            cls.classes.Keyword, cls.tables.items, \
            cls.classes.Dingaling, cls.tables.order_items, \
            cls.tables.item_keywords, cls.classes.Item, \
            cls.classes.User, cls.tables.dingalings, \
            cls.classes.Address, cls.tables.keywords, \
            cls.classes.CompositePk, cls.tables.nodes, \
            cls.classes.Order, cls.tables.orders, cls.tables.addresses
        mapper(
            User, users, properties={
                'addresses': relationship(
                    Address, backref='user', order_by=addresses.c.id),
                'orders': relationship(
                    Order, backref='user', order_by=orders.c.id),  # o2m, m2o
            })
        mapper(
            Address, addresses, properties={
                'dingaling': relationship(
                    Dingaling, uselist=False, backref="address")  # o2o
            })
        mapper(Dingaling, dingalings)
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, order_by=items.c.id),  # m2m
                'address': relationship(Address),  # m2o
            })
        mapper(
            Item, items, properties={
                'keywords': relationship(
                    Keyword, secondary=item_keywords)})  # m2m
        mapper(Keyword, keywords)
        # Self-referential adjacency-list mapping for Node.
        mapper(
            Node, nodes, properties={
                'children': relationship(
                    Node, backref=backref('parent', remote_side=[nodes.c.id]))
            })
        mapper(CompositePk, composite_pk_table)
        configure_mappers()
class QueryCorrelatesLikeSelect(QueryTest, AssertsCompiledSQL):
    """Verify Query correlates scalar subqueries the same way select() does,
    for automatic, explicit, and disabled correlation."""

    # Expected SQL when the subquery correlates to the enclosing users table.
    query_correlated = "SELECT users.name AS users_name, " \
        "(SELECT count(addresses.id) AS count_1 FROM addresses " \
        "WHERE addresses.user_id = users.id) AS anon_1 FROM users"

    # Expected SQL when correlation is disabled (users repeated in subquery).
    query_not_correlated = "SELECT users.name AS users_name, " \
        "(SELECT count(addresses.id) AS count_1 FROM addresses, users " \
        "WHERE addresses.user_id = users.id) AS anon_1 FROM users"

    def test_as_scalar_select_auto_correlate(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(
            query, self.query_correlated, dialect=default.DefaultDialect())

    def test_as_scalar_select_explicit_correlate(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id).correlate(users).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(
            query, self.query_correlated, dialect=default.DefaultDialect())

    def test_as_scalar_select_correlate_off(self):
        addresses, users = self.tables.addresses, self.tables.users
        query = select(
            [func.count(addresses.c.id)],
            addresses.c.user_id == users.c.id).correlate(None).as_scalar()
        query = select([users.c.name.label('users_name'), query])
        self.assert_compile(
            query, self.query_not_correlated, dialect=default.DefaultDialect())

    def test_as_scalar_query_auto_correlate(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id))\
            .filter(Address.user_id == User.id)\
            .as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(
            query, self.query_correlated, dialect=default.DefaultDialect())

    def test_as_scalar_query_explicit_correlate(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id)). \
            filter(Address.user_id == User.id). \
            correlate(self.tables.users).as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(
            query, self.query_correlated, dialect=default.DefaultDialect())

    def test_as_scalar_query_correlate_off(self):
        sess = create_session()
        Address, User = self.classes.Address, self.classes.User
        query = sess.query(func.count(Address.id)). \
            filter(Address.user_id == User.id).correlate(None).as_scalar()
        query = sess.query(User.name, query)
        self.assert_compile(
            query, self.query_not_correlated, dialect=default.DefaultDialect())
class RawSelectTest(QueryTest, AssertsCompiledSQL):
    """compare a bunch of select() tests with the equivalent Query using
    straight table/columns.

    Results should be the same as Query should act as a select() pass-
    thru for ClauseElement entities.
    """

    __dialect__ = 'default'

    def test_select(self):
        addresses, users = self.tables.addresses, self.tables.users
        sess = create_session()
        # select_entity_from wraps the plain users select in an anon subquery
        self.assert_compile(
            sess.query(users).select_entity_from(users.select()).
            with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users, "
            "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1",
        )
        self.assert_compile(
            sess.query(users, exists([1], from_obj=addresses)).
            with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, EXISTS "
            "(SELECT 1 FROM addresses) AS anon_1 FROM users",
        )
        # a little tedious here, adding labels to work around Query's
        # auto-labelling.
        s = sess.query(
            addresses.c.id.label('id'),
            addresses.c.email_address.label('email')).\
            filter(addresses.c.user_id == users.c.id).correlate(users).\
            statement.alias()
        self.assert_compile(
            sess.query(users, s.c.email).select_entity_from(
                users.join(s, s.c.id == users.c.id)
            ).with_labels().statement,
            "SELECT users.id AS users_id, users.name AS users_name, "
            "anon_1.email AS anon_1_email "
            "FROM users JOIN (SELECT addresses.id AS id, "
            "addresses.email_address AS email FROM addresses, users "
            "WHERE addresses.user_id = users.id) AS anon_1 "
            "ON anon_1.id = users.id",)
        # SQL functions used as Query entities render directly as well
        x = func.lala(users.c.id).label('foo')
        self.assert_compile(sess.query(x).filter(x == 5).statement,
                            "SELECT lala(users.id) AS foo FROM users WHERE "
                            "lala(users.id) = :param_1")
        self.assert_compile(sess.query(func.sum(x).label('bar')).statement,
                            "SELECT sum(lala(users.id)) AS bar FROM users")
class FromSelfTest(QueryTest, AssertsCompiledSQL):
    """Tests for Query.from_self(): wrapping a query as the subquery of a
    new enclosing query."""

    __dialect__ = 'default'

    def test_filter(self):
        User = self.classes.User
        eq_(
            [User(id=8), User(id=9)],
            create_session().query(User).filter(User.id.in_([8, 9])).
            from_self().all())
        eq_(
            [User(id=8), User(id=9)],
            create_session().query(User).order_by(User.id).slice(1, 3).
            from_self().all())
        eq_(
            [User(id=8)],
            list(
                create_session().query(User).filter(User.id.in_([8, 9])).
                from_self().order_by(User.id)[0:1]))

    def test_join(self):
        User, Address = self.classes.User, self.classes.Address
        eq_(
            [
                (User(id=8), Address(id=2)),
                (User(id=8), Address(id=3)),
                (User(id=8), Address(id=4)),
                (User(id=9), Address(id=5))],
            create_session().query(User).filter(User.id.in_([8, 9])).
            from_self().join('addresses').add_entity(Address).
            order_by(User.id, Address.id).all()
        )

    def test_group_by(self):
        Address = self.classes.Address
        # grouping directly vs. grouping through from_self() yields the same
        eq_(
            create_session().
            query(Address.user_id, func.count(Address.id).label('count')).
            group_by(Address.user_id).order_by(Address.user_id).all(),
            [(7, 1), (8, 3), (9, 1)]
        )
        eq_(
            create_session().query(Address.user_id, Address.id).
            from_self(Address.user_id, func.count(Address.id)).
            group_by(Address.user_id).order_by(Address.user_id).all(),
            [(7, 1), (8, 3), (9, 1)]
        )

    def test_having(self):
        User = self.classes.User
        s = create_session()
        self.assert_compile(
            s.query(User.id).group_by(User.id).having(User.id > 5).
            from_self(),
            "SELECT anon_1.users_id AS anon_1_users_id FROM "
            "(SELECT users.id AS users_id FROM users GROUP "
            "BY users.id HAVING users.id > :id_1) AS anon_1"
        )

    def test_no_joinedload(self):
        """test that joinedloads are pushed outwards and not rendered in
        subqueries."""
        User = self.classes.User
        s = create_session()
        self.assert_compile(
            s.query(User).options(joinedload(User.addresses)).
            from_self().statement,
            "SELECT anon_1.users_id, anon_1.users_name, addresses_1.id, "
            "addresses_1.user_id, addresses_1.email_address FROM "
            "(SELECT users.id AS users_id, users.name AS "
            "users_name FROM users) AS anon_1 LEFT OUTER JOIN "
            "addresses AS addresses_1 ON anon_1.users_id = "
            "addresses_1.user_id ORDER BY addresses_1.id"
        )

    def test_aliases(self):
        """test that aliased objects are accessible externally to a from_self()
        call."""
        User, Address = self.classes.User, self.classes.Address
        s = create_session()
        ualias = aliased(User)
        eq_(
            s.query(User, ualias).filter(User.id > ualias.id).
            from_self(User.name, ualias.name).
            order_by(User.name, ualias.name).all(),
            [
                ('chuck', 'ed'),
                ('chuck', 'fred'),
                ('chuck', 'jack'),
                ('ed', 'jack'),
                ('fred', 'ed'),
                ('fred', 'jack')
            ]
        )
        eq_(
            s.query(User, ualias).filter(User.id > ualias.id).
            from_self(User.name, ualias.name).filter(ualias.name == 'ed').
            order_by(User.name, ualias.name).all(),
            [('chuck', 'ed'), ('fred', 'ed')])
        # NOTE: email fixture values appear scrubbed to '<EMAIL>' in this copy
        eq_(
            s.query(User, ualias).filter(User.id > ualias.id).
            from_self(ualias.name, Address.email_address).
            join(ualias.addresses).
            order_by(ualias.name, Address.email_address).all(),
            [
                ('ed', '<EMAIL>'),
                ('jack', '<EMAIL>'),
                ('jack', '<EMAIL>'),
                ('jack', '<EMAIL>'),
                ('jack', '<EMAIL>')])

    def test_multiple_entities(self):
        User, Address = self.classes.User, self.classes.Address
        sess = create_session()
        eq_(
            sess.query(User, Address).
            filter(User.id == Address.user_id).
            filter(Address.id.in_([2, 5])).from_self().all(),
            [
                (User(id=8), Address(id=2)),
                (User(id=9), Address(id=5))])
        eq_(
            sess.query(User, Address).filter(User.id == Address.user_id).
            filter(Address.id.in_([2, 5])).from_self().
            options(joinedload('addresses')).first(),
            (
                User(
                    id=8, addresses=[Address(), Address(), Address()]),
                Address(id=2)),)

    def test_multiple_with_column_entities(self):
        User = self.classes.User
        sess = create_session()
        eq_(
            sess.query(User.id).from_self().
            add_column(func.count().label('foo')).group_by(User.id).
            order_by(User.id).from_self().all(), [
                (7, 1), (8, 1), (9, 1), (10, 1)])
class ColumnAccessTest(QueryTest, AssertsCompiledSQL):
    """test access of columns after _from_selectable has been applied"""

    __dialect__ = 'default'

    def test_from_self(self):
        # a single from_self() wraps the entity query in one anon_1
        # subquery; filters applied afterwards must target the
        # subquery's labeled columns.
        User = self.classes.User
        sess = create_session()

        q = sess.query(User).from_self()
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.users_id AS anon_1_users_id, anon_1.users_name AS "
            "anon_1_users_name FROM (SELECT users.id AS users_id, users.name "
            "AS users_name FROM users) AS anon_1 WHERE anon_1.users_name = "
            ":name_1"
        )

    def test_from_self_twice(self):
        # nested from_self() calls produce nested anonymous subqueries
        # (anon_2 inside anon_1) with compounded column labels.
        User = self.classes.User
        sess = create_session()

        q = sess.query(User).from_self(User.id, User.name).from_self()
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.anon_2_users_id AS anon_1_anon_2_users_id, "
            "anon_1.anon_2_users_name AS anon_1_anon_2_users_name FROM "
            "(SELECT anon_2.users_id AS anon_2_users_id, anon_2.users_name "
            "AS anon_2_users_name FROM (SELECT users.id AS users_id, "
            "users.name AS users_name FROM users) AS anon_2) AS anon_1 "
            "WHERE anon_1.anon_2_users_name = :name_1"
        )

    def test_select_entity_from(self):
        # select_entity_from() adapts the User entity onto the given
        # selectable, so User.name maps to anon_1.name.
        User = self.classes.User
        sess = create_session()

        q = sess.query(User)
        q = sess.query(User).select_entity_from(q.statement)
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users) AS anon_1 WHERE anon_1.name = :name_1"
        )

    def test_select_entity_from_no_entities(self):
        # passing a mapped class (not a FromClause) raises ArgumentError
        User = self.classes.User
        sess = create_session()

        assert_raises_message(
            sa.exc.ArgumentError,
            r"A selectable \(FromClause\) instance is "
            "expected when the base alias is being set",
            sess.query(User).select_entity_from, User)

    def test_select_from_no_aliasing(self):
        # select_from() (unlike select_entity_from()) does NOT adapt the
        # entity: users appears in the FROM list alongside the subquery,
        # and the filter still references the unaliased users table.
        User = self.classes.User
        sess = create_session()

        q = sess.query(User)
        q = sess.query(User).select_from(q.statement)
        self.assert_compile(
            q.filter(User.name == 'ed'),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users, (SELECT users.id AS id, users.name AS name FROM "
            "users) AS anon_1 WHERE users.name = :name_1"
        )

    def test_anonymous_expression(self):
        # union of two non-ORM column queries; the ORDER BY on c1 is
        # adapted to the anon_1 wrapper.
        from sqlalchemy.sql import column
        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q2 = sess.query(c1, c2).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
            "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE "
            "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 "
            "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1"
        )

    def test_anonymous_expression_from_self_twice(self):
        # double from_self() on plain (non-ORM) columns nests the
        # subqueries and compounds the anonymous labels.
        from sqlalchemy.sql import column
        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q1 = q1.from_self().from_self()
        self.assert_compile(
            q1.order_by(c1),
            "SELECT anon_1.anon_2_c1 AS anon_1_anon_2_c1, anon_1.anon_2_c2 AS "
            "anon_1_anon_2_c2 FROM (SELECT anon_2.c1 AS anon_2_c1, anon_2.c2 "
            "AS anon_2_c2 "
            "FROM (SELECT c1 AS c1, c2 AS c2 WHERE c1 = :c1_1) AS "
            "anon_2) AS anon_1 ORDER BY anon_1.anon_2_c1"
        )

    def test_anonymous_expression_union(self):
        # NOTE(review): body is identical to test_anonymous_expression
        # above; kept as-is.
        from sqlalchemy.sql import column
        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1, c2).filter(c1 == 'dog')
        q2 = sess.query(c1, c2).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.c1 AS anon_1_c1, anon_1.c2 "
            "AS anon_1_c2 FROM (SELECT c1 AS c1, c2 AS c2 WHERE "
            "c1 = :c1_1 UNION SELECT c1 AS c1, c2 AS c2 "
            "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.c1"
        )

    def test_table_anonymous_expression_from_self_twice(self):
        # like the plain-column version above, but table-qualified
        # columns produce t1_c1-style labels inside the nesting.
        from sqlalchemy.sql import column
        sess = create_session()
        t1 = table('t1', column('c1'), column('c2'))
        q1 = sess.query(t1.c.c1, t1.c.c2).filter(t1.c.c1 == 'dog')
        q1 = q1.from_self().from_self()
        self.assert_compile(
            q1.order_by(t1.c.c1),
            "SELECT anon_1.anon_2_t1_c1 "
            "AS anon_1_anon_2_t1_c1, anon_1.anon_2_t1_c2 "
            "AS anon_1_anon_2_t1_c2 "
            "FROM (SELECT anon_2.t1_c1 AS anon_2_t1_c1, "
            "anon_2.t1_c2 AS anon_2_t1_c2 FROM (SELECT t1.c1 AS t1_c1, t1.c2 "
            "AS t1_c2 FROM t1 WHERE t1.c1 = :c1_1) AS anon_2) AS anon_1 "
            "ORDER BY anon_1.anon_2_t1_c1"
        )

    def test_anonymous_labeled_expression(self):
        # explicit .label() names ('foo', 'bar') are preserved through
        # the union wrapper instead of anonymous c1/c2 labels.
        sess = create_session()
        c1, c2 = column('c1'), column('c2')
        q1 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'dog')
        q2 = sess.query(c1.label('foo'), c2.label('bar')).filter(c1 == 'cat')
        q3 = q1.union(q2)
        self.assert_compile(
            q3.order_by(c1),
            "SELECT anon_1.foo AS anon_1_foo, anon_1.bar AS anon_1_bar FROM "
            "(SELECT c1 AS foo, c2 AS bar WHERE c1 = :c1_1 UNION SELECT "
            "c1 AS foo, c2 AS bar "
            "WHERE c1 = :c1_2) AS anon_1 ORDER BY anon_1.foo")

    def test_anonymous_expression_plus_aliased_join(self):
        """test that the 'dont alias non-ORM' rule remains for other
        kinds of aliasing when _from_selectable() is used."""

        User = self.classes.User
        Address = self.classes.Address
        addresses = self.tables.addresses
        sess = create_session()
        q1 = sess.query(User.id).filter(User.id > 5)
        q1 = q1.from_self()
        # aliased join: addresses becomes addresses_1, but the raw
        # addresses.c.id in the ORDER BY stays unadapted
        q1 = q1.join(User.addresses, aliased=True).\
            order_by(User.id, Address.id, addresses.c.id)
        self.assert_compile(
            q1,
            "SELECT anon_1.users_id AS anon_1_users_id "
            "FROM (SELECT users.id AS users_id FROM users "
            "WHERE users.id > :id_1) AS anon_1 JOIN addresses AS addresses_1 "
            "ON anon_1.users_id = addresses_1.user_id "
            "ORDER BY anon_1.users_id, addresses_1.id, addresses.id"
        )
class AddEntityEquivalenceTest(fixtures.MappedTest, AssertsCompiledSQL):
    """query(A, B) and query(A).add_entity(B) are equivalent, including
    with joined-table polymorphic inheritance in play."""

    run_setup_mappers = 'once'

    @classmethod
    def define_tables(cls, metadata):
        # 'a' rows optionally link to 'b' rows via a.bid; 'c' extends
        # 'b' and 'd' extends 'a' (joined-table inheritance).
        Table(
            'a', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(20)),
            Column('bid', Integer, ForeignKey('b.id'))
        )
        Table(
            'b', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('name', String(50)),
            Column('type', String(20))
        )
        Table(
            'c', metadata,
            Column('id', Integer, ForeignKey('b.id'), primary_key=True),
            Column('age', Integer))
        Table(
            'd', metadata,
            Column('id', Integer, ForeignKey('a.id'), primary_key=True),
            Column('dede', Integer))

    @classmethod
    def setup_classes(cls):
        a, c, b, d = (cls.tables.a, cls.tables.c, cls.tables.b, cls.tables.d)

        class A(cls.Comparable):
            pass

        class B(cls.Comparable):
            pass

        class C(B):
            pass

        class D(A):
            pass

        # A <-> B one-to-one via A.link / B.back; both hierarchies are
        # polymorphic on their 'type' column with '*' eager loading
        mapper(
            A, a, polymorphic_identity='a', polymorphic_on=a.c.type,
            with_polymorphic=('*', None), properties={
                'link': relation(B, uselist=False, backref='back')})
        mapper(
            B, b, polymorphic_identity='b', polymorphic_on=b.c.type,
            with_polymorphic=('*', None))
        mapper(C, c, inherits=B, polymorphic_identity='c')
        mapper(D, d, inherits=A, polymorphic_identity='d')

    @classmethod
    def insert_data(cls):
        A, C, B = (cls.classes.A, cls.classes.C, cls.classes.B)
        sess = create_session()
        sess.add_all(
            [
                B(name='b1'),
                A(name='a1', link=C(name='c1', age=3)),
                C(name='c2', age=6),
                A(name='a2')])
        sess.flush()

    def test_add_entity_equivalence(self):
        A, C, B = (self.classes.A, self.classes.C, self.classes.B)
        sess = create_session()

        # query(A, B).join(...) vs. query(A).join(...).add_entity(B)
        for q in [
            sess.query(A, B).join(A.link),
            sess.query(A).join(A.link).add_entity(B),
        ]:
            eq_(
                q.all(),
                [(
                    A(bid=2, id=1, name='a1', type='a'),
                    C(age=3, id=2, name='c1', type='c')
                )]
            )

        # reverse direction; add_entity() placement relative to join()
        # must not matter
        for q in [
            sess.query(B, A).join(B.back),
            sess.query(B).join(B.back).add_entity(A),
            sess.query(B).add_entity(A).join(B.back)
        ]:
            eq_(
                q.all(),
                [(
                    C(age=3, id=2, name='c1', type='c'),
                    A(bid=2, id=1, name='a1', type='a')
                )]
            )
class InstancesTest(QueryTest, AssertsCompiledSQL):
    """Tests for Query.instances(), contains_alias() and contains_eager()
    against hand-constructed SELECT statements."""

    def test_from_alias_one(self):
        User, addresses, users = (self.classes.User,
                                  self.tables.addresses,
                                  self.tables.users)

        # a UNION of users aliased as 'ulist', outer-joined to addresses
        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).alias('ulist').\
            outerjoin(addresses).\
            select(use_labels=True, order_by=['ulist.id', addresses.c.id])
        sess = create_session()
        q = sess.query(User)

        def go():
            # contains_alias() maps the User entity onto the 'ulist'
            # alias; contains_eager() fills User.addresses from the
            # same rows
            l = list(
                q.options(
                    contains_alias('ulist'), contains_eager('addresses')).
                instances(query.execute()))
            assert self.static.user_address_result == l
        # everything loads from the single hand-rolled statement
        self.assert_sql_count(testing.db, go, 1)

    def test_from_alias_two(self):
        User, addresses, users = (self.classes.User,
                                  self.tables.addresses,
                                  self.tables.users)

        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).alias('ulist').\
            outerjoin(addresses). \
            select(use_labels=True, order_by=['ulist.id', addresses.c.id])
        sess = create_session()
        q = sess.query(User)

        def go():
            # same as test_from_alias_one, but via from_statement()
            l = q.options(
                contains_alias('ulist'), contains_eager('addresses')).\
                from_statement(query).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_from_alias_three(self):
        User, addresses, users = (self.classes.User,
                                  self.tables.addresses,
                                  self.tables.users)

        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).alias('ulist').\
            outerjoin(addresses). \
            select(use_labels=True, order_by=['ulist.id', addresses.c.id])
        sess = create_session()

        # better way.  use select_entity_from()
        def go():
            l = sess.query(User).select_entity_from(query).\
                options(contains_eager('addresses')).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_from_alias_four(self):
        User, addresses, users = (self.classes.User,
                                  self.tables.addresses,
                                  self.tables.users)

        sess = create_session()

        # same thing, but alias addresses, so that the adapter
        # generated by select_entity_from() is wrapped within
        # the adapter created by contains_eager()
        adalias = addresses.alias()
        query = users.select(users.c.id == 7).\
            union(users.select(users.c.id > 7)).\
            alias('ulist').outerjoin(adalias).\
            select(use_labels=True, order_by=['ulist.id', adalias.c.id])

        def go():
            l = sess.query(User).select_entity_from(query).\
                options(contains_eager('addresses', alias=adalias)).all()
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager(self):
        users, addresses, User = (self.tables.users,
                                  self.tables.addresses,
                                  self.classes.User)

        sess = create_session()

        # test that contains_eager suppresses the normal outer join rendering
        q = sess.query(User).outerjoin(User.addresses).\
            options(contains_eager(User.addresses)).\
            order_by(User.id, addresses.c.id)
        self.assert_compile(q.with_labels().statement,
                            'SELECT addresses.id AS addresses_id, '
                            'addresses.user_id AS addresses_user_id, '
                            'addresses.email_address AS '
                            'addresses_email_address, users.id AS '
                            'users_id, users.name AS users_name FROM '
                            'users LEFT OUTER JOIN addresses ON '
                            'users.id = addresses.user_id ORDER BY '
                            'users.id, addresses.id',
                            dialect=default.DefaultDialect())

        def go():
            assert self.static.user_address_result == q.all()
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        # contains_eager() against an explicit table alias, combined
        # with select_entity_from()
        adalias = addresses.alias()
        q = sess.query(User).\
            select_entity_from(users.outerjoin(adalias)).\
            options(contains_eager(User.addresses, alias=adalias)).\
            order_by(User.id, adalias.c.id)

        def go():
            eq_(self.static.user_address_result, q.order_by(User.id).all())
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        # a raw select() statement fed through instances()
        selectquery = users.outerjoin(addresses). \
            select(
                users.c.id < 10, use_labels=True,
                order_by=[users.c.id, addresses.c.id])
        q = sess.query(User)

        def go():
            # string form of the option
            l = list(
                q.options(contains_eager('addresses')).
                instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            # attribute form of the option
            l = list(
                q.options(contains_eager(User.addresses)).
                instances(selectquery.execute()))
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            # and via from_statement()
            l = q.options(
                contains_eager('addresses')).from_statement(selectquery).all()
            assert self.static.user_address_result[0:3] == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_string_alias(self):
        addresses, users, User = (self.tables.addresses,
                                  self.tables.users,
                                  self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias). \
            select(use_labels=True, order_by=[users.c.id, adalias.c.id])

        # string alias name
        def go():
            l = list(
                q.options(
                    contains_eager('addresses', alias="adalias")).
                instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased_instances(self):
        addresses, users, User = (self.tables.addresses,
                                  self.tables.users,
                                  self.classes.User)

        sess = create_session()
        q = sess.query(User)

        adalias = addresses.alias('adalias')
        selectquery = users.outerjoin(adalias).\
            select(use_labels=True, order_by=[users.c.id, adalias.c.id])

        # expression.Alias object
        def go():
            l = list(
                q.options(
                    contains_eager('addresses', alias=adalias)).
                instances(selectquery.execute()))
            assert self.static.user_address_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_aliased(self):
        User, Address = self.classes.User, self.classes.Address
        sess = create_session()
        q = sess.query(User)

        # Aliased object
        adalias = aliased(Address)

        def go():
            l = q.options(
                contains_eager('addresses', alias=adalias)
            ).outerjoin(adalias, User.addresses).\
                order_by(User.id, adalias.id)
            assert self.static.user_address_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_string_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                                   self.tables.items,
                                                   self.tables.users,
                                                   self.tables.order_items,
                                                   self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).outerjoin(order_items).\
            outerjoin(ialias).select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using string alias with more than one level deep
        def go():
            l = list(
                q.options(
                    contains_eager('orders', alias='o1'),
                    contains_eager('orders.items', alias='i1')
                ).instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_alias(self):
        orders, items, users, order_items, User = (self.tables.orders,
                                                   self.tables.items,
                                                   self.tables.users,
                                                   self.tables.order_items,
                                                   self.classes.User)

        sess = create_session()
        q = sess.query(User)

        oalias = orders.alias('o1')
        ialias = items.alias('i1')
        query = users.outerjoin(oalias).outerjoin(order_items).\
            outerjoin(ialias).select(use_labels=True).\
            order_by(users.c.id, oalias.c.id, ialias.c.id)

        # test using Alias with more than one level deep

        # new way:
        # from sqlalchemy.orm.strategy_options import Load
        # opt = Load(User).contains_eager('orders', alias=oalias).
        #     contains_eager('items', alias=ialias)

        def go():
            l = list(
                q.options(
                    contains_eager('orders', alias=oalias),
                    contains_eager('orders.items', alias=ialias)).
                instances(query.execute()))
            assert self.static.user_order_result == l
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_multi_aliased(self):
        Item, User, Order = (
            self.classes.Item, self.classes.User, self.classes.Order)

        sess = create_session()
        q = sess.query(User)

        # test using Aliased with more than one level deep
        oalias = aliased(Order)
        ialias = aliased(Item)

        def go():
            l = q.options(
                contains_eager(User.orders, alias=oalias),
                contains_eager(User.orders, Order.items, alias=ialias)).\
                outerjoin(oalias, User.orders).\
                outerjoin(ialias, oalias.items).\
                order_by(User.id, oalias.id, ialias.id)
            assert self.static.user_order_result == l.all()
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining(self):
        """test that contains_eager() 'chains' by default."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                    self.classes.User,
                                    self.classes.Address)

        sess = create_session()
        q = sess.query(User).join(User.addresses).join(Address.dingaling).\
            options(contains_eager(User.addresses, Address.dingaling),)

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='<EMAIL>',
                                dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='<EMAIL>',
                                dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_contains_eager_chaining_aliased_endpoint(self):
        """test that contains_eager() 'chains' by default and supports
        an alias at the end."""

        Dingaling, User, Address = (self.classes.Dingaling,
                                    self.classes.User,
                                    self.classes.Address)

        sess = create_session()
        da = aliased(Dingaling, name="foob")
        q = sess.query(User).join(User.addresses).\
            join(da, Address.dingaling).\
            options(
                contains_eager(User.addresses, Address.dingaling, alias=da),)

        def go():
            eq_(
                q.all(),
                # note we only load the Address records that
                # have a Dingaling here due to using the inner
                # join for the eager load
                [
                    User(name='ed', addresses=[
                        Address(email_address='<EMAIL>',
                                dingaling=Dingaling(data='ding 1/2')),
                    ]),
                    User(name='fred', addresses=[
                        Address(email_address='<EMAIL>',
                                dingaling=Dingaling(data='ding 2/5'))
                    ])
                ]
            )
        self.assert_sql_count(testing.db, go, 1)

    def test_mixed_eager_contains_with_limit(self):
        Order, User, Address = (self.classes.Order,
                                self.classes.User,
                                self.classes.Address)

        sess = create_session()
        q = sess.query(User)

        def go():
            # outerjoin to User.orders, offset 1/limit 2 so we get user
            # 7 + second two orders. then joinedload the addresses.
            # User + Order columns go into the subquery, address left
            # outer joins to the subquery, joinedloader for User.orders
            # applies context.adapter to result rows.  This was
            # [ticket:1180].
            l = q.outerjoin(User.orders).options(
                joinedload(User.addresses), contains_eager(User.orders)). \
                order_by(User.id, Order.id).offset(1).limit(2).all()
            eq_(
                l, [
                    User(
                        id=7,
                        addresses=[
                            Address(
                                email_address='<EMAIL>',
                                user_id=7, id=1)],
                        name='jack',
                        orders=[
                            Order(
                                address_id=1, user_id=7, description='order 3',
                                isopen=1, id=3),
                            Order(
                                address_id=None, user_id=7,
                                description='order 5', isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            # same as above, except Order is aliased, so two adapters
            # are applied by the eager loader
            oalias = aliased(Order)
            l = q.outerjoin(oalias, User.orders).options(
                joinedload(User.addresses),
                contains_eager(User.orders, alias=oalias)). \
                order_by(User.id, oalias.id).\
                offset(1).limit(2).all()
            eq_(
                l,
                [
                    User(
                        id=7,
                        addresses=[
                            Address(
                                email_address='<EMAIL>',
                                user_id=7, id=1)],
                        name='jack',
                        orders=[
                            Order(
                                address_id=1, user_id=7, description='order 3',
                                isopen=1, id=3),
                            Order(
                                address_id=None, user_id=7,
                                description='order 5', isopen=0, id=5)])])
        self.assert_sql_count(testing.db, go, 1)
class MixedEntitiesTest(QueryTest, AssertsCompiledSQL):
__dialect__ = 'default'
def test_values(self):
    """Exercise Query.values() against entities, joins, aliases and
    select_entity_from() subqueries."""
    Address, users, User = (self.classes.Address,
                            self.tables.users,
                            self.classes.User)

    sess = create_session()

    # values() with no columns yields an empty iterator
    assert list(sess.query(User).values()) == list()

    sel = users.select(User.id.in_([7, 8])).alias()
    q = sess.query(User)
    q2 = q.select_entity_from(sel).values(User.name)
    eq_(list(q2), [('jack',), ('ed',)])

    q = sess.query(User)
    # values() with a SQL expression column
    q2 = q.order_by(User.id).\
        values(User.name, User.name + " " + cast(User.id, String(50)))
    eq_(
        list(q2),
        [
            ('jack', 'jack 7'), ('ed', 'ed 8'),
            ('fred', 'fred 9'), ('chuck', 'chuck 10')]
    )

    # values() across a join
    q2 = q.join('addresses').filter(User.name.like('%e%')).\
        order_by(User.id, Address.id).\
        values(User.name, Address.email_address)
    eq_(
        list(q2),
        [
            ('ed', '<EMAIL>'), ('ed', '<EMAIL>'),
            ('ed', '<EMAIL>'), ('fred', '<EMAIL>')])

    # values() combined with slice()
    q2 = q.join('addresses').filter(User.name.like('%e%')).\
        order_by(desc(Address.email_address)).\
        slice(1, 3).values(User.name, Address.email_address)
    eq_(list(q2), [('ed', '<EMAIL>'), ('ed', '<EMAIL>')])

    # values() against an aliased() entity
    adalias = aliased(Address)
    q2 = q.join(adalias, 'addresses'). \
        filter(User.name.like('%e%')).order_by(adalias.email_address).\
        values(User.name, adalias.email_address)
    eq_(list(q2), [('ed', '<EMAIL>'), ('ed', '<EMAIL>'),
                   ('ed', '<EMAIL>'), ('fred', '<EMAIL>')])

    # values() returns an iterator
    q2 = q.values(func.count(User.name))
    assert next(q2) == (4,)

    q2 = q.select_entity_from(sel).filter(User.id == 8). \
        values(User.name, sel.c.name, User.name)
    eq_(list(q2), [('ed', 'ed', 'ed')])

    # using User.xxx is aliased against "sel", so this query returns nothing
    q2 = q.select_entity_from(sel).filter(User.id == 8).\
        filter(User.id > sel.c.id).values(User.name, sel.c.name, User.name)
    eq_(list(q2), [])

    # whereas this uses users.c.xxx, is not aliased and creates a new join
    q2 = q.select_entity_from(sel).filter(users.c.id == 8).\
        filter(users.c.id > sel.c.id). \
        values(users.c.name, sel.c.name, User.name)
    eq_(list(q2), [('ed', 'jack', 'jack')])
def test_alias_naming(self):
    """aliased(..., name=...) renders the alias under the given name."""
    User = self.classes.User
    session = create_session()

    user_alias = aliased(User, name="foobar")
    query = session.query(user_alias)

    expected_sql = (
        "SELECT foobar.id AS foobar_id, "
        "foobar.name AS foobar_name FROM users AS foobar"
    )
    self.assert_compile(query, expected_sql)
@testing.fails_on('mssql', 'FIXME: unknown')
def test_values_specific_order_by(self):
    """values() with an ORDER BY that mixes the entity, the
    select_entity_from() subquery and an unconstrained alias."""
    users, User = self.tables.users, self.classes.User

    sess = create_session()
    assert list(sess.query(User).values()) == list()

    sel = users.select(User.id.in_([7, 8])).alias()
    q = sess.query(User)
    u2 = aliased(User)
    # u2 has no join condition to User/sel, so the expected rows form
    # the full pairing of (sel rows) x (u2 rows with id > 1)
    q2 = q.select_entity_from(sel).filter(u2.id > 1).\
        order_by(User.id, sel.c.id, u2.id).\
        values(User.name, sel.c.name, u2.name)
    eq_(
        list(q2),
        [
            ('jack', 'jack', 'jack'), ('jack', 'jack', 'ed'),
            ('jack', 'jack', 'fred'), ('jack', 'jack', 'chuck'),
            ('ed', 'ed', 'jack'), ('ed', 'ed', 'ed'),
            ('ed', 'ed', 'fred'), ('ed', 'ed', 'chuck')])
@testing.fails_on('mssql', 'FIXME: unknown')
@testing.fails_on('oracle',
                  "Oracle doesn't support boolean expressions as "
                  "columns")
@testing.fails_on('postgresql+pg8000',
                  "pg8000 parses the SQL itself before passing on "
                  "to PG, doesn't parse this")
@testing.fails_on('postgresql+zxjdbc',
                  "zxjdbc parses the SQL itself before passing on "
                  "to PG, doesn't parse this")
@testing.fails_on("firebird", "unknown")
def test_values_with_boolean_selects(self):
    """Tests a values clause that works with select boolean
    evaluations"""

    User = self.classes.User

    sess = create_session()
    q = sess.query(User)
    # group/order by the boolean LIKE expression itself
    q2 = q.group_by(User.name.like('%j%')).\
        order_by(desc(User.name.like('%j%'))).\
        values(User.name.like('%j%'), func.count(User.name.like('%j%')))
    eq_(list(q2), [(True, 1), (False, 3)])

    q2 = q.order_by(desc(User.name.like('%j%'))). \
        values(User.name.like('%j%'))
    eq_(list(q2), [(True,), (False,), (False,), (False,)])
def test_correlated_subquery(self):
    """test that a subquery constructed from ORM attributes doesn't leak
    out those entities to the outermost query."""

    Address, users, User = (
        self.classes.Address, self.tables.users, self.classes.User)

    sess = create_session()

    # explicit correlate() against the users table
    subq = select([func.count()]).where(User.id == Address.user_id).\
        correlate(users).label('count')

    # we don't want Address to be outside of the subquery here
    eq_(
        list(sess.query(User, subq)[0:3]),
        [
            (User(id=7, name='jack'), 1), (User(id=8, name='ed'), 3),
            (User(id=9, name='fred'), 1)])

    # same thing without the correlate, as it should
    # not be needed
    subq = select([func.count()]).where(User.id == Address.user_id).\
        label('count')

    # we don't want Address to be outside of the subquery here
    eq_(
        list(sess.query(User, subq)[0:3]),
        [
            (User(id=7, name='jack'), 1), (User(id=8, name='ed'), 3),
            (User(id=9, name='fred'), 1)])
def test_column_queries(self):
    """Column-only and mixed entity/column queries, with grouping,
    aliasing, from_self() and eager loading."""
    Address, users, User = (self.classes.Address,
                            self.tables.users,
                            self.classes.User)

    sess = create_session()

    # bare column query
    eq_(
        sess.query(User.name).all(),
        [('jack',), ('ed',), ('fred',), ('chuck',)])

    # column query against a select_entity_from() subquery
    sel = users.select(User.id.in_([7, 8])).alias()
    q = sess.query(User.name)
    q2 = q.select_entity_from(sel).all()
    eq_(list(q2), [('jack',), ('ed',)])

    # two column entities across an implicit join
    eq_(
        sess.query(User.name, Address.email_address).
        filter(User.id == Address.user_id).all(),
        [
            ('jack', '<EMAIL>'), ('ed', '<EMAIL>'),
            ('ed', '<EMAIL>'), ('ed', '<EMAIL>'),
            ('fred', '<EMAIL>')])

    # column + aggregate with GROUP BY on columns
    eq_(
        sess.query(User.name, func.count(Address.email_address)).
        outerjoin(User.addresses).group_by(User.id, User.name).
        order_by(User.id).all(),
        [('jack', 1), ('ed', 3), ('fred', 1), ('chuck', 0)])

    # entity + aggregate with GROUP BY on the entity itself
    eq_(
        sess.query(User, func.count(Address.email_address)).
        outerjoin(User.addresses).group_by(User).
        order_by(User.id).all(),
        [
            (User(name='jack', id=7), 1), (User(name='ed', id=8), 3),
            (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)])

    # same, with the column order reversed
    eq_(
        sess.query(func.count(Address.email_address), User).
        outerjoin(User.addresses).group_by(User).
        order_by(User.id).all(),
        [
            (1, User(name='jack', id=7)), (3, User(name='ed', id=8)),
            (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))])

    # now with an aliased() Address
    adalias = aliased(Address)
    eq_(
        sess.query(User, func.count(adalias.email_address)).
        outerjoin(adalias, 'addresses').group_by(User).
        order_by(User.id).all(),
        [
            (User(name='jack', id=7), 1), (User(name='ed', id=8), 3),
            (User(name='fred', id=9), 1), (User(name='chuck', id=10), 0)])

    eq_(
        sess.query(func.count(adalias.email_address), User).
        outerjoin(adalias, User.addresses).group_by(User).
        order_by(User.id).all(),
        [
            (1, User(name='jack', id=7)), (3, User(name='ed', id=8)),
            (1, User(name='fred', id=9)), (0, User(name='chuck', id=10))]
    )

    # select from aliasing + explicit aliasing
    eq_(
        sess.query(User, adalias.email_address, adalias.id).
        outerjoin(adalias, User.addresses).
        from_self(User, adalias.email_address).
        order_by(User.id, adalias.id).all(),
        [
            (User(name='jack', id=7), '<EMAIL>'),
            (User(name='ed', id=8), '<EMAIL>'),
            (User(name='ed', id=8), '<EMAIL>'),
            (User(name='ed', id=8), '<EMAIL>'),
            (User(name='fred', id=9), '<EMAIL>'),
            (User(name='chuck', id=10), None)
        ]
    )

    # anon + select from aliasing
    eq_(
        sess.query(User).join(User.addresses, aliased=True).
        filter(Address.email_address.like('%ed%')).
        from_self().all(),
        [
            User(name='ed', id=8),
            User(name='fred', id=9),
        ]
    )

    # test eager aliasing, with/without select_entity_from aliasing
    for q in [
        sess.query(User, adalias.email_address).
        outerjoin(adalias, User.addresses).
        options(joinedload(User.addresses)).
        order_by(User.id, adalias.id).limit(10),
        sess.query(User, adalias.email_address, adalias.id).
        outerjoin(adalias, User.addresses).
        from_self(User, adalias.email_address).
        options(joinedload(User.addresses)).
        order_by(User.id, adalias.id).limit(10),
    ]:
        eq_(
            q.all(),
            [
                (
                    User(
                        addresses=[
                            Address(
                                user_id=7, email_address='<EMAIL>',
                                id=1)],
                        name='jack', id=7),
                    '<EMAIL>'),
                (
                    User(
                        addresses=[
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=2),
                            Address(
                                user_id=8,
                                email_address='<EMAIL>', id=3),
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=4)],
                        name='ed', id=8),
                    '<EMAIL>'),
                (
                    User(
                        addresses=[
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=2),
                            Address(
                                user_id=8,
                                email_address='<EMAIL>', id=3),
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=4)],
                        name='ed', id=8),
                    '<EMAIL>'),
                (
                    User(
                        addresses=[
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=2),
                            Address(
                                user_id=8,
                                email_address='<EMAIL>', id=3),
                            Address(
                                user_id=8, email_address='<EMAIL>',
                                id=4)],
                        name='ed', id=8),
                    '<EMAIL>'),
                (
                    User(
                        addresses=[
                            Address(
                                user_id=9, email_address='<EMAIL>',
                                id=5)],
                        name='fred', id=9),
                    '<EMAIL>'),
                (User(addresses=[], name='chuck', id=10), None)])
def test_column_from_limited_joinedload(self):
    """joinedload combined with limit() and add_column() in one query."""
    User = self.classes.User
    session = create_session()

    def go():
        query = session.query(User).limit(1)
        query = query.options(joinedload('addresses'))
        rows = query.add_column(User.name).all()
        eq_(rows, [(User(name='jack'), 'jack')])

    # the eager load must not add a second round trip
    self.assert_sql_count(testing.db, go, 1)
@testing.fails_on("firebird", "unknown")
def test_self_referential(self):
    """Self-join of Order against its own alias, in several
    from_self() formulations that must all yield the same pairs."""
    Order = self.classes.Order

    sess = create_session()
    oalias = aliased(Order)

    for q in [
        # plain self-join, no subquery wrapping
        sess.query(Order, oalias).filter(Order.user_id == oalias.user_id).
        filter(Order.user_id == 7).
        filter(Order.id > oalias.id).order_by(Order.id, oalias.id),
        # criteria applied after from_self()
        sess.query(Order, oalias).from_self().
        filter(Order.user_id == oalias.user_id).filter(Order.user_id == 7).
        filter(Order.id > oalias.id).order_by(Order.id, oalias.id),
        # same thing, but reversed.
        sess.query(oalias, Order).from_self().
        filter(oalias.user_id == Order.user_id).
        filter(oalias.user_id == 7).filter(Order.id < oalias.id).
        order_by(oalias.id, Order.id),
        # here we go....two layers of aliasing
        sess.query(Order, oalias).filter(Order.user_id == oalias.user_id).
        filter(Order.user_id == 7).filter(Order.id > oalias.id).
        from_self().order_by(Order.id, oalias.id).
        limit(10).options(joinedload(Order.items)),
        # gratuitous four layers
        sess.query(Order, oalias).filter(Order.user_id == oalias.user_id).
        filter(Order.user_id == 7).filter(Order.id > oalias.id).
        from_self().from_self().from_self().order_by(Order.id, oalias.id).
        limit(10).options(joinedload(Order.items)),
    ]:
        eq_(
            q.all(),
            [
                (
                    Order(
                        address_id=1, description='order 3', isopen=1,
                        user_id=7, id=3),
                    Order(
                        address_id=1, description='order 1', isopen=0,
                        user_id=7, id=1)),
                (
                    Order(
                        address_id=None, description='order 5', isopen=0,
                        user_id=7, id=5),
                    Order(
                        address_id=1, description='order 1', isopen=0,
                        user_id=7, id=1)),
                (
                    Order(
                        address_id=None, description='order 5', isopen=0,
                        user_id=7, id=5),
                    Order(
                        address_id=1, description='order 3', isopen=1,
                        user_id=7, id=3))
            ]
        )

    # ensure column expressions are taken from inside the subquery, not
    # restated at the top
    q = sess.query(
        Order.id, Order.description,
        literal_column("'q'").label('foo')).\
        filter(Order.description == 'order 3').from_self()
    self.assert_compile(
        q,
        "SELECT anon_1.orders_id AS "
        "anon_1_orders_id, anon_1.orders_descriptio"
        "n AS anon_1_orders_description, "
        "anon_1.foo AS anon_1_foo FROM (SELECT "
        "orders.id AS orders_id, "
        "orders.description AS orders_description, "
        "'q' AS foo FROM orders WHERE "
        "orders.description = :description_1) AS "
        "anon_1")
    eq_(
        q.all(),
        [(3, 'order 3', 'q')]
    )
def test_multi_mappers(self):
    """Two-entity results via instances(), add_entity() and
    query(User, Address), with both plain and aliased() Address."""
    Address, addresses, users, User = (self.classes.Address,
                                       self.tables.addresses,
                                       self.tables.users,
                                       self.classes.User)

    test_session = create_session()

    (user7, user8, user9, user10) = test_session.query(User).all()
    (address1, address2, address3, address4, address5) = \
        test_session.query(Address).all()

    # users outer-joined to their addresses, in id order
    expected = [(user7, address1),
                (user8, address2),
                (user8, address3),
                (user8, address4),
                (user9, address5),
                (user10, None)]

    sess = create_session()

    # raw statement through instances()
    selectquery = users.outerjoin(addresses). \
        select(use_labels=True, order_by=[users.c.id, addresses.c.id])
    eq_(
        list(sess.query(User, Address).instances(selectquery.execute())),
        expected)
    sess.expunge_all()

    # the same scenarios with the plain class and an aliased() class
    for address_entity in (Address, aliased(Address)):
        q = sess.query(User).add_entity(address_entity).\
            outerjoin(address_entity, 'addresses').\
            order_by(User.id, address_entity.id)
        eq_(q.all(), expected)
        sess.expunge_all()

        q = sess.query(User).add_entity(address_entity)
        q = q.join(address_entity, 'addresses')
        q = q.filter_by(email_address='<EMAIL>')
        eq_(q.all(), [(user8, address3)])
        sess.expunge_all()

        q = sess.query(User, address_entity). \
            join(address_entity, 'addresses'). \
            filter_by(email_address='<EMAIL>')
        eq_(q.all(), [(user8, address3)])
        sess.expunge_all()

        # with an eager-load option layered on top
        q = sess.query(User, address_entity). \
            join(address_entity, 'addresses').\
            options(joinedload('addresses')).\
            filter_by(email_address='<EMAIL>')
        eq_(list(util.OrderedSet(q.all())), [(user8, address3)])
        sess.expunge_all()
def test_aliased_multi_mappers(self):
    """add_entity(Address, alias=...) against an aliased addresses
    table combined with select_entity_from()."""
    User, addresses, users, Address = (self.classes.User,
                                       self.tables.addresses,
                                       self.tables.users,
                                       self.classes.Address)

    sess = create_session()

    (user7, user8, user9, user10) = sess.query(User).all()
    (address1, address2, address3, address4, address5) = \
        sess.query(Address).all()

    # users outer-joined to their addresses, in id order
    expected = [(user7, address1),
                (user8, address2),
                (user8, address3),
                (user8, address4),
                (user9, address5),
                (user10, None)]

    q = sess.query(User)
    adalias = addresses.alias('adalias')
    q = q.add_entity(Address, alias=adalias). \
        select_entity_from(users.outerjoin(adalias))
    l = q.order_by(User.id, adalias.c.id).all()
    assert l == expected

    sess.expunge_all()

    # same, with a filter against the aliased table's column
    q = sess.query(User).add_entity(Address, alias=adalias)
    l = q.select_entity_from(users.outerjoin(adalias)). \
        filter(adalias.c.email_address == '<EMAIL>').all()
    assert l == [(user8, address3)]
def test_with_entities(self):
    """with_entities() swaps the column list while keeping criteria."""
    User, Address = self.classes.User, self.classes.Address
    session = create_session()

    base = session.query(User).filter(User.id == 7).order_by(User.name)
    swapped = base.with_entities(User.id, Address).filter(
        Address.user_id == User.id)

    self.assert_compile(
        swapped,
        'SELECT users.id AS users_id, addresses.id '
        'AS addresses_id, addresses.user_id AS '
        'addresses_user_id, addresses.email_address'
        ' AS addresses_email_address FROM users, '
        'addresses WHERE users.id = :id_1 AND '
        'addresses.user_id = users.id ORDER BY '
        'users.name')
def test_multi_columns(self):
    """add_column() accepts both ORM attributes and plain table columns."""
    users, User = self.tables.users, self.classes.User
    session = create_session()

    expected = [(u, u.name) for u in session.query(User).all()]

    for column_arg in (User.name, users.c.name):
        assert session.query(User).add_column(column_arg).all() == expected
        session.expunge_all()

    # anything that is not a column expression is rejected
    assert_raises(
        sa_exc.InvalidRequestError,
        session.query(User).add_column, object())
def test_add_multi_columns(self):
"""test that add_column accepts a FROM clause."""
users, User = self.tables.users, self.classes.User
sess = create_session()
eq_(
sess.query(User.id).add_column(users).all(),
[(7, 7, 'jack'), (8, 8, 'ed'), (9, 9, 'fred'), (10, 10, 'chuck')]
)
def test_multi_columns_2(self):
"""test aliased/nonalised joins with the usage of add_column()"""
User, Address, addresses, users = (self.classes.User,
self.classes.Address,
self.tables.addresses,
self.tables.users)
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [(user7, 1),
(user8, 3),
(user9, 1),
(user10, 0)
]
q = sess.query(User)
q = q.group_by(users).order_by(User.id).outerjoin('addresses').\
add_column(func.count(Address.id).label('count'))
eq_(q.all(), expected)
sess.expunge_all()
adalias = aliased(Address)
q = sess.query(User)
q = q.group_by(users).order_by(User.id). \
outerjoin(adalias, 'addresses').\
add_column(func.count(adalias.id).label('count'))
eq_(q.all(), expected)
sess.expunge_all()
# TODO: figure out why group_by(users) doesn't work here
s = select([users, func.count(addresses.c.id).label('count')]). \
select_from(users.outerjoin(addresses)). \
group_by(*[c for c in users.c]).order_by(User.id)
q = sess.query(User)
l = q.add_column("count").from_statement(s).all()
assert l == expected
def test_raw_columns(self):
addresses, users, User = (self.tables.addresses,
self.tables.users,
self.classes.User)
sess = create_session()
(user7, user8, user9, user10) = sess.query(User).all()
expected = [
(user7, 1, "Name:jack"),
(user8, 3, "Name:ed"),
(user9, 1, "Name:fred"),
(user10, 0, "Name:chuck")]
adalias = addresses.alias()
q = create_session().query(User).add_column(func.count(adalias.c.id))\
.add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\
.group_by(users).order_by(users.c.id)
assert q.all() == expected
# test with a straight statement
s = select(
[
users, func.count(addresses.c.id).label('count'),
("Name:" + users.c.name).label('concat')],
from_obj=[users.outerjoin(addresses)],
group_by=[c for c in users.c], order_by=[users.c.id])
q = create_session().query(User)
l = q.add_column("count").add_column("concat").from_statement(s).all()
assert l == expected
sess.expunge_all()
# test with select_entity_from()
q = create_session().query(User).add_column(func.count(addresses.c.id))\
.add_column(("Name:" + users.c.name)).select_entity_from(users.outerjoin(addresses))\
.group_by(users).order_by(users.c.id)
assert q.all() == expected
sess.expunge_all()
q = create_session().query(User).add_column(func.count(addresses.c.id))\
.add_column(("Name:" + users.c.name)).outerjoin('addresses')\
.group_by(users).order_by(users.c.id)
assert q.all() == expected
sess.expunge_all()
q = create_session().query(User).add_column(func.count(adalias.c.id))\
.add_column(("Name:" + users.c.name)).outerjoin(adalias, 'addresses')\
.group_by(users).order_by(users.c.id)
assert q.all() == expected
sess.expunge_all()
def test_expression_selectable_matches_mzero(self):
User, Address = self.classes.User, self.classes.Address
ua = aliased(User)
aa = aliased(Address)
s = create_session()
for crit, j, exp in [
(
User.id + Address.id, User.addresses,
"SELECT users.id + addresses.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id"),
(
User.id + Address.id, Address.user,
"SELECT users.id + addresses.id AS anon_1 "
"FROM addresses JOIN users ON users.id = "
"addresses.user_id"),
(
Address.id + User.id, User.addresses,
"SELECT addresses.id + users.id AS anon_1 "
"FROM users JOIN addresses ON users.id = "
"addresses.user_id"),
(
User.id + aa.id, (aa, User.addresses),
"SELECT users.id + addresses_1.id AS anon_1 "
"FROM users JOIN addresses AS addresses_1 "
"ON users.id = addresses_1.user_id"),
]:
q = s.query(crit)
mzero = q._mapper_zero()
assert mzero.mapped_table is q._entity_zero().selectable
q = q.join(j)
self.assert_compile(q, exp)
for crit, j, exp in [
(
ua.id + Address.id, ua.addresses,
"SELECT users_1.id + addresses.id AS anon_1 "
"FROM users AS users_1 JOIN addresses "
"ON users_1.id = addresses.user_id"),
(
ua.id + aa.id, (aa, ua.addresses),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM users AS users_1 JOIN addresses AS "
"addresses_1 ON users_1.id = addresses_1.user_id"),
(
ua.id + aa.id, (ua, aa.user),
"SELECT users_1.id + addresses_1.id AS anon_1 "
"FROM addresses AS addresses_1 JOIN "
"users AS users_1 "
"ON users_1.id = addresses_1.user_id")
]:
q = s.query(crit)
mzero = q._mapper_zero()
assert inspect(mzero).selectable is q._entity_zero().selectable
q = q.join(j)
self.assert_compile(q, exp)
def test_aliased_adapt_on_names(self):
User, Address = self.classes.User, self.classes.Address
sess = Session()
agg_address = sess.query(
Address.id,
func.sum(func.length(Address.email_address)).
label('email_address')).group_by(Address.user_id)
ag1 = aliased(Address, agg_address.subquery())
ag2 = aliased(Address, agg_address.subquery(), adapt_on_names=True)
# first, without adapt on names, 'email_address' isn't matched up - we
# get the raw "address" element in the SELECT
self.assert_compile(
sess.query(User, ag1.email_address).join(ag1, User.addresses).
filter(ag1.email_address > 5),
"SELECT users.id "
"AS users_id, users.name AS users_name, addresses.email_address "
"AS addresses_email_address FROM addresses, users JOIN "
"(SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE addresses.email_address > :email_address_1")
# second, 'email_address' matches up to the aggreagte, and we get a
# smooth JOIN from users->subquery and that's it
self.assert_compile(
sess.query(User, ag2.email_address).join(ag2, User.addresses).
filter(ag2.email_address > 5),
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email_address AS anon_1_email_address FROM users "
"JOIN ("
"SELECT addresses.id AS id, sum(length(addresses.email_address)) "
"AS email_address FROM addresses GROUP BY addresses.user_id) AS "
"anon_1 ON users.id = addresses.user_id "
"WHERE anon_1.email_address > :email_address_1",)
class SelectFromTest(QueryTest, AssertsCompiledSQL):
    """Tests for Query.select_entity_from() / select_from(), replacing
    an entity's FROM clause with an arbitrary selectable."""

    run_setup_mappers = None
    __dialect__ = 'default'

    def test_replace_with_select(self):
        """select_entity_from() against an aliased SELECT adapts filters,
        ordering and eager options to the substitute selectable."""
        users, Address, addresses, User = (
            self.tables.users, self.classes.Address, self.tables.addresses,
            self.classes.User)
        mapper(
            User, users, properties={
                'addresses': relationship(Address)})
        mapper(Address, addresses)
        sel = users.select(users.c.id.in_([7, 8])).alias()
        sess = create_session()
        eq_(
            sess.query(User).select_entity_from(sel).all(),
            [User(id=7), User(id=8)])
        eq_(
            sess.query(User).select_entity_from(sel).
            filter(User.id == 8).all(),
            [User(id=8)])
        eq_(
            sess.query(User).select_entity_from(sel).
            order_by(desc(User.name)).all(), [
                User(name='jack', id=7), User(name='ed', id=8)])
        eq_(
            sess.query(User).select_entity_from(sel).
            order_by(asc(User.name)).all(), [
                User(name='ed', id=8), User(name='jack', id=7)])
        eq_(
            sess.query(User).select_entity_from(sel).
            options(joinedload('addresses')).first(),
            User(name='jack', addresses=[Address(id=1)]))

    def test_join_mapper_order_by(self):
        """test that mapper-level order_by is adapted to a selectable."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users, order_by=users.c.id)
        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()
        eq_(
            sess.query(User).select_entity_from(sel).all(),
            [
                User(name='jack', id=7), User(name='ed', id=8)])

    def test_differentiate_self_external(self):
        """test some different combinations of joining a table to a subquery of
        itself."""
        users, User = self.tables.users, self.classes.User
        mapper(User, users)
        sess = create_session()
        sel = sess.query(User).filter(User.id.in_([7, 8])).subquery()
        ualias = aliased(User)
        self.assert_compile(
            sess.query(User).join(sel, User.id > sel.c.id),
            "SELECT users.id AS users_id, users.name AS users_name FROM "
            "users JOIN (SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id IN (:id_1, :id_2)) "
            "AS anon_1 ON users.id > anon_1.id",)
        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
            filter(ualias.id > sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users AS users_1, ("
            "SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "WHERE users_1.id > anon_1.id",)
        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
            join(ualias, ualias.id > sel.c.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id")
        self.assert_compile(
            sess.query(ualias).select_entity_from(sel).
            join(ualias, ualias.id > User.id),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM (SELECT users.id AS id, users.name AS name FROM "
            "users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id")
        salias = aliased(User, sel)
        self.assert_compile(
            sess.query(salias).join(ualias, ualias.id > salias.id),
            "SELECT anon_1.id AS anon_1_id, anon_1.name AS anon_1_name FROM "
            "(SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id",)
        self.assert_compile(
            sess.query(ualias).select_entity_from(
                join(sel, ualias, ualias.id > sel.c.id)),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM "
            "(SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id "
            "IN (:id_1, :id_2)) AS anon_1 "
            "JOIN users AS users_1 ON users_1.id > anon_1.id")

    def test_aliased_class_vs_nonaliased(self):
        """Compare select_from() behavior when the FROM element is an
        aliased class versus the plain mapped class."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        ua = aliased(User)
        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(ua).join(User, ua.name > User.name),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM users AS users_1 JOIN users ON users_1.name > users.name"
        )
        self.assert_compile(
            sess.query(User.name).select_from(ua).
            join(User, ua.name > User.name),
            "SELECT users.name AS users_name FROM users AS users_1 "
            "JOIN users ON users_1.name > users.name"
        )
        self.assert_compile(
            sess.query(ua.name).select_from(ua).
            join(User, ua.name > User.name),
            "SELECT users_1.name AS users_1_name FROM users AS users_1 "
            "JOIN users ON users_1.name > users.name"
        )
        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, ua.name > User.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users_1.name > users.name"
        )
        self.assert_compile(
            sess.query(ua).select_from(User).join(ua, User.name > ua.name),
            "SELECT users_1.id AS users_1_id, users_1.name AS users_1_name "
            "FROM users JOIN users AS users_1 ON users.name > users_1.name"
        )
        # this is tested in many other places here, just adding it
        # here for comparison
        self.assert_compile(
            sess.query(User.name).select_entity_from(
                users.select().where(users.c.id > 5)),
            "SELECT anon_1.name AS anon_1_name FROM (SELECT users.id AS id, "
            "users.name AS name FROM users WHERE users.id > :id_1) AS anon_1")

    def test_join_no_order_by(self):
        """select_entity_from() without any explicit ordering."""
        User, users = self.classes.User, self.tables.users
        mapper(User, users)
        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()
        eq_(
            sess.query(User).select_entity_from(sel).all(),
            [User(name='jack', id=7), User(name='ed', id=8)])

    def test_join_relname_from_selected_from(self):
        """join() by relationship name resolves against the entity set
        via select_from()."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(User, users, properties=
               {'addresses': relationship(mapper(Address, addresses),
                                          backref='user')})
        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(Address).join("user"),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id"
        )

    def test_filter_by_selected_from(self):
        """filter_by() applies to the select_from() entity, not the
        primary query entity."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(User, users, properties=
               {'addresses': relationship(mapper(Address, addresses))})
        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(Address).
            filter_by(email_address='ed').join(User),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id "
            "WHERE addresses.email_address = :email_address_1"
        )

    def test_join_ent_selected_from(self):
        """join() to a mapped entity from a select_from() entity."""
        User, Address = self.classes.User, self.classes.Address
        users, addresses = self.tables.users, self.tables.addresses
        mapper(User, users, properties=
               {'addresses': relationship(mapper(Address, addresses))})
        sess = create_session()
        self.assert_compile(
            sess.query(User).select_from(Address).join(User),
            "SELECT users.id AS users_id, users.name AS users_name "
            "FROM addresses JOIN users ON users.id = addresses.user_id"
        )

    def test_join(self):
        """join() from a select_entity_from() selectable, both plain
        and with an aliased target."""
        users, Address, addresses, User = (
            self.tables.users, self.classes.Address, self.tables.addresses,
            self.classes.User)
        mapper(User, users, properties={'addresses': relationship(Address)})
        mapper(Address, addresses)
        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()
        eq_(
            sess.query(User).select_entity_from(sel).join('addresses').
            add_entity(Address).order_by(User.id).order_by(Address.id).all(),
            [
                (
                    User(name='jack', id=7),
                    Address(user_id=7, email_address='<EMAIL>', id=1)),
                (
                    User(name='ed', id=8),
                    Address(user_id=8, email_address='<EMAIL>', id=2)),
                (
                    User(name='ed', id=8),
                    Address(
                        user_id=8, email_address='<EMAIL>', id=3)),
                (
                    User(name='ed', id=8),
                    Address(user_id=8, email_address='<EMAIL>', id=4))])
        adalias = aliased(Address)
        eq_(
            sess.query(User).select_entity_from(sel).
            join(adalias, 'addresses').add_entity(adalias).order_by(User.id).
            order_by(adalias.id).all(),
            [
                (
                    User(name='jack', id=7),
                    Address(user_id=7, email_address='<EMAIL>', id=1)),
                (
                    User(name='ed', id=8),
                    Address(user_id=8, email_address='<EMAIL>', id=2)),
                (
                    User(name='ed', id=8),
                    Address(
                        user_id=8, email_address='<EMAIL>', id=3)),
                (
                    User(name='ed', id=8),
                    Address(user_id=8, email_address='<EMAIL>', id=4))])

    def test_more_joins(self):
        """Multi-hop relationship joins out of a select_entity_from()
        selectable, with and without aliasing."""
        (
            users, Keyword, orders, items, order_items, Order, Item, User,
            keywords, item_keywords) = \
            (
                self.tables.users, self.classes.Keyword, self.tables.orders,
                self.tables.items, self.tables.order_items, self.classes.Order,
                self.classes.Item, self.classes.User, self.tables.keywords,
                self.tables.item_keywords)
        mapper(
            User, users, properties={
                'orders': relationship(Order, backref='user')})  # o2m, m2o
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, order_by=items.c.id)})  # m2m
        mapper(
            Item, items, properties={
                'keywords': relationship(
                    Keyword, secondary=item_keywords,
                    order_by=keywords.c.id)})  # m2m
        mapper(Keyword, keywords)
        sess = create_session()
        sel = users.select(users.c.id.in_([7, 8]))
        eq_(
            sess.query(User).select_entity_from(sel).
            join('orders', 'items', 'keywords').
            filter(Keyword.name.in_(['red', 'big', 'round'])).all(),
            [User(name='jack', id=7)])
        eq_(
            sess.query(User).select_entity_from(sel).
            join('orders', 'items', 'keywords', aliased=True).
            filter(Keyword.name.in_(['red', 'big', 'round'])).all(),
            [User(name='jack', id=7)])

    def test_very_nested_joins_with_joinedload(self):
        """Deeply nested aliased joins combined with joinedload_all();
        the whole result should come back in a single statement."""
        (
            users, Keyword, orders, items, order_items, Order, Item, User,
            keywords, item_keywords) = \
            (
                self.tables.users, self.classes.Keyword, self.tables.orders,
                self.tables.items, self.tables.order_items, self.classes.Order,
                self.classes.Item, self.classes.User, self.tables.keywords,
                self.tables.item_keywords)
        mapper(
            User, users, properties={
                'orders': relationship(Order, backref='user')})  # o2m, m2o
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, order_by=items.c.id)})  # m2m
        mapper(
            Item, items, properties={
                'keywords': relationship(
                    Keyword, secondary=item_keywords,
                    order_by=keywords.c.id)})  # m2m
        mapper(Keyword, keywords)
        sess = create_session()
        sel = users.select(users.c.id.in_([7, 8]))

        def go():
            eq_(
                sess.query(User).select_entity_from(sel).
                options(joinedload_all('orders.items.keywords')).
                join('orders', 'items', 'keywords', aliased=True).
                filter(Keyword.name.in_(['red', 'big', 'round'])).
                all(),
                [
                    User(name='jack', orders=[
                        Order(
                            description='order 1', items=[
                                Item(
                                    description='item 1', keywords=[
                                        Keyword(name='red'),
                                        Keyword(name='big'),
                                        Keyword(name='round')]),
                                Item(
                                    description='item 2', keywords=[
                                        Keyword(name='red', id=2),
                                        Keyword(name='small', id=5),
                                        Keyword(name='square')]),
                                Item(
                                    description='item 3', keywords=[
                                        Keyword(name='green', id=3),
                                        Keyword(name='big', id=4),
                                        Keyword(name='round', id=6)])]),
                        Order(
                            description='order 3', items=[
                                Item(
                                    description='item 3', keywords=[
                                        Keyword(name='green', id=3),
                                        Keyword(name='big', id=4),
                                        Keyword(name='round', id=6)]),
                                Item(description='item 4', keywords=[], id=4),
                                Item(
                                    description='item 5', keywords=[], id=5)]),
                        Order(
                            description='order 5',
                            items=[
                                Item(description='item 5', keywords=[])])])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()
        sel2 = orders.select(orders.c.id.in_([1, 2, 3]))
        eq_(
            sess.query(Order).select_entity_from(sel2).
            join('items', 'keywords').filter(Keyword.name == 'red').
            order_by(Order.id).all(),
            [
                Order(description='order 1', id=1),
                Order(description='order 2', id=2)])
        eq_(
            sess.query(Order).select_entity_from(sel2).
            join('items', 'keywords', aliased=True).
            filter(Keyword.name == 'red').order_by(Order.id).all(),
            [
                Order(description='order 1', id=1),
                Order(description='order 2', id=2)])

    def test_replace_with_eager(self):
        """joinedload() combined with select_entity_from(); each query
        variant should still emit exactly one statement."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(
            User, users, properties={
                'addresses': relationship(Address, order_by=addresses.c.id)})
        mapper(Address, addresses)
        sel = users.select(users.c.id.in_([7, 8]))
        sess = create_session()

        def go():
            eq_(
                sess.query(User).options(joinedload('addresses')).
                select_entity_from(sel).order_by(User.id).all(),
                [
                    User(id=7, addresses=[Address(id=1)]),
                    User(
                        id=8, addresses=[Address(id=2), Address(id=3),
                                         Address(id=4)])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(
                sess.query(User).options(joinedload('addresses')).
                select_entity_from(sel).filter(User.id == 8).order_by(User.id).
                all(),
                [
                    User(
                        id=8, addresses=[Address(id=2), Address(id=3),
                                         Address(id=4)])])
        self.assert_sql_count(testing.db, go, 1)
        sess.expunge_all()

        def go():
            eq_(
                sess.query(User).options(joinedload('addresses')).
                select_entity_from(sel).order_by(User.id)[1],
                User(
                    id=8, addresses=[Address(id=2), Address(id=3),
                                     Address(id=4)]))
        self.assert_sql_count(testing.db, go, 1)
class CustomJoinTest(QueryTest):
    """Tests for joins along relationships with custom primaryjoin
    conditions."""

    run_setup_mappers = None

    def test_double_same_mappers(self):
        """test aliasing of joins with a custom join condition"""
        (
            addresses, items, order_items, orders, Item, User, Address, Order,
            users) = \
            (
                self.tables.addresses, self.tables.items,
                self.tables.order_items, self.tables.orders, self.classes.Item,
                self.classes.User, self.classes.Address, self.classes.Order,
                self.tables.users)
        mapper(Address, addresses)
        mapper(
            Order, orders, properties={
                'items': relationship(
                    Item, secondary=order_items, lazy='select',
                    order_by=items.c.id)})
        mapper(Item, items)
        # two relationships to Order distinguished only by their
        # primaryjoin on orders.isopen
        mapper(
            User, users, properties=dict(
                addresses=relationship(Address, lazy='select'),
                open_orders=relationship(
                    Order,
                    primaryjoin=and_(
                        orders.c.isopen == 1, users.c.id == orders.c.user_id),
                    lazy='select'),
                closed_orders=relationship(
                    Order,
                    primaryjoin=and_(
                        orders.c.isopen == 0, users.c.id == orders.c.user_id),
                    lazy='select')))
        q = create_session().query(User)
        eq_(
            q.join('open_orders', 'items', aliased=True).filter(Item.id == 4).
            join('closed_orders', 'items', aliased=True).filter(Item.id == 3).
            all(),
            [User(id=7)]
        )
class ExternalColumnsTest(QueryTest):
    """test mappers with SQL-expressions added as column properties."""

    run_setup_mappers = None

    def test_external_columns_bad(self):
        """A column_property expression not against the mapped table
        must be wrapped (e.g. column_property()); a raw expression
        raises ArgumentError."""
        users, User = self.tables.users, self.classes.User
        assert_raises_message(
            sa_exc.ArgumentError,
            "not represented in the mapper's table", mapper, User, users,
            properties={
                'concat': (users.c.id * 2),
            })
        clear_mappers()

    def test_external_columns(self):
        """test querying mappings that reference external columns or
        selectables."""
        users, Address, addresses, User = (self.tables.users,
                                           self.classes.Address,
                                           self.tables.addresses,
                                           self.classes.User)
        mapper(
            User, users, properties={
                'concat': column_property((users.c.id * 2)),
                'count': column_property(
                    select(
                        [func.count(addresses.c.id)],
                        users.c.id == addresses.c.user_id).correlate(users).
                    as_scalar())})
        mapper(Address, addresses, properties={
            'user': relationship(User)
        })
        sess = create_session()
        sess.query(Address).options(joinedload('user')).all()
        eq_(
            sess.query(User).all(),
            [
                User(id=7, concat=14, count=1),
                User(id=8, concat=16, count=3),
                User(id=9, concat=18, count=1),
                User(id=10, concat=20, count=0),
            ])
        address_result = [
            Address(id=1, user=User(id=7, concat=14, count=1)),
            Address(id=2, user=User(id=8, concat=16, count=3)),
            Address(id=3, user=User(id=8, concat=16, count=3)),
            Address(id=4, user=User(id=8, concat=16, count=3)),
            Address(id=5, user=User(id=9, concat=18, count=1))
        ]
        eq_(sess.query(Address).all(), address_result)
        # run the eager version twice to test caching of aliased clauses
        for x in range(2):
            sess.expunge_all()

            def go():
                eq_(
                    sess.query(Address).options(joinedload('user')).
                    order_by(Address.id).all(),
                    address_result)
            self.assert_sql_count(testing.db, go, 1)
        ualias = aliased(User)
        eq_(
            sess.query(Address, ualias).join(ualias, 'user').all(),
            [(address, address.user) for address in address_result]
        )
        eq_(
            sess.query(Address, ualias.count).join(ualias, 'user').
            join('user', aliased=True).order_by(Address.id).all(),
            [
                (Address(id=1), 1),
                (Address(id=2), 3),
                (Address(id=3), 3),
                (Address(id=4), 3),
                (Address(id=5), 1)
            ]
        )
        eq_(
            sess.query(Address, ualias.concat, ualias.count).
            join(ualias, 'user').
            join('user', aliased=True).order_by(Address.id).all(),
            [
                (Address(id=1), 14, 1),
                (Address(id=2), 16, 3),
                (Address(id=3), 16, 3),
                (Address(id=4), 16, 3),
                (Address(id=5), 18, 1)
            ]
        )
        ua = aliased(User)
        eq_(
            sess.query(Address, ua.concat, ua.count).
            select_entity_from(join(Address, ua, 'user')).
            options(joinedload(Address.user)).order_by(Address.id).all(),
            [
                (Address(id=1, user=User(id=7, concat=14, count=1)), 14, 1),
                (Address(id=2, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=3, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=4, user=User(id=8, concat=16, count=3)), 16, 3),
                (Address(id=5, user=User(id=9, concat=18, count=1)), 18, 1)
            ])
        eq_(
            list(
                sess.query(Address).join('user').
                values(Address.id, User.id, User.concat, User.count)),
            [
                (1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3),
                (5, 9, 18, 1)])
        eq_(
            list(
                sess.query(Address, ua).
                select_entity_from(join(Address, ua, 'user')).
                values(Address.id, ua.id, ua.concat, ua.count)),
            [
                (1, 7, 14, 1), (2, 8, 16, 3), (3, 8, 16, 3), (4, 8, 16, 3),
                (5, 9, 18, 1)])

    def test_external_columns_joinedload(self):
        """Column-property subqueries must not be double-adapted when
        combined with joinedload of the same relationship."""
        users, orders, User, Address, Order, addresses = (self.tables.users,
                                                          self.tables.orders,
                                                          self.classes.User,
                                                          self.classes.Address,
                                                          self.classes.Order,
                                                          self.tables.addresses)
        # in this test, we have a subquery on User that accesses "addresses",
        # underneath an joinedload for "addresses". So the "addresses" alias
        # adapter needs to *not* hit the "addresses" table within the "user"
        # subquery, but "user" still needs to be adapted. therefore the long
        # standing practice of eager adapters being "chained" has been removed
        # since its unnecessary and breaks this exact condition.
        mapper(
            User, users, properties={
                'addresses': relationship(
                    Address, backref='user', order_by=addresses.c.id),
                'concat': column_property((users.c.id * 2)),
                'count': column_property(
                    select(
                        [func.count(addresses.c.id)],
                        users.c.id == addresses.c.user_id).correlate(users))})
        mapper(Address, addresses)
        mapper(
            Order, orders, properties={
                'address': relationship(Address)})  # m2o
        sess = create_session()

        def go():
            o1 = sess.query(Order).options(joinedload_all('address.user')). \
                get(1)
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)
        sess = create_session()

        def go():
            o1 = sess.query(Order).options(joinedload_all('address.user')). \
                first()
            eq_(o1.address.user.count, 1)
        self.assert_sql_count(testing.db, go, 1)

    def test_external_columns_compound(self):
        """Labeled column_property built from another labeled
        column_property; see [ticket:2167] for background."""
        users, Address, addresses, User = (
            self.tables.users, self.classes.Address, self.tables.addresses,
            self.classes.User)
        mapper(
            User, users, properties={
                'fullname': column_property(users.c.name.label('x'))})
        mapper(
            Address, addresses, properties={
                'username': column_property(
                    select([User.fullname]).
                    where(User.id == addresses.c.user_id).label('y'))})
        sess = create_session()
        a1 = sess.query(Address).first()
        eq_(a1.username, "jack")
        sess = create_session()
        a1 = sess.query(Address).from_self().first()
        eq_(a1.username, "jack")
class TestOverlyEagerEquivalentCols(fixtures.MappedTest):
    """Aliased joins must not treat columns as equivalent merely
    because they share an overlapping ForeignKey chain."""

    @classmethod
    def define_tables(cls, metadata):
        Table(
            'base', metadata,
            Column(
                'id', Integer, primary_key=True,
                test_needs_autoincrement=True),
            Column('data', String(50))
        )
        Table(
            'sub1', metadata,
            Column('id', Integer, ForeignKey('base.id'), primary_key=True),
            Column('data', String(50))
        )
        # sub2.id carries FKs to both base.id and sub1.id
        Table(
            'sub2', metadata,
            Column(
                'id', Integer, ForeignKey('base.id'), ForeignKey('sub1.id'),
                primary_key=True),
            Column('data', String(50))
        )

    def test_equivs(self):
        base, sub2, sub1 = (
            self.tables.base, self.tables.sub2, self.tables.sub1)

        class Base(fixtures.ComparableEntity):
            pass

        class Sub1(fixtures.ComparableEntity):
            pass

        class Sub2(fixtures.ComparableEntity):
            pass
        mapper(Base, base, properties={
            'sub1': relationship(Sub1),
            'sub2': relationship(Sub2)
        })
        mapper(Sub1, sub1)
        mapper(Sub2, sub2)
        sess = create_session()
        s11 = Sub1(data='s11')
        s12 = Sub1(data='s12')
        s2 = Sub2(data='s2')
        b1 = Base(data='b1', sub1=[s11], sub2=[])
        b2 = Base(data='b1', sub1=[s12], sub2=[])
        sess.add(b1)
        sess.add(b2)
        sess.flush()
        # there's an overlapping ForeignKey here, so not much option except
        # to artificially control the flush order
        b2.sub2 = [s2]
        sess.flush()
        q = sess.query(Base).outerjoin('sub2', aliased=True)
        # sub1.id must NOT be considered equivalent to the aliased columns
        assert sub1.c.id not in q._filter_aliases.equivalents
        eq_(
            sess.query(Base).join('sub1').outerjoin('sub2', aliased=True).
            filter(Sub1.id == 1).one(),
            b1
        )
class LabelCollideTest(fixtures.MappedTest):
    """Test handling for a label collision.  This collision
    is handled by core, see ticket:2702 as well as
    test/sql/test_selectable->WithLabelsTest.  here we want
    to make sure the end result is as we expect.

    """

    @classmethod
    def define_tables(cls, metadata):
        # 'foo' and 'foo_bar' are named so that foo.bar_id and
        # foo_bar.id produce colliding labels
        Table(
            'foo', metadata,
            Column('id', Integer, primary_key=True),
            Column('bar_id', Integer)
        )
        Table('foo_bar', metadata, Column('id', Integer, primary_key=True))

    @classmethod
    def setup_classes(cls):
        class Foo(cls.Basic):
            pass

        class Bar(cls.Basic):
            pass

    @classmethod
    def setup_mappers(cls):
        mapper(cls.classes.Foo, cls.tables.foo)
        mapper(cls.classes.Bar, cls.tables.foo_bar)

    @classmethod
    def insert_data(cls):
        s = Session()
        s.add_all([
            cls.classes.Foo(id=1, bar_id=2),
            cls.classes.Bar(id=3)
        ])
        s.commit()

    def test_overlap_plain(self):
        s = Session()
        row = s.query(self.classes.Foo, self.classes.Bar).all()[0]

        def go():
            eq_(row.Foo.id, 1)
            eq_(row.Foo.bar_id, 2)
            eq_(row.Bar.id, 3)
        # all three columns are loaded independently without
        # overlap, no additional SQL to load all attributes
        self.assert_sql_count(testing.db, go, 0)

    def test_overlap_subquery(self):
        s = Session()
        row = s.query(self.classes.Foo, self.classes.Bar).from_self().all()[0]

        def go():
            eq_(row.Foo.id, 1)
            eq_(row.Foo.bar_id, 2)
            eq_(row.Bar.id, 3)
        # all three columns are loaded independently without
        # overlap, no additional SQL to load all attributes
        self.assert_sql_count(testing.db, go, 0)
|
[
"sqlalchemy.func.sum",
"sqlalchemy.testing.fails_on",
"sqlalchemy.testing.assert_raises_message",
"sqlalchemy.orm.mapper",
"sqlalchemy.testing.schema.Column",
"sqlalchemy.select",
"sqlalchemy.orm.aliased",
"sqlalchemy.orm.create_session",
"sqlalchemy.orm.clear_mappers",
"sqlalchemy.engine.default.DefaultDialect",
"sqlalchemy.orm.column_property",
"sqlalchemy.orm.relation",
"sqlalchemy.inspect",
"sqlalchemy.exists",
"sqlalchemy.and_",
"sqlalchemy.testing.eq_",
"sqlalchemy.orm.util.join",
"sqlalchemy.func.length",
"sqlalchemy.orm.joinedload_all",
"sqlalchemy.literal_column",
"sqlalchemy.String",
"sqlalchemy.orm.configure_mappers",
"sqlalchemy.sql.column",
"sqlalchemy.ForeignKey",
"sqlalchemy.orm.Session",
"sqlalchemy.orm.joinedload",
"sqlalchemy.orm.relationship",
"sqlalchemy.orm.contains_alias",
"sqlalchemy.func.count",
"sqlalchemy.desc",
"sqlalchemy.asc",
"sqlalchemy.orm.contains_eager",
"sqlalchemy.orm.backref",
"sqlalchemy.func.lala"
] |
[((42438, 42481), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""mssql"""', '"""FIXME: unknown"""'], {}), "('mssql', 'FIXME: unknown')\n", (42454, 42481), False, 'from sqlalchemy import testing\n'), ((43259, 43302), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""mssql"""', '"""FIXME: unknown"""'], {}), "('mssql', 'FIXME: unknown')\n", (43275, 43302), False, 'from sqlalchemy import testing\n'), ((43308, 43395), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""oracle"""', '"""Oracle doesn\'t support boolean expressions as columns"""'], {}), '(\'oracle\',\n "Oracle doesn\'t support boolean expressions as columns")\n', (43324, 43395), False, 'from sqlalchemy import testing\n'), ((43444, 43561), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""postgresql+pg8000"""', '"""pg8000 parses the SQL itself before passing on to PG, doesn\'t parse this"""'], {}), '(\'postgresql+pg8000\',\n "pg8000 parses the SQL itself before passing on to PG, doesn\'t parse this")\n', (43460, 43561), False, 'from sqlalchemy import testing\n'), ((43610, 43727), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""postgresql+zxjdbc"""', '"""zxjdbc parses the SQL itself before passing on to PG, doesn\'t parse this"""'], {}), '(\'postgresql+zxjdbc\',\n "zxjdbc parses the SQL itself before passing on to PG, doesn\'t parse this")\n', (43626, 43727), False, 'from sqlalchemy import testing\n'), ((43776, 43815), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""firebird"""', '"""unknown"""'], {}), "('firebird', 'unknown')\n", (43792, 43815), False, 'from sqlalchemy import testing\n'), ((52667, 52706), 'sqlalchemy.testing.fails_on', 'testing.fails_on', (['"""firebird"""', '"""unknown"""'], {}), "('firebird', 'unknown')\n", (52683, 52706), False, 'from sqlalchemy import testing\n'), ((2136, 2165), 'sqlalchemy.orm.mapper', 'mapper', (['Dingaling', 'dingalings'], {}), '(Dingaling, dingalings)\n', (2142, 2165), False, 'from sqlalchemy.orm import 
configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2578, 2603), 'sqlalchemy.orm.mapper', 'mapper', (['Keyword', 'keywords'], {}), '(Keyword, keywords)\n', (2584, 2603), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2804, 2843), 'sqlalchemy.orm.mapper', 'mapper', (['CompositePk', 'composite_pk_table'], {}), '(CompositePk, composite_pk_table)\n', (2810, 2843), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2853, 2872), 'sqlalchemy.orm.configure_mappers', 'configure_mappers', ([], {}), '()\n', (2870, 2872), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((4700, 4716), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (4714, 4716), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((5128, 5144), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (5142, 5144), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((5582, 5598), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (5596, 5598), False, 'from 
sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((6359, 6375), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (6373, 6375), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((9949, 9965), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (9963, 9965), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((10461, 10477), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (10475, 10477), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((11203, 11219), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (11217, 11219), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((11238, 11251), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (11245, 11251), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((12469, 12485), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (12483, 12485), False, 'from sqlalchemy.orm import configure_mappers, Session, 
mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((13193, 13209), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (13207, 13209), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((13677, 13693), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (13691, 13693), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((14158, 14174), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (14172, 14174), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((14836, 14852), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (14850, 14852), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((15324, 15340), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (15338, 15340), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((15668, 15684), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (15682, 15684), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, 
column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((16151, 16167), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (16165, 16167), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((16767, 16783), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (16781, 16783), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((17407, 17423), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (17421, 17423), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((18029, 18045), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (18043, 18045), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((18737, 18753), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (18751, 18753), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((19611, 19627), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (19625, 19627), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, 
contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((21736, 21833), 'sqlalchemy.orm.mapper', 'mapper', (['B', 'b'], {'polymorphic_identity': '"""b"""', 'polymorphic_on': 'b.c.type', 'with_polymorphic': "('*', None)"}), "(B, b, polymorphic_identity='b', polymorphic_on=b.c.type,\n with_polymorphic=('*', None))\n", (21742, 21833), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((21863, 21913), 'sqlalchemy.orm.mapper', 'mapper', (['C', 'c'], {'inherits': 'B', 'polymorphic_identity': '"""c"""'}), "(C, c, inherits=B, polymorphic_identity='c')\n", (21869, 21913), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((21922, 21972), 'sqlalchemy.orm.mapper', 'mapper', (['D', 'd'], {'inherits': 'A', 'polymorphic_identity': '"""d"""'}), "(D, d, inherits=A, polymorphic_identity='d')\n", (21928, 21972), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((22097, 22113), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (22111, 22113), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((22452, 22468), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (22466, 22468), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, 
contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((23680, 23696), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (23694, 23696), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((24453, 24469), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (24467, 24469), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((25203, 25219), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (25217, 25219), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((25724, 25740), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (25738, 25740), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((26642, 26658), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (26656, 26658), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((29343, 29359), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (29357, 29359), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, 
clear_mappers, backref, relation, aliased\n'), ((30105, 30121), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (30119, 30121), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((30765, 30781), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (30779, 30781), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((30855, 30871), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (30862, 30871), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((31541, 31557), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (31555, 31557), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((32586, 32602), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (32600, 32602), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((33648, 33664), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (33662, 33664), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((33771, 33785), 'sqlalchemy.orm.aliased', 'aliased', (['Order'], {}), '(Order)\n', (33778, 33785), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((33803, 33816), 'sqlalchemy.orm.aliased', 'aliased', (['Item'], {}), '(Item)\n', (33810, 33816), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((34549, 34565), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (34563, 34565), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((35787, 35803), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (35801, 35803), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((35817, 35848), 'sqlalchemy.orm.aliased', 'aliased', (['Dingaling'], {'name': '"""foob"""'}), "(Dingaling, name='foob')\n", (35824, 35848), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((36994, 37010), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (37008, 37010), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((39843, 39859), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (39857, 39859), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((41002, 41018), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (41009, 41018), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((42183, 42199), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (42197, 42199), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((42214, 42242), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {'name': '"""foobar"""'}), "(User, name='foobar')\n", (42221, 42242), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((42602, 42618), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (42616, 42618), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((42776, 42789), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (42783, 42789), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((44001, 44017), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (44015, 44017), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((44729, 44745), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (44743, 44745), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((45782, 45798), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (45796, 45798), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((47326, 47342), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (47333, 47342), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((52393, 52409), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (52407, 52409), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((52795, 52811), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (52809, 52811), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((52829, 52843), 
'sqlalchemy.orm.aliased', 'aliased', (['Order'], {}), '(Order)\n', (52836, 52843), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((56342, 56358), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (56356, 56358), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((56748, 56764), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (56762, 56764), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((58396, 58412), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (58410, 58412), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((59408, 59424), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (59422, 59424), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((60084, 60100), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (60098, 60100), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((60599, 60615), 'sqlalchemy.orm.create_session', 
'create_session', ([], {}), '()\n', (60613, 60615), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((61125, 61141), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (61139, 61141), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((61566, 61582), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (61573, 61582), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((62410, 62426), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (62424, 62426), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((64453, 64466), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (64460, 64466), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((64480, 64496), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (64487, 64496), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((64509, 64525), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', 
(64523, 64525), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((66823, 66832), 'sqlalchemy.orm.Session', 'Session', ([], {}), '()\n', (66830, 66832), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((69013, 69039), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (69019, 69039), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((69115, 69131), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (69129, 69131), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((70102, 70142), 'sqlalchemy.orm.mapper', 'mapper', (['User', 'users'], {'order_by': 'users.c.id'}), '(User, users, order_by=users.c.id)\n', (70108, 70142), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((70210, 70226), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (70224, 70226), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((70597, 70616), 'sqlalchemy.orm.mapper', 'mapper', 
(['User', 'users'], {}), '(User, users)\n', (70603, 70616), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((70633, 70649), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (70647, 70649), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((70738, 70751), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (70745, 70751), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((72342, 72360), 'sqlalchemy.orm.aliased', 'aliased', (['User', 'sel'], {}), '(User, sel)\n', (72349, 72360), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((73286, 73305), 'sqlalchemy.orm.mapper', 'mapper', (['User', 'users'], {}), '(User, users)\n', (73292, 73305), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((73320, 73333), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (73327, 73333), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((73350, 73366), 'sqlalchemy.orm.create_session', 'create_session', 
([], {}), '()\n', (73364, 73366), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((75186, 75205), 'sqlalchemy.orm.mapper', 'mapper', (['User', 'users'], {}), '(User, users)\n', (75192, 75205), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((75273, 75289), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (75287, 75289), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((75768, 75784), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (75782, 75784), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((76332, 76348), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (76346, 76348), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((77000, 77016), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (77014, 77016), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((77524, 77550), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, 
addresses)\n', (77530, 77550), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((77618, 77634), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (77632, 77634), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((78392, 78408), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (78399, 78408), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((80133, 80158), 'sqlalchemy.orm.mapper', 'mapper', (['Keyword', 'keywords'], {}), '(Keyword, keywords)\n', (80139, 80158), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((80175, 80191), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (80189, 80191), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((81694, 81719), 'sqlalchemy.orm.mapper', 'mapper', (['Keyword', 'keywords'], {}), '(Keyword, keywords)\n', (81700, 81719), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((81736, 81752), 'sqlalchemy.orm.create_session', 
'create_session', ([], {}), '()\n', (81750, 81752), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((85134, 85160), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (85140, 85160), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((85228, 85244), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (85242, 85244), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((87041, 87067), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (87047, 87067), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((87278, 87297), 'sqlalchemy.orm.mapper', 'mapper', (['Item', 'items'], {}), '(Item, items)\n', (87284, 87297), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((88386, 88538), 'sqlalchemy.testing.assert_raises_message', 'assert_raises_message', (['sa_exc.ArgumentError', '"""not represented in the mapper\'s table"""', 'mapper', 'User', 'users'], {'properties': "{'concat': users.c.id * 2}"}), '(sa_exc.ArgumentError,\n "not represented in the mapper\'s table", mapper, User, users,\n properties={\'concat\': 
users.c.id * 2})\n', (88407, 88538), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((88609, 88624), 'sqlalchemy.orm.clear_mappers', 'clear_mappers', ([], {}), '()\n', (88622, 88624), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((89446, 89462), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (89460, 89462), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((90610, 90623), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (90617, 90623), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((91577, 91590), 'sqlalchemy.orm.aliased', 'aliased', (['User'], {}), '(User)\n', (91584, 91590), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94087, 94113), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (94093, 94113), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94244, 94260), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (94258, 94260), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, 
relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94488, 94504), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (94502, 94504), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((95323, 95339), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (95337, 95339), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((95389, 95413), 'sqlalchemy.testing.eq_', 'eq_', (['a1.username', '"""jack"""'], {}), "(a1.username, 'jack')\n", (95392, 95413), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((95430, 95446), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (95444, 95446), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((95508, 95532), 'sqlalchemy.testing.eq_', 'eq_', (['a1.username', '"""jack"""'], {}), "(a1.username, 'jack')\n", (95511, 95532), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((96715, 96733), 'sqlalchemy.orm.mapper', 'mapper', (['Sub1', 'sub1'], {}), '(Sub1, sub1)\n', (96721, 96733), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((96742, 96760), 'sqlalchemy.orm.mapper', 'mapper', 
(['Sub2', 'sub2'], {}), '(Sub2, sub2)\n', (96748, 96760), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((96776, 96792), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (96790, 96792), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((98246, 98285), 'sqlalchemy.orm.mapper', 'mapper', (['cls.classes.Foo', 'cls.tables.foo'], {}), '(cls.classes.Foo, cls.tables.foo)\n', (98252, 98285), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((98294, 98337), 'sqlalchemy.orm.mapper', 'mapper', (['cls.classes.Bar', 'cls.tables.foo_bar'], {}), '(cls.classes.Bar, cls.tables.foo_bar)\n', (98300, 98337), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((98394, 98403), 'sqlalchemy.orm.Session', 'Session', ([], {}), '()\n', (98401, 98403), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((98580, 98589), 'sqlalchemy.orm.Session', 'Session', ([], {}), '()\n', (98587, 98589), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), 
((98993, 99002), 'sqlalchemy.orm.Session', 'Session', ([], {}), '()\n', (99000, 99002), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((16185, 16197), 'sqlalchemy.sql.column', 'column', (['"""c1"""'], {}), "('c1')\n", (16191, 16197), False, 'from sqlalchemy.sql import column\n'), ((16199, 16211), 'sqlalchemy.sql.column', 'column', (['"""c2"""'], {}), "('c2')\n", (16205, 16211), False, 'from sqlalchemy.sql import column\n'), ((16801, 16813), 'sqlalchemy.sql.column', 'column', (['"""c1"""'], {}), "('c1')\n", (16807, 16813), False, 'from sqlalchemy.sql import column\n'), ((16815, 16827), 'sqlalchemy.sql.column', 'column', (['"""c2"""'], {}), "('c2')\n", (16821, 16827), False, 'from sqlalchemy.sql import column\n'), ((17441, 17453), 'sqlalchemy.sql.column', 'column', (['"""c1"""'], {}), "('c1')\n", (17447, 17453), False, 'from sqlalchemy.sql import column\n'), ((17455, 17467), 'sqlalchemy.sql.column', 'column', (['"""c2"""'], {}), "('c2')\n", (17461, 17467), False, 'from sqlalchemy.sql import column\n'), ((18071, 18083), 'sqlalchemy.sql.column', 'column', (['"""c1"""'], {}), "('c1')\n", (18077, 18083), False, 'from sqlalchemy.sql import column\n'), ((18085, 18097), 'sqlalchemy.sql.column', 'column', (['"""c2"""'], {}), "('c2')\n", (18091, 18097), False, 'from sqlalchemy.sql import column\n'), ((18771, 18783), 'sqlalchemy.sql.column', 'column', (['"""c1"""'], {}), "('c1')\n", (18777, 18783), False, 'from sqlalchemy.sql import column\n'), ((18785, 18797), 'sqlalchemy.sql.column', 'column', (['"""c2"""'], {}), "('c2')\n", (18791, 18797), False, 'from sqlalchemy.sql import column\n'), ((20412, 20482), 'sqlalchemy.testing.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'test_needs_autoincrement': '(True)'}), "('id', Integer, primary_key=True, 
test_needs_autoincrement=True)\n", (20418, 20482), False, 'from sqlalchemy.testing.schema import Column\n'), ((20717, 20787), 'sqlalchemy.testing.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'test_needs_autoincrement': '(True)'}), "('id', Integer, primary_key=True, test_needs_autoincrement=True)\n", (20723, 20787), False, 'from sqlalchemy.testing.schema import Column\n'), ((21039, 21061), 'sqlalchemy.testing.schema.Column', 'Column', (['"""age"""', 'Integer'], {}), "('age', Integer)\n", (21045, 21061), False, 'from sqlalchemy.testing.schema import Column\n'), ((21191, 21214), 'sqlalchemy.testing.schema.Column', 'Column', (['"""dede"""', 'Integer'], {}), "('dede', Integer)\n", (21197, 21214), False, 'from sqlalchemy.testing.schema import Column\n'), ((34662, 34711), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.addresses', 'Address.dingaling'], {}), '(User.addresses, Address.dingaling)\n', (34676, 34711), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((35980, 36039), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.addresses', 'Address.dingaling'], {'alias': 'da'}), '(User.addresses, Address.dingaling, alias=da)\n', (35994, 36039), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((38545, 38559), 'sqlalchemy.orm.aliased', 'aliased', (['Order'], {}), '(Order)\n', (38552, 38559), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((41341, 41362), 'sqlalchemy.func.count', 'func.count', 
(['User.name'], {}), '(User.name)\n', (41351, 41362), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((57074, 57090), 'sqlalchemy.orm.aliased', 'aliased', (['Address'], {}), '(Address)\n', (57081, 57090), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94393, 94422), 'sqlalchemy.testing.eq_', 'eq_', (['o1.address.user.count', '(1)'], {}), '(o1.address.user.count, 1)\n', (94396, 94422), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((94638, 94667), 'sqlalchemy.testing.eq_', 'eq_', (['o1.address.user.count', '(1)'], {}), '(o1.address.user.count, 1)\n', (94641, 94667), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((95705, 95775), 'sqlalchemy.testing.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)', 'test_needs_autoincrement': '(True)'}), "('id', Integer, primary_key=True, test_needs_autoincrement=True)\n", (95711, 95775), False, 'from sqlalchemy.testing.schema import Column\n'), ((97885, 97924), 'sqlalchemy.testing.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (97891, 97924), False, 'from sqlalchemy.testing.schema import Column\n'), ((97938, 97963), 'sqlalchemy.testing.schema.Column', 'Column', (['"""bar_id"""', 'Integer'], {}), "('bar_id', Integer)\n", (97944, 97963), False, 'from sqlalchemy.testing.schema import Column\n'), ((98009, 98048), 'sqlalchemy.testing.schema.Column', 'Column', (['"""id"""', 'Integer'], {'primary_key': '(True)'}), "('id', Integer, primary_key=True)\n", (98015, 98048), False, 'from sqlalchemy.testing.schema import 
Column\n'), ((98688, 98706), 'sqlalchemy.testing.eq_', 'eq_', (['row.Foo.id', '(1)'], {}), '(row.Foo.id, 1)\n', (98691, 98706), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((98719, 98741), 'sqlalchemy.testing.eq_', 'eq_', (['row.Foo.bar_id', '(2)'], {}), '(row.Foo.bar_id, 2)\n', (98722, 98741), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((98754, 98772), 'sqlalchemy.testing.eq_', 'eq_', (['row.Bar.id', '(3)'], {}), '(row.Bar.id, 3)\n', (98757, 98772), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((99113, 99131), 'sqlalchemy.testing.eq_', 'eq_', (['row.Foo.id', '(1)'], {}), '(row.Foo.id, 1)\n', (99116, 99131), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((99144, 99166), 'sqlalchemy.testing.eq_', 'eq_', (['row.Foo.bar_id', '(2)'], {}), '(row.Foo.bar_id, 2)\n', (99147, 99166), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((99179, 99197), 'sqlalchemy.testing.eq_', 'eq_', (['row.Bar.id', '(3)'], {}), '(row.Bar.id, 3)\n', (99182, 99197), False, 'from sqlalchemy.testing import fixtures, eq_, assert_raises, assert_raises_message, AssertsCompiledSQL\n'), ((3733, 3757), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (3755, 3757), False, 'from sqlalchemy.engine import default\n'), ((4171, 4195), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (4193, 4195), False, 'from sqlalchemy.engine import default\n'), ((4607, 4631), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (4629, 4631), False, 'from sqlalchemy.engine import default\n'), ((5031, 5055), 'sqlalchemy.engine.default.DefaultDialect', 
'default.DefaultDialect', ([], {}), '()\n', (5053, 5055), False, 'from sqlalchemy.engine import default\n'), ((5490, 5514), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (5512, 5514), False, 'from sqlalchemy.engine import default\n'), ((5920, 5944), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (5942, 5944), False, 'from sqlalchemy.engine import default\n'), ((7842, 7863), 'sqlalchemy.func.lala', 'func.lala', (['users.c.id'], {}), '(users.c.id)\n', (7851, 7863), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20544, 20554), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (20550, 20554), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20584, 20594), 'sqlalchemy.String', 'String', (['(20)'], {}), '(20)\n', (20590, 20594), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20632, 20650), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""b.id"""'], {}), "('b.id')\n", (20642, 20650), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20849, 20859), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (20855, 20859), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20889, 20899), 'sqlalchemy.String', 'String', (['(20)'], {}), '(20)\n', (20895, 20899), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((20988, 
21006), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""b.id"""'], {}), "('b.id')\n", (20998, 21006), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((21140, 21158), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""a.id"""'], {}), "('a.id')\n", (21150, 21158), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((27511, 27535), 'sqlalchemy.engine.default.DefaultDialect', 'default.DefaultDialect', ([], {}), '()\n', (27533, 27535), False, 'from sqlalchemy.engine import default\n'), ((53801, 53824), 'sqlalchemy.orm.joinedload', 'joinedload', (['Order.items'], {}), '(Order.items)\n', (53811, 53824), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((54122, 54145), 'sqlalchemy.orm.joinedload', 'joinedload', (['Order.items'], {}), '(Order.items)\n', (54132, 54145), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((63274, 63290), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (63288, 63290), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((72825, 72864), 'sqlalchemy.orm.util.join', 'join', (['sel', 'ualias', '(ualias.id > sel.c.id)'], {}), '(sel, ualias, ualias.id > sel.c.id)\n', (72829, 72864), False, 'from sqlalchemy.orm.util import join\n'), ((87879, 87895), 'sqlalchemy.orm.create_session', 'create_session', ([], 
{}), '()\n', (87893, 87895), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((95837, 95847), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (95843, 95847), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((95939, 95960), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""base.id"""'], {}), "('base.id')\n", (95949, 95960), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((96008, 96018), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (96014, 96018), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((96127, 96148), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""base.id"""'], {}), "('base.id')\n", (96137, 96148), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((96150, 96171), 'sqlalchemy.ForeignKey', 'ForeignKey', (['"""sub1.id"""'], {}), "('sub1.id')\n", (96160, 96171), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((96235, 96245), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (96241, 96245), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((1719, 1781), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {'backref': '"""user"""', 'order_by': 'addresses.c.id'}), "(Address, backref='user', 
order_by=addresses.c.id)\n", (1731, 1781), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((1830, 1887), 'sqlalchemy.orm.relationship', 'relationship', (['Order'], {'backref': '"""user"""', 'order_by': 'orders.c.id'}), "(Order, backref='user', order_by=orders.c.id)\n", (1842, 1887), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2027, 2084), 'sqlalchemy.orm.relationship', 'relationship', (['Dingaling'], {'uselist': '(False)', 'backref': '"""address"""'}), "(Dingaling, uselist=False, backref='address')\n", (2039, 2084), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2247, 2309), 'sqlalchemy.orm.relationship', 'relationship', (['Item'], {'secondary': 'order_items', 'order_by': 'items.c.id'}), '(Item, secondary=order_items, order_by=items.c.id)\n', (2259, 2309), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2366, 2387), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {}), '(Address)\n', (2378, 2387), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2493, 2539), 'sqlalchemy.orm.relationship', 'relationship', (['Keyword'], {'secondary': 'item_keywords'}), '(Keyword, 
secondary=item_keywords)\n', (2505, 2539), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((21683, 21725), 'sqlalchemy.orm.relation', 'relation', (['B'], {'uselist': '(False)', 'backref': '"""back"""'}), "(B, uselist=False, backref='back')\n", (21691, 21725), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((26815, 26845), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.addresses'], {}), '(User.addresses)\n', (26829, 26845), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((27841, 27886), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.addresses'], {'alias': 'adalias'}), '(User.addresses, alias=adalias)\n', (27855, 27886), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((40232, 40242), 'sqlalchemy.String', 'String', (['(50)'], {}), '(50)\n', (40238, 40242), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((61450, 61472), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (61460, 61472), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((61732, 61754), 'sqlalchemy.func.count', 'func.count', 
(['adalias.id'], {}), '(adalias.id)\n', (61742, 61754), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((66576, 66590), 'sqlalchemy.inspect', 'inspect', (['mzero'], {}), '(mzero)\n', (66583, 66590), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((68981, 69002), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {}), '(Address)\n', (68993, 69002), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((77492, 77513), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {}), '(Address)\n', (77504, 77513), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((79697, 79732), 'sqlalchemy.orm.relationship', 'relationship', (['Order'], {'backref': '"""user"""'}), "(Order, backref='user')\n", (79709, 79732), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((79828, 79890), 'sqlalchemy.orm.relationship', 'relationship', (['Item'], {'secondary': 'order_items', 'order_by': 'items.c.id'}), '(Item, secondary=order_items, order_by=items.c.id)\n', (79840, 79890), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((80004, 80074), 
'sqlalchemy.orm.relationship', 'relationship', (['Keyword'], {'secondary': 'item_keywords', 'order_by': 'keywords.c.id'}), '(Keyword, secondary=item_keywords, order_by=keywords.c.id)\n', (80016, 80074), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((81259, 81294), 'sqlalchemy.orm.relationship', 'relationship', (['Order'], {'backref': '"""user"""'}), "(Order, backref='user')\n", (81271, 81294), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((81390, 81452), 'sqlalchemy.orm.relationship', 'relationship', (['Item'], {'secondary': 'order_items', 'order_by': 'items.c.id'}), '(Item, secondary=order_items, order_by=items.c.id)\n', (81402, 81452), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((81565, 81635), 'sqlalchemy.orm.relationship', 'relationship', (['Keyword'], {'secondary': 'item_keywords', 'order_by': 'keywords.c.id'}), '(Keyword, secondary=item_keywords, order_by=keywords.c.id)\n', (81577, 81635), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((85077, 85123), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {'order_by': 'addresses.c.id'}), '(Address, order_by=addresses.c.id)\n', (85089, 85123), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, 
contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((87149, 87226), 'sqlalchemy.orm.relationship', 'relationship', (['Item'], {'secondary': 'order_items', 'lazy': '"""select"""', 'order_by': 'items.c.id'}), "(Item, secondary=order_items, lazy='select', order_by=items.c.id)\n", (87161, 87226), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((89060, 89091), 'sqlalchemy.orm.column_property', 'column_property', (['(users.c.id * 2)'], {}), '(users.c.id * 2)\n', (89075, 89091), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((89400, 89418), 'sqlalchemy.orm.relationship', 'relationship', (['User'], {}), '(User)\n', (89412, 89418), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((89500, 89518), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""user"""'], {}), "('user')\n", (89510, 89518), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((93730, 93792), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {'backref': '"""user"""', 'order_by': 'addresses.c.id'}), "(Address, backref='user', order_by=addresses.c.id)\n", (93742, 93792), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((93841, 93872), 'sqlalchemy.orm.column_property', 'column_property', (['(users.c.id * 2)'], {}), '(users.c.id * 2)\n', (93856, 93872), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94197, 94218), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {}), '(Address)\n', (94209, 94218), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((96636, 96654), 'sqlalchemy.orm.relationship', 'relationship', (['Sub1'], {}), '(Sub1)\n', (96648, 96654), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((96676, 96694), 'sqlalchemy.orm.relationship', 'relationship', (['Sub2'], {}), '(Sub2)\n', (96688, 96694), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((3500, 3526), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (3510, 3526), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((12918, 12941), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (12928, 12941), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((25372, 25399), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {}), "('addresses')\n", (25386, 25399), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((26276, 26318), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {'alias': 'adalias'}), "('addresses', alias=adalias)\n", (26290, 26318), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((57937, 57960), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (57947, 57960), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((63043, 63069), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (63053, 63069), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((69467, 69482), 'sqlalchemy.desc', 'desc', (['User.name'], {}), '(User.name)\n', (69471, 69482), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((69647, 69661), 'sqlalchemy.asc', 'asc', (['User.name'], {}), '(User.name)\n', (69650, 69661), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((69825, 69848), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), 
"('addresses')\n", (69835, 69848), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((75690, 75716), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (75696, 75716), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((76286, 76312), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (76292, 76312), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((76954, 76980), 'sqlalchemy.orm.mapper', 'mapper', (['Address', 'addresses'], {}), '(Address, addresses)\n', (76960, 76980), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((87382, 87418), 'sqlalchemy.orm.relationship', 'relationship', (['Address'], {'lazy': '"""select"""'}), "(Address, lazy='select')\n", (87394, 87418), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((94323, 94353), 'sqlalchemy.orm.joinedload_all', 'joinedload_all', (['"""address.user"""'], {}), "('address.user')\n", (94337, 94353), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, 
joinedload, clear_mappers, backref, relation, aliased\n'), ((94567, 94597), 'sqlalchemy.orm.joinedload_all', 'joinedload_all', (['"""address.user"""'], {}), "('address.user')\n", (94581, 94597), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((2735, 2778), 'sqlalchemy.orm.backref', 'backref', (['"""parent"""'], {'remote_side': '[nodes.c.id]'}), "('parent', remote_side=[nodes.c.id])\n", (2742, 2778), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((4808, 4830), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (4818, 4830), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((6759, 6790), 'sqlalchemy.exists', 'exists', (['[1]'], {'from_obj': 'addresses'}), '([1], from_obj=addresses)\n', (6765, 6790), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((8087, 8098), 'sqlalchemy.func.sum', 'func.sum', (['x'], {}), '(x)\n', (8095, 8098), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((10542, 10568), 'sqlalchemy.orm.joinedload', 'joinedload', (['User.addresses'], {}), '(User.addresses)\n', (10552, 10568), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((23814, 23837), 
'sqlalchemy.orm.contains_alias', 'contains_alias', (['"""ulist"""'], {}), "('ulist')\n", (23828, 23837), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((23839, 23866), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {}), "('addresses')\n", (23853, 23866), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((28379, 28406), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {}), "('addresses')\n", (28393, 28406), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((28664, 28694), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.addresses'], {}), '(User.addresses)\n', (28678, 28694), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((29674, 29718), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {'alias': '"""adalias"""'}), "('addresses', alias='adalias')\n", (29688, 29718), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((30441, 30483), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {'alias': 'adalias'}), "('addresses', alias=adalias)\n", (30455, 30483), False, 'from sqlalchemy.orm import 
configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((31991, 32027), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""orders"""'], {'alias': '"""o1"""'}), "('orders', alias='o1')\n", (32005, 32027), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((32049, 32091), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""orders.items"""'], {'alias': '"""i1"""'}), "('orders.items', alias='i1')\n", (32063, 32091), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((33228, 33266), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""orders"""'], {'alias': 'oalias'}), "('orders', alias=oalias)\n", (33242, 33266), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((33288, 33332), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""orders.items"""'], {'alias': 'ialias'}), "('orders.items', alias=ialias)\n", (33302, 33332), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((40825, 40852), 'sqlalchemy.desc', 'desc', (['Address.email_address'], {}), '(Address.email_address)\n', (40829, 40852), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, 
cast, exists\n'), ((92587, 92612), 'sqlalchemy.orm.util.join', 'join', (['Address', 'ua', '"""user"""'], {}), "(Address, ua, 'user')\n", (92591, 92612), False, 'from sqlalchemy.orm.util import join\n'), ((3921, 3947), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (3931, 3947), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((4354, 4380), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (4364, 4380), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((24561, 24584), 'sqlalchemy.orm.contains_alias', 'contains_alias', (['"""ulist"""'], {}), "('ulist')\n", (24575, 24584), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((24586, 24613), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {}), "('addresses')\n", (24600, 24613), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((28946, 28973), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {}), "('addresses')\n", (28960, 28973), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((30934, 30976), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['"""addresses"""'], {'alias': 'adalias'}), "('addresses', alias=adalias)\n", (30948, 30976), False, 'from 
sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((45233, 45245), 'sqlalchemy.func.count', 'func.count', ([], {}), '()\n', (45243, 45245), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((49206, 49232), 'sqlalchemy.orm.joinedload', 'joinedload', (['User.addresses'], {}), '(User.addresses)\n', (49216, 49232), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((49473, 49499), 'sqlalchemy.orm.joinedload', 'joinedload', (['User.addresses'], {}), '(User.addresses)\n', (49483, 49499), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((52503, 52526), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (52513, 52526), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((66912, 66946), 'sqlalchemy.func.length', 'func.length', (['Address.email_address'], {}), '(Address.email_address)\n', (66923, 66946), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((87521, 87579), 'sqlalchemy.and_', 'and_', (['(orders.c.isopen == 1)', '(users.c.id == orders.c.user_id)'], {}), '(orders.c.isopen == 1, users.c.id == orders.c.user_id)\n', (87525, 87579), 
False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((87745, 87803), 'sqlalchemy.and_', 'and_', (['(orders.c.isopen == 0)', '(users.c.id == orders.c.user_id)'], {}), '(orders.c.isopen == 0, users.c.id == orders.c.user_id)\n', (87749, 87803), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((91737, 91761), 'sqlalchemy.orm.joinedload', 'joinedload', (['Address.user'], {}), '(Address.user)\n', (91747, 91761), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((5236, 5258), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (5246, 5258), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((5690, 5712), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (5700, 5712), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((55422, 55443), 'sqlalchemy.literal_column', 'literal_column', (['"""\'q\'"""'], {}), '("\'q\'")\n', (55436, 55443), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((9732, 9754), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (9742, 9754), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((33879, 33920), 'sqlalchemy.orm.contains_eager', 
'contains_eager', (['User.orders'], {'alias': 'oalias'}), '(User.orders, alias=oalias)\n', (33893, 33920), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((33938, 33992), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.orders', 'Order.items'], {'alias': 'ialias'}), '(User.orders, Order.items, alias=ialias)\n', (33952, 33992), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((44770, 44782), 'sqlalchemy.func.count', 'func.count', ([], {}), '()\n', (44780, 44782), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((86221, 86244), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (86231, 86244), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((90434, 90452), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""user"""'], {}), "('user')\n", (90444, 90452), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((91689, 91714), 'sqlalchemy.orm.util.join', 'join', (['Address', 'ua', '"""user"""'], {}), "(Address, ua, 'user')\n", (91693, 91714), False, 'from sqlalchemy.orm.util import join\n'), ((93971, 93997), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (93981, 93997), False, 'from 
sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((95211, 95234), 'sqlalchemy.select', 'select', (['[User.fullname]'], {}), '([User.fullname])\n', (95217, 95234), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((8390, 8406), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (8404, 8406), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((9403, 9419), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (9417, 9419), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((62748, 62772), 'sqlalchemy.func.count', 'func.count', (['adalias.c.id'], {}), '(adalias.c.id)\n', (62758, 62772), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((63533, 63559), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (63543, 63559), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((63826, 63852), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (63836, 63852), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((64095, 64119), 'sqlalchemy.func.count', 'func.count', (['adalias.c.id'], {}), 
'(adalias.c.id)\n', (64105, 64119), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((81923, 81962), 'sqlalchemy.orm.joinedload_all', 'joinedload_all', (['"""orders.items.keywords"""'], {}), "('orders.items.keywords')\n", (81937, 81962), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((85322, 85345), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (85332, 85345), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((9456, 9478), 'sqlalchemy.func.count', 'func.count', (['Address.id'], {}), '(Address.id)\n', (9466, 9478), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((37502, 37528), 'sqlalchemy.orm.joinedload', 'joinedload', (['User.addresses'], {}), '(User.addresses)\n', (37512, 37528), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((37530, 37557), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.orders'], {}), '(User.orders)\n', (37544, 37557), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((38634, 38660), 'sqlalchemy.orm.joinedload', 'joinedload', (['User.addresses'], {}), 
'(User.addresses)\n', (38644, 38660), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((38678, 38719), 'sqlalchemy.orm.contains_eager', 'contains_eager', (['User.orders'], {'alias': 'oalias'}), '(User.orders, alias=oalias)\n', (38692, 38719), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((46427, 46460), 'sqlalchemy.func.count', 'func.count', (['Address.email_address'], {}), '(Address.email_address)\n', (46437, 46460), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((46676, 46709), 'sqlalchemy.func.count', 'func.count', (['Address.email_address'], {}), '(Address.email_address)\n', (46686, 46709), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((47007, 47040), 'sqlalchemy.func.count', 'func.count', (['Address.email_address'], {}), '(Address.email_address)\n', (47017, 47040), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((47385, 47418), 'sqlalchemy.func.count', 'func.count', (['adalias.email_address'], {}), '(adalias.email_address)\n', (47395, 47418), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((47722, 47755), 'sqlalchemy.func.count', 'func.count', (['adalias.email_address'], {}), '(adalias.email_address)\n', (47732, 47755), False, 'from sqlalchemy import exc 
as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((61922, 61948), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (61932, 61948), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((89190, 89216), 'sqlalchemy.func.count', 'func.count', (['addresses.c.id'], {}), '(addresses.c.id)\n', (89200, 89216), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((8543, 8559), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (8557, 8559), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((8707, 8723), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (8721, 8723), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((9640, 9656), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (9654, 9656), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((85782, 85805), 'sqlalchemy.orm.joinedload', 'joinedload', (['"""addresses"""'], {}), "('addresses')\n", (85792, 85805), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, 
aliased\n'), ((13292, 13304), 'sqlalchemy.func.count', 'func.count', ([], {}), '()\n', (13302, 13304), False, 'from sqlalchemy import exc as sa_exc, util, Integer, Table, String, ForeignKey, select, func, and_, asc, desc, inspect, literal_column, cast, exists\n'), ((62708, 62724), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (62722, 62724), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((63493, 63509), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (63507, 63509), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((63786, 63802), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (63800, 63802), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((64055, 64071), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (64069, 64071), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n'), ((9129, 9145), 'sqlalchemy.orm.create_session', 'create_session', ([], {}), '()\n', (9143, 9145), False, 'from sqlalchemy.orm import configure_mappers, Session, mapper, create_session, relationship, column_property, joinedload_all, contains_eager, contains_alias, joinedload, clear_mappers, backref, relation, aliased\n')]
|
#----------------------------------------------------------------------------------
# Microsoft Developer & Platform Evangelism
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
#----------------------------------------------------------------------------------
# The example companies, organizations, products, domain names,
# e-mail addresses, logos, people, places, and events depicted
# herein are fictitious. No association with any real company,
# organization, product, domain name, email address, logo, person,
# places, or events is intended or should be inferred.
#----------------------------------------------------------------------------------
import os # Provides system support to clear the screen
# <Snippet_Imports>
from azure.core.exceptions import (
ResourceExistsError,
ResourceNotFoundError
)
from azure.storage.fileshare import (
ShareServiceClient,
ShareClient,
ShareDirectoryClient,
ShareFileClient
)
# </Snippet_Imports>
from constants import Constants
class FileShareOperations:
    """Demo of Azure Files operations: shares, directories, files, snapshots.

    Every method prints progress and swallows the expected Azure service
    errors (printing the message) rather than raising, because this class
    backs an interactive sample menu.
    """

    def __init__(self):
        super().__init__()
        # Connection string / share / dir / file names come from a local
        # constants module supplied with the sample.
        self.constants = Constants()

    # <Snippet_CreateFileShare>
    def create_file_share(self, connection_string, share_name):
        """Create a file share; report if it already exists."""
        try:
            # Create a ShareClient from a connection string
            share_client = ShareClient.from_connection_string(
                connection_string, share_name)
            print("Creating share:", share_name)
            share_client.create_share()
        except ResourceExistsError as ex:
            print("ResourceExistsError:", ex.message)
    # </Snippet_CreateFileShare>

    # <Snippet_CreateDirectory>
    def create_directory(self, connection_string, share_name, dir_name):
        """Create a directory in the share; report if it already exists."""
        try:
            # Create a ShareDirectoryClient from a connection string
            dir_client = ShareDirectoryClient.from_connection_string(
                connection_string, share_name, dir_name)
            print("Creating directory:", share_name + "/" + dir_name)
            dir_client.create_directory()
        except ResourceExistsError as ex:
            print("ResourceExistsError:", ex.message)
    # </Snippet_CreateDirectory>

    # <Snippet_UploadFile>
    def upload_local_file(self, connection_string, local_file_path, share_name, dest_file_path):
        """Upload a local file to dest_file_path in the share."""
        try:
            # Read the local file inside a 'with' block so the handle is
            # always closed, even if the upload below raises.
            # (The original opened the file and never closed it.)
            with open(local_file_path, "rb") as source_file:
                data = source_file.read()
            # Create a ShareFileClient from a connection string
            file_client = ShareFileClient.from_connection_string(
                connection_string, share_name, dest_file_path)
            print("Uploading to:", share_name + "/" + dest_file_path)
            file_client.upload_file(data)
        except ResourceExistsError as ex:
            print("ResourceExistsError:", ex.message)
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_UploadFile>

    # <Snippet_ListFilesAndDirs>
    def list_files_and_dirs(self, connection_string, share_name, dir_name):
        """Print the files and subdirectories under dir_name."""
        try:
            # Create a ShareClient from a connection string
            share_client = ShareClient.from_connection_string(
                connection_string, share_name)
            for item in list(share_client.list_directories_and_files(dir_name)):
                if item["is_directory"]:
                    print("Directory:", item["name"])
                else:
                    print("File:", dir_name + "/" + item["name"])
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_ListFilesAndDirs>

    # <Snippet_DownloadFile>
    def download_azure_file(self, connection_string, share_name, dir_name, file_name):
        """Download dir_name/file_name to a local 'DOWNLOADED-' copy."""
        try:
            # Build the remote path
            source_file_path = dir_name + "/" + file_name
            # Add a prefix to the filename to
            # distinguish it from the uploaded file
            dest_file_name = "DOWNLOADED-" + file_name
            # Create a ShareFileClient from a connection string
            file_client = ShareFileClient.from_connection_string(
                connection_string, share_name, source_file_path)
            print("Downloading to:", dest_file_name)
            # Open a file for writing bytes on the local system
            with open(dest_file_name, "wb") as data:
                # Download the file from Azure into a stream
                stream = file_client.download_file()
                # Write the stream to the local file
                data.write(stream.readall())
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_DownloadFile>

    # <Snippet_CreateSnapshot>
    def create_snapshot(self, connection_string, share_name):
        """Create a share snapshot and return its timestamp string."""
        try:
            # Create a ShareClient from a connection string
            share_client = ShareClient.from_connection_string(
                connection_string, share_name)
            # Create a snapshot
            snapshot = share_client.create_snapshot()
            print("Created snapshot:", snapshot["snapshot"])
            # Return the snapshot time so
            # it can be accessed later
            return snapshot["snapshot"]
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_CreateSnapshot>

    # <Snippet_ListSharesAndSnapshots>
    def list_shares_snapshots(self, connection_string):
        """Print every share in the account, flagging snapshots."""
        try:
            # <Snippet_CreateShareServiceClient>
            # Create a ShareServiceClient from a connection string
            service_client = ShareServiceClient.from_connection_string(connection_string)
            # </Snippet_CreateShareServiceClient>
            # List the shares in the file service
            shares = list(service_client.list_shares(include_snapshots=True))
            for share in shares:
                if (share["snapshot"]):
                    print("Share:", share["name"], "Snapshot:", share["snapshot"])
                else:
                    print("Share:", share["name"])
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_ListSharesAndSnapshots>

    def get_first_snapshot(self, connection_string):
        """Return the timestamp of the first snapshot found, or None."""
        try:
            # Create a ShareServiceClient from a connection string
            service_client = ShareServiceClient.from_connection_string(connection_string)
            # List the shares in the file service
            shares = list(service_client.list_shares(include_snapshots=True))
            for share in shares:
                if (share["snapshot"]):
                    return share["snapshot"]
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)

    # <Snippet_BrowseSnapshotDir>
    def browse_snapshot_dir(self, connection_string, share_name, snapshot_time, dir_name):
        """Print dir_name's contents as of the given snapshot time."""
        try:
            # Create a ShareClient from a connection string
            snapshot = ShareClient.from_connection_string(
                conn_str=connection_string, share_name=share_name, snapshot=snapshot_time)
            print("Snapshot:", snapshot_time)
            for item in list(snapshot.list_directories_and_files(dir_name)):
                if item["is_directory"]:
                    print("Directory:", item["name"])
                else:
                    print("File:", dir_name + "/" + item["name"])
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_BrowseSnapshotDir>

    # <Snippet_DownloadSnapshotFile>
    def download_snapshot_file(self, connection_string, share_name, snapshot_time, dir_name, file_name):
        """Download a file from a snapshot to a local 'SNAPSHOT-' copy."""
        try:
            # Build the remote path
            source_file_path = dir_name + "/" + file_name
            # Add a prefix to the local filename to
            # indicate it's a file from a snapshot
            dest_file_name = "SNAPSHOT-" + file_name
            # Create a ShareFileClient from a connection string
            snapshot_file_client = ShareFileClient.from_connection_string(
                conn_str=connection_string, share_name=share_name,
                file_path=source_file_path, snapshot=snapshot_time)
            print("Downloading to:", dest_file_name)
            # Open a file for writing bytes on the local system
            with open(dest_file_name, "wb") as data:
                # Download the file from Azure into a stream
                stream = snapshot_file_client.download_file()
                # Write the stream to the local file
                data.write(stream.readall())
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_DownloadSnapshotFile>

    # <Snippet_DeleteSnapshot>
    def delete_snapshot(self, connection_string, share_name, snapshot_time):
        """Delete a single snapshot of the share."""
        try:
            # Create a ShareClient for a snapshot
            snapshot_client = ShareClient.from_connection_string(conn_str=connection_string, share_name=share_name, snapshot=snapshot_time)
            print("Deleting snapshot:", snapshot_time)
            # Delete the snapshot
            snapshot_client.delete_share()
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_DeleteSnapshot>

    # <Snippet_DeleteFile>
    def delete_azure_file(self, connection_string, share_name, file_path):
        """Delete a single file from the share."""
        try:
            # Create a ShareFileClient from a connection string
            file_client = ShareFileClient.from_connection_string(
                connection_string, share_name, file_path)
            print("Deleting file:", share_name + "/" + file_path)
            # Delete the file
            file_client.delete_file()
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_DeleteFile>

    # <Snippet_DeleteShare>
    def delete_share(self, connection_string, share_name):
        """Delete the share together with all of its snapshots."""
        try:
            # Create a ShareClient from a connection string
            share_client = ShareClient.from_connection_string(
                connection_string, share_name)
            print("Deleting share:", share_name)
            # Delete the share and snapshots
            share_client.delete_share(delete_snapshots=True)
        except ResourceNotFoundError as ex:
            print("ResourceNotFoundError:", ex.message)
    # </Snippet_DeleteShare>

    def menu(self):
        """Show the interactive menu once; return False when the user exits."""
        os.system("cls")
        print("Choose an Azure Files scenario:")
        print("1) Create a file share")
        print("2) Create a directory")
        print("3) Upload a file")
        print("4) List files and directories")
        print("5) Download a file")
        print("6) Create a share snapshot")
        print("7) List shares and snapshots")
        print("8) Browse share snapshot")
        print("9) Download a file from a snapshot")
        print("10) Delete a snapshot")
        print("11) Delete a file")
        print("12) Delete a share with snapshots")
        print("X) Exit to main menu")
        option = input("\r\nSelect an option: ")
        if option == "1":
            # Create a file share
            self.create_file_share(self.constants.connection_string, self.constants.share_name)
            input("Press Enter to continue ")
            return True
        elif option == "2":
            # Create a directory in the file share
            self.create_directory(self.constants.connection_string, self.constants.share_name, self.constants.dir_name)
            input("Press Enter to continue ")
            return True
        elif option == "3":
            # Upload a local file
            dest_file_path = self.constants.dir_name + "/" + self.constants.file_name
            self.upload_local_file(self.constants.connection_string, self.constants.file_name, self.constants.share_name, dest_file_path)
            input("Press Enter to continue ")
            return True
        elif option == "4":
            # List the files and directories in the specified directory
            self.list_files_and_dirs(self.constants.connection_string, self.constants.share_name, self.constants.dir_name)
            input("Press Enter to continue ")
            return True
        elif option == "5":
            # Download a file
            self.download_azure_file(self.constants.connection_string, self.constants.share_name, self.constants.dir_name, self.constants.file_name)
            input("Press Enter to continue ")
            return True
        elif option == "6":
            # Create a share snapshot, store the snapshot for later use
            self.snapshot_time = self.create_snapshot(self.constants.connection_string, self.constants.share_name)
            input("Press Enter to continue ")
            return True
        elif option == "7":
            # List the file shares in a storage account
            self.list_shares_snapshots(self.constants.connection_string)
            input("Press Enter to continue ")
            return True
        elif option == "8":
            # List files and directories in a snapshot directory
            snapshot = self.get_first_snapshot(self.constants.connection_string)
            self.browse_snapshot_dir(self.constants.connection_string, self.constants.share_name, snapshot, self.constants.dir_name)
            input("Press Enter to continue ")
            return True
        elif option == "9":
            # Download a file from a snapshot
            snapshot = self.get_first_snapshot(self.constants.connection_string)
            self.download_snapshot_file(self.constants.connection_string, self.constants.share_name, snapshot, self.constants.dir_name, self.constants.file_name)
            input("Press Enter to continue ")
            return True
        elif option == "10":
            # Delete a snapshot
            snapshot = self.get_first_snapshot(self.constants.connection_string)
            self.delete_snapshot(self.constants.connection_string, self.constants.share_name, snapshot)
            input("Press Enter to continue ")
            return True
        elif option == "11":
            # Delete a file in a share
            file_path = self.constants.dir_name + "/" + self.constants.file_name
            self.delete_azure_file(self.constants.connection_string, self.constants.share_name, file_path)
            input("Press Enter to continue ")
            return True
        elif option == "12":
            # Delete a file share with snapshots
            self.delete_share(self.constants.connection_string, self.constants.share_name)
            input("Press Enter to continue ")
            return True
        elif option == "x" or option == "X":
            return False
        else:
            print("Unknown option:", str(option))
            input("Press Enter to continue ")
            return True
|
[
"azure.storage.fileshare.ShareDirectoryClient.from_connection_string",
"azure.storage.fileshare.ShareServiceClient.from_connection_string",
"os.system",
"azure.storage.fileshare.ShareClient.from_connection_string",
"constants.Constants",
"azure.storage.fileshare.ShareFileClient.from_connection_string"
] |
[((1352, 1363), 'constants.Constants', 'Constants', ([], {}), '()\n', (1361, 1363), False, 'from constants import Constants\n'), ((10831, 10847), 'os.system', 'os.system', (['"""cls"""'], {}), "('cls')\n", (10840, 10847), False, 'import os\n'), ((1561, 1626), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', (['connection_string', 'share_name'], {}), '(connection_string, share_name)\n', (1595, 1626), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((2077, 2165), 'azure.storage.fileshare.ShareDirectoryClient.from_connection_string', 'ShareDirectoryClient.from_connection_string', (['connection_string', 'share_name', 'dir_name'], {}), '(connection_string, share_name,\n dir_name)\n', (2120, 2165), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((2743, 2832), 'azure.storage.fileshare.ShareFileClient.from_connection_string', 'ShareFileClient.from_connection_string', (['connection_string', 'share_name', 'dest_file_path'], {}), '(connection_string, share_name,\n dest_file_path)\n', (2781, 2832), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((3395, 3460), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', (['connection_string', 'share_name'], {}), '(connection_string, share_name)\n', (3429, 3460), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((4348, 4439), 'azure.storage.fileshare.ShareFileClient.from_connection_string', 'ShareFileClient.from_connection_string', (['connection_string', 'share_name', 'source_file_path'], {}), '(connection_string, share_name,\n source_file_path)\n', (4386, 4439), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, 
ShareDirectoryClient, ShareFileClient\n'), ((5162, 5227), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', (['connection_string', 'share_name'], {}), '(connection_string, share_name)\n', (5196, 5227), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((5903, 5963), 'azure.storage.fileshare.ShareServiceClient.from_connection_string', 'ShareServiceClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (5944, 5963), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((6677, 6737), 'azure.storage.fileshare.ShareServiceClient.from_connection_string', 'ShareServiceClient.from_connection_string', (['connection_string'], {}), '(connection_string)\n', (6718, 6737), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((7309, 7423), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', ([], {'conn_str': 'connection_string', 'share_name': 'share_name', 'snapshot': 'snapshot_time'}), '(conn_str=connection_string, share_name=\n share_name, snapshot=snapshot_time)\n', (7343, 7423), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((8388, 8533), 'azure.storage.fileshare.ShareFileClient.from_connection_string', 'ShareFileClient.from_connection_string', ([], {'conn_str': 'connection_string', 'share_name': 'share_name', 'file_path': 'source_file_path', 'snapshot': 'snapshot_time'}), '(conn_str=connection_string,\n share_name=share_name, file_path=source_file_path, snapshot=snapshot_time)\n', (8426, 8533), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((9298, 9412), 
'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', ([], {'conn_str': 'connection_string', 'share_name': 'share_name', 'snapshot': 'snapshot_time'}), '(conn_str=connection_string, share_name=\n share_name, snapshot=snapshot_time)\n', (9332, 9412), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((9881, 9966), 'azure.storage.fileshare.ShareFileClient.from_connection_string', 'ShareFileClient.from_connection_string', (['connection_string', 'share_name', 'file_path'], {}), '(connection_string, share_name, file_path\n )\n', (9919, 9966), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n'), ((10432, 10497), 'azure.storage.fileshare.ShareClient.from_connection_string', 'ShareClient.from_connection_string', (['connection_string', 'share_name'], {}), '(connection_string, share_name)\n', (10466, 10497), False, 'from azure.storage.fileshare import ShareServiceClient, ShareClient, ShareDirectoryClient, ShareFileClient\n')]
|
import numpy as np
from finitewave.core.fibrosis import FibrosisPattern
class ScarGauss2DPattern(FibrosisPattern):
    """Fibrosis pattern: a 2-D Gaussian scar.

    Samples `size` points from a bivariate normal distribution (built from
    per-axis std deviations and a correlation coefficient) and marks the
    corresponding mesh cells with 2.
    """

    def __init__(self, mean, std, corr, size):
        self.mean = mean    # (x, y) center of the scar
        self.std = std      # (std_x, std_y)
        self.corr = corr    # correlation coefficient between x and y
        self.size = size    # number of points to sample

    def generate(self, size, mesh=None):
        """Return `mesh` (created as zeros of shape `size` if None) with
        sampled scar cells set to 2."""
        if mesh is None:
            mesh = np.zeros(size)
        # 2x2 covariance matrix from stds and the correlation coefficient.
        covs = [[self.std[0]**2, self.std[0]*self.std[1]*self.corr],
                [self.std[0]*self.std[1]*self.corr, self.std[1]**2]]
        # BUG FIX: the original referenced `self.covs`, which is never set
        # (the matrix is the local `covs`), raising AttributeError at runtime.
        nrm = np.random.multivariate_normal(self.mean, covs, self.size).T
        mesh[nrm[0].astype(int), nrm[1].astype(int)] = 2
        return mesh
|
[
"numpy.zeros",
"numpy.random.multivariate_normal"
] |
[((349, 363), 'numpy.zeros', 'np.zeros', (['size'], {}), '(size)\n', (357, 363), True, 'import numpy as np\n'), ((517, 579), 'numpy.random.multivariate_normal', 'np.random.multivariate_normal', (['self.mean', 'self.covs', 'self.size'], {}), '(self.mean, self.covs, self.size)\n', (546, 579), True, 'import numpy as np\n')]
|
"""
@Time: 2020/8/17 18:08
@Author: Zhirui (<NAME>)
@E-mail: <EMAIL>
@Program:
"""
import os
import random
import pandas as pd
import numpy as np
from sklearn.impute import SimpleImputer
import tensorflow as tf
from tensorflow.keras import layers, optimizers
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import plot_model
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from utils.logger import logger
from data_process import get_apptointment_info, get_treat_info
from wtp.duration.predict_lgb_model import NUM_FEATURES, CATE_FEATURES
from wtp.duration.config_duration import DT_MODEL_DIR
def one_hot_encoding(processed_data):
    """One-hot encode every column in CATE_FEATURES and aggregate per appointment.

    Returns:
        (cate_onehot_data, update_cate_features): a DataFrame indexed by
        'AppointmentSerNum' with the dummy columns summed per appointment,
        and the list of generated dummy column names.
    """
    frames = []
    update_cate_features = []
    for feature in CATE_FEATURES:
        tmp = pd.get_dummies(processed_data[[feature]], prefix=f"{feature}_")
        update_cate_features.extend(tmp.columns)
        frames.append(tmp)
    # Concatenate once instead of inside the loop: repeated pd.concat copies
    # the accumulated frame each iteration (quadratic in total columns).
    cate_onehot_data = pd.concat(frames, axis=1) if frames else pd.DataFrame({})
    cate_onehot_data['AppointmentSerNum'] = processed_data['AppointmentSerNum']
    cate_onehot_data = cate_onehot_data.groupby(by='AppointmentSerNum').sum()
    return cate_onehot_data, update_cate_features
def fill_num(processed_data):
    """Replace NaNs in the NUM_FEATURES columns with the column mean.

    Mutates `processed_data` in place and also returns it for convenience.
    """
    imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
    # fit_transform == fit followed by transform on the same frame.
    processed_data.loc[:, NUM_FEATURES] = imputer.fit_transform(processed_data[NUM_FEATURES])
    return processed_data
def split_feature_label(all_data, update_cate_features):
    """Build one training sequence per patient.

    For each patient (rows assumed ordered by visit time), the value in the
    second feature column of the last visit becomes the label, and that cell
    is zeroed in the feature matrix so the model cannot see it.

    Returns:
        (patients_lst, train_samples_lst, label_samples_lst)
    """
    feature_cols = NUM_FEATURES + update_cate_features
    patients_lst, train_samples_lst, label_samples_lst = [], [], []
    for patient_id, visits in all_data.groupby('PatientSerNum'):
        visits = visits[feature_cols]
        # Label = second column of the final visit; then hide it from the input.
        label_samples_lst.append(visits.iloc[-1, 1])
        visits.iloc[-1, 1] = 0
        patients_lst.append(patient_id)
        train_samples_lst.append(visits.values)
    return patients_lst, train_samples_lst, label_samples_lst
def process_sequence_data(processed_data):
    """Turn raw appointment/treatment rows into per-patient sequences.

    Pipeline: impute numeric NaNs -> aggregate numeric features per
    appointment (mean for per-appointment values, sum for per-treatment
    values) -> one-hot encode categoricals -> merge with appointment
    metadata -> sort by patient and scheduled time -> split into
    per-patient feature sequences and labels.

    Returns:
        (patients_lst, train_samples_lst, label_samples_lst)
    """
    logger.debug(f'Fill zero in nan!')
    processed_data = fill_num(processed_data)
    logger.debug(f'Process numerical features!')
    # Features with one value per appointment: aggregate with mean.
    num_features_single_value = ['age', 'Scheduled_duration', 'Actual_duration']
    num_data_single_value = processed_data[num_features_single_value + ['AppointmentSerNum']]
    num_data_single_value = num_data_single_value.groupby(by='AppointmentSerNum').mean()
    num_data_single_value = num_data_single_value.reset_index(drop=False)
    # Features with multiple rows per appointment (one per treatment): sum them.
    num_features_multiple_value = ['ImagesTaken', 'MU', 'MUCoeff', 'TreatmentTime']
    num_data_multiple_value = processed_data[num_features_multiple_value + ['AppointmentSerNum']]
    num_data_multiple_value = num_data_multiple_value.groupby(by='AppointmentSerNum').sum()
    num_data_multiple_value = num_data_multiple_value.reset_index(drop=False)
    num_data = pd.merge(num_data_single_value, num_data_multiple_value, on='AppointmentSerNum', how='inner')
    logger.debug(f'Encode categorical features!')
    cate_onehot_data, update_cate_features = one_hot_encoding(processed_data)
    feature_data = pd.merge(num_data, cate_onehot_data, on='AppointmentSerNum', how='inner')
    logger.debug(f'Add appointment information!')
    information_features = ['PatientSerNum', 'AppointmentSerNum',
                            'ScheduledStartTime', 'ScheduledEndTime', 'ActualStartDate', 'ActualEndDate']# FractionNumber
    information_data = processed_data[information_features]
    information_data = information_data.drop_duplicates().reset_index(drop=True)
    all_data = pd.merge(feature_data, information_data, on='AppointmentSerNum', how='inner')
    logger.debug(f'Split features and labels!')
    # Chronological order within each patient matters for the LSTM input.
    all_data = all_data.sort_values(by=['PatientSerNum', 'ScheduledStartTime']).reset_index(drop=True)
    patients_lst, train_samples_lst, label_samples_lst = split_feature_label(all_data, update_cate_features)
    return patients_lst, train_samples_lst, label_samples_lst
def sequence_model():
    """Build the duration-regression network: one LSTM layer over the
    209-feature appointment sequence, followed by two Dense layers ending
    in a single scalar output."""
    net = Sequential()
    recurrent = layers.LSTM(
        128,
        batch_input_shape=(None, None, 209),
        dropout=0.1,
        recurrent_dropout=0.5,
        name='input',
    )
    net.add(recurrent)
    net.add(layers.Dense(32))
    net.add(layers.Dense(1))
    return net
def generate_data(x_set, y_set, batch_size):
    """Infinite batch generator for Keras fit_generator.

    Walks through (x_set, y_set) in order; when the end is reached, both
    lists are reshuffled in place with the SAME fixed seed so features and
    labels stay aligned. Each batch of variable-length sequences is
    zero-padded on the left ("pre") to a common length.

    Yields:
        (padded_features, labels) as numpy arrays.
    """
    i = 0
    while True:
        feature_samples = []
        label_samples = []
        for b in range(batch_size):
            if i == len(x_set):
                i = 0
                # Reseed before each shuffle so x_set and y_set receive the
                # identical permutation and stay paired.
                random.seed(1)
                random.shuffle(x_set)
                random.seed(1)
                random.shuffle(y_set)
            feature_samples.append(x_set[i])
            label_samples.append(y_set[i])
            i = i + 1
        feature_samples = tf.keras.preprocessing.sequence.pad_sequences(np.array(feature_samples), padding="pre")
        yield feature_samples, np.array(label_samples)
        # yield ({'input': train_samples}, {'output': batch_samples})
def split_train_test(train_samples_lst, label_samples_lst, seed=1):
    """Shuffle both lists in place with the same seed (keeping feature/label
    pairs aligned) and split them 90% / 5% / 5% into train / val / test.

    Note: the input lists are mutated (shuffled) by this call.

    Returns:
        (train_set, label_train_set, val_set, label_val_set,
         test_set, label_test_set)
    """
    # Reseeding before each shuffle applies the identical permutation to both.
    for paired in (train_samples_lst, label_samples_lst):
        random.seed(seed)
        random.shuffle(paired)
    total = len(train_samples_lst)
    cut_train = int(total * 0.9)
    cut_val = int(total * 0.95)
    train_set = train_samples_lst[:cut_train]
    label_train_set = label_samples_lst[:cut_train]
    val_set = train_samples_lst[cut_train:cut_val]
    label_val_set = label_samples_lst[cut_train:cut_val]
    test_set = train_samples_lst[cut_val:]
    label_test_set = label_samples_lst[cut_val:]
    return train_set, label_train_set, val_set, label_val_set, test_set, label_test_set
def train_and_test(train_set, label_train_set, val_set, label_val_set, test_set, label_test_set, seed, model_name):
    """Train one LSTM model on seeded-shuffled data, checkpoint the best
    epoch to DT_MODEL_DIR, then evaluate on the test set.

    Args:
        seed: shuffle seed for this ensemble member.
        model_name: base name for the checkpoint file ('.h5' is appended).

    Returns:
        Test-set predictions, shape (len(test_set), 1).
    """
    logger.debug(f'Start training model for {seed}!')
    model_sequence = sequence_model()
    opt = optimizers.Adam(lr=0.001)
    model_sequence.compile(
        optimizer=opt,
        loss='mae',
        metrics=['mean_absolute_percentage_error', 'mae']
    )
    # BUG FIX: min_delta was 1e5 (i.e. 100000) — with that threshold no epoch
    # ever counts as an improvement, so training always stopped after
    # `patience` epochs. 1e-5 is the intended negligible-change threshold.
    early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                     min_delta=1e-5,
                                                     patience=5,
                                                     verbose=1,
                                                     restore_best_weights=True)
    checkpoint_path_industry = os.path.join(DT_MODEL_DIR, f"{model_name}.h5")
    cp_callback_model = tf.keras.callbacks.ModelCheckpoint(checkpoint_path_industry,
                                                       monitor='val_loss',
                                                       verbose=0,
                                                       save_best_only=True,
                                                       save_weights_only=False,
                                                       mode='min',
                                                       period=1)
    # Shuffle features and labels with the same seed to keep them paired.
    random.seed(seed)
    random.shuffle(train_set)
    random.seed(seed)
    random.shuffle(label_train_set)
    model_sequence.fit_generator(generate_data(train_set, label_train_set, 32),
                                 steps_per_epoch=len(train_set) // 32,
                                 epochs=100,
                                 callbacks=[early_stopping, cp_callback_model],
                                 # batch_size=256,
                                 validation_data=generate_data(val_set, label_val_set, 32),
                                 validation_steps=len(val_set) // 32)
    logger.debug(f'Start testing model!')
    padding_test_set = tf.keras.preprocessing.sequence.pad_sequences(np.array(test_set), padding="pre")
    y_pred = model_sequence.predict(padding_test_set)
    residual = np.array(label_test_set) - y_pred.reshape(-1, )
    logger.debug(f"MAE: {np.mean(np.abs(residual))}")
    logger.debug(f"MAPE: {100. * np.mean(np.abs(residual / np.array(label_test_set)))}")
    return y_pred
if __name__ == '__main__':
    # Merge appointment-level and treatment-level data on patient + date.
    processed_appointment_data = get_apptointment_info()
    processed_treatment_data = get_treat_info()
    processed_data = pd.merge(processed_appointment_data, processed_treatment_data,
                              on=['PatientSerNum', 'date'], how='inner')
    _, train_samples_lst, label_samples_lst = process_sequence_data(processed_data)
    train_set, label_train_set, val_set, label_val_set, test_set, label_test_set = \
        split_train_test(train_samples_lst, label_samples_lst)
    # Train a 10-member ensemble, one model per seed.
    pred_y_ensemble = []
    for seed in range(10):
        # BUG FIX: model_name used to include '.h5' here while train_and_test
        # appends '.h5' itself, producing 'sequence_model_<seed>.h5.h5' files.
        pred_y = train_and_test(train_set, label_train_set, val_set, label_val_set, test_set, label_test_set,
                                seed=seed, model_name=f'sequence_model_{seed}')
        pred_y_ensemble.append(pred_y.reshape(-1, ))
|
[
"data_process.get_treat_info",
"numpy.abs",
"tensorflow.keras.layers.Dense",
"random.shuffle",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Sequential",
"os.path.join",
"tensorflow.keras.callbacks.EarlyStopping",
"pandas.DataFrame",
"sklearn.impute.SimpleImputer",
"pandas.merge",
"random.seed",
"tensorflow.keras.optimizers.Adam",
"pandas.concat",
"utils.logger.logger.debug",
"pandas.get_dummies",
"data_process.get_apptointment_info",
"tensorflow.compat.v1.logging.set_verbosity",
"tensorflow.keras.layers.LSTM",
"numpy.array"
] |
[((356, 418), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (390, 418), True, 'import tensorflow as tf\n'), ((703, 719), 'pandas.DataFrame', 'pd.DataFrame', (['{}'], {}), '({})\n', (715, 719), True, 'import pandas as pd\n'), ((1237, 1290), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'missing_values': 'np.nan', 'strategy': '"""mean"""'}), "(missing_values=np.nan, strategy='mean')\n", (1250, 1290), False, 'from sklearn.impute import SimpleImputer\n'), ((1986, 2020), 'utils.logger.logger.debug', 'logger.debug', (['f"""Fill zero in nan!"""'], {}), "(f'Fill zero in nan!')\n", (1998, 2020), False, 'from utils.logger import logger\n'), ((2072, 2116), 'utils.logger.logger.debug', 'logger.debug', (['f"""Process numerical features!"""'], {}), "(f'Process numerical features!')\n", (2084, 2116), False, 'from utils.logger import logger\n'), ((2822, 2920), 'pandas.merge', 'pd.merge', (['num_data_single_value', 'num_data_multiple_value'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(num_data_single_value, num_data_multiple_value, on=\n 'AppointmentSerNum', how='inner')\n", (2830, 2920), True, 'import pandas as pd\n'), ((2921, 2966), 'utils.logger.logger.debug', 'logger.debug', (['f"""Encode categorical features!"""'], {}), "(f'Encode categorical features!')\n", (2933, 2966), False, 'from utils.logger import logger\n'), ((3064, 3137), 'pandas.merge', 'pd.merge', (['num_data', 'cate_onehot_data'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(num_data, cate_onehot_data, on='AppointmentSerNum', how='inner')\n", (3072, 3137), True, 'import pandas as pd\n'), ((3143, 3188), 'utils.logger.logger.debug', 'logger.debug', (['f"""Add appointment information!"""'], {}), "(f'Add appointment information!')\n", (3155, 3188), False, 'from utils.logger import logger\n'), ((3533, 3610), 'pandas.merge', 'pd.merge', (['feature_data', 
'information_data'], {'on': '"""AppointmentSerNum"""', 'how': '"""inner"""'}), "(feature_data, information_data, on='AppointmentSerNum', how='inner')\n", (3541, 3610), True, 'import pandas as pd\n'), ((3616, 3659), 'utils.logger.logger.debug', 'logger.debug', (['f"""Split features and labels!"""'], {}), "(f'Split features and labels!')\n", (3628, 3659), False, 'from utils.logger import logger\n'), ((3971, 3983), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (3981, 3983), False, 'from tensorflow.keras.models import Sequential\n'), ((5211, 5228), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5222, 5228), False, 'import random\n'), ((5233, 5266), 'random.shuffle', 'random.shuffle', (['train_samples_lst'], {}), '(train_samples_lst)\n', (5247, 5266), False, 'import random\n'), ((5271, 5288), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (5282, 5288), False, 'import random\n'), ((5293, 5326), 'random.shuffle', 'random.shuffle', (['label_samples_lst'], {}), '(label_samples_lst)\n', (5307, 5326), False, 'import random\n'), ((5997, 6046), 'utils.logger.logger.debug', 'logger.debug', (['f"""Start training model for {seed}!"""'], {}), "(f'Start training model for {seed}!')\n", (6009, 6046), False, 'from utils.logger import logger\n'), ((6095, 6120), 'tensorflow.keras.optimizers.Adam', 'optimizers.Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (6110, 6120), False, 'from tensorflow.keras import layers, optimizers\n'), ((6278, 6404), 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(100000.0)', 'patience': '(5)', 'verbose': '(1)', 'restore_best_weights': '(True)'}), "(monitor='val_loss', min_delta=100000.0,\n patience=5, verbose=1, restore_best_weights=True)\n", (6310, 6404), True, 'import tensorflow as tf\n'), ((6643, 6689), 'os.path.join', 'os.path.join', (['DT_MODEL_DIR', 'f"""{model_name}.h5"""'], {}), "(DT_MODEL_DIR, f'{model_name}.h5')\n", 
(6655, 6689), False, 'import os\n'), ((6714, 6882), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['checkpoint_path_industry'], {'monitor': '"""val_loss"""', 'verbose': '(0)', 'save_best_only': '(True)', 'save_weights_only': '(False)', 'mode': '"""min"""', 'period': '(1)'}), "(checkpoint_path_industry, monitor=\n 'val_loss', verbose=0, save_best_only=True, save_weights_only=False,\n mode='min', period=1)\n", (6748, 6882), True, 'import tensorflow as tf\n'), ((7233, 7250), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7244, 7250), False, 'import random\n'), ((7255, 7280), 'random.shuffle', 'random.shuffle', (['train_set'], {}), '(train_set)\n', (7269, 7280), False, 'import random\n'), ((7285, 7302), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (7296, 7302), False, 'import random\n'), ((7307, 7338), 'random.shuffle', 'random.shuffle', (['label_train_set'], {}), '(label_train_set)\n', (7321, 7338), False, 'import random\n'), ((7833, 7870), 'utils.logger.logger.debug', 'logger.debug', (['f"""Start testing model!"""'], {}), "(f'Start testing model!')\n", (7845, 7870), False, 'from utils.logger import logger\n'), ((8315, 8338), 'data_process.get_apptointment_info', 'get_apptointment_info', ([], {}), '()\n', (8336, 8338), False, 'from data_process import get_apptointment_info, get_treat_info\n'), ((8370, 8386), 'data_process.get_treat_info', 'get_treat_info', ([], {}), '()\n', (8384, 8386), False, 'from data_process import get_apptointment_info, get_treat_info\n'), ((8408, 8518), 'pandas.merge', 'pd.merge', (['processed_appointment_data', 'processed_treatment_data'], {'on': "['PatientSerNum', 'date']", 'how': '"""inner"""'}), "(processed_appointment_data, processed_treatment_data, on=[\n 'PatientSerNum', 'date'], how='inner')\n", (8416, 8518), True, 'import pandas as pd\n'), ((798, 861), 'pandas.get_dummies', 'pd.get_dummies', (['processed_data[[feature]]'], {'prefix': 'f"""{feature}_"""'}), 
"(processed_data[[feature]], prefix=f'{feature}_')\n", (812, 861), True, 'import pandas as pd\n'), ((938, 980), 'pandas.concat', 'pd.concat', (['[cate_onehot_data, tmp]'], {'axis': '(1)'}), '([cate_onehot_data, tmp], axis=1)\n', (947, 980), True, 'import pandas as pd\n'), ((4007, 4114), 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['(128)'], {'batch_input_shape': '(None, None, 209)', 'dropout': '(0.1)', 'recurrent_dropout': '(0.5)', 'name': '"""input"""'}), "(128, batch_input_shape=(None, None, 209), dropout=0.1,\n recurrent_dropout=0.5, name='input')\n", (4018, 4114), False, 'from tensorflow.keras import layers, optimizers\n'), ((4361, 4377), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(32)'], {}), '(32)\n', (4373, 4377), False, 'from tensorflow.keras import layers, optimizers\n'), ((4393, 4408), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {}), '(1)\n', (4405, 4408), False, 'from tensorflow.keras import layers, optimizers\n'), ((7940, 7958), 'numpy.array', 'np.array', (['test_set'], {}), '(test_set)\n', (7948, 7958), True, 'import numpy as np\n'), ((8044, 8068), 'numpy.array', 'np.array', (['label_test_set'], {}), '(label_test_set)\n', (8052, 8068), True, 'import numpy as np\n'), ((4969, 4994), 'numpy.array', 'np.array', (['feature_samples'], {}), '(feature_samples)\n', (4977, 4994), True, 'import numpy as np\n'), ((4662, 4676), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4673, 4676), False, 'import random\n'), ((4693, 4714), 'random.shuffle', 'random.shuffle', (['x_set'], {}), '(x_set)\n', (4707, 4714), False, 'import random\n'), ((4731, 4745), 'random.seed', 'random.seed', (['(1)'], {}), '(1)\n', (4742, 4745), False, 'import random\n'), ((4762, 4783), 'random.shuffle', 'random.shuffle', (['y_set'], {}), '(y_set)\n', (4776, 4783), False, 'import random\n'), ((5043, 5066), 'numpy.array', 'np.array', (['label_samples'], {}), '(label_samples)\n', (5051, 5066), True, 'import numpy as np\n'), ((8125, 8141), 'numpy.abs', 'np.abs', 
(['residual'], {}), '(residual)\n', (8131, 8141), True, 'import numpy as np\n'), ((8205, 8229), 'numpy.array', 'np.array', (['label_test_set'], {}), '(label_test_set)\n', (8213, 8229), True, 'import numpy as np\n')]
|
# import new Network name here and add in model_class args
from .Network import MYNET
from utils import *
from tqdm import tqdm
import torch.nn.functional as F
def base_train(model, trainloader, optimizer, scheduler, epoch, args):
tl = Averager()
ta = Averager()
model = model.train()
# standard classification for pretrain
tqdm_gen = tqdm(trainloader)
for i, batch in enumerate(tqdm_gen, 1):
data, train_label = [_.cuda() for _ in batch]
logits = model(data)
logits = logits[:, :args.base_class]
loss = F.cross_entropy(logits, train_label)
acc = count_acc(logits, train_label)
total_loss = loss
lrc = scheduler.get_last_lr()[0]
tqdm_gen.set_description(
'Session 0, epo {}, lrc={:.4f},total loss={:.4f} acc={:.4f}'.format(epoch, lrc, total_loss.item(), acc))
tl.add(total_loss.item())
ta.add(acc)
optimizer.zero_grad()
loss.backward()
optimizer.step()
tl = tl.item()
ta = ta.item()
return tl, ta
def replace_base_fc(trainset, transform, model, args):
# replace fc.weight with the embedding average of train data
model = model.eval()
trainloader = torch.utils.data.DataLoader(dataset=trainset, batch_size=128,
num_workers=8, pin_memory=True, shuffle=False)
trainloader.dataset.transform = transform
embedding_list = []
label_list = []
# data_list=[]
with torch.no_grad():
for i, batch in enumerate(trainloader):
data, label = [_.cuda() for _ in batch]
model.module.mode = 'encoder'
embedding = model(data)
embedding_list.append(embedding.cpu())
label_list.append(label.cpu())
embedding_list = torch.cat(embedding_list, dim=0)
label_list = torch.cat(label_list, dim=0)
proto_list = []
for class_index in range(args.base_class):
data_index = (label_list == class_index).nonzero()
embedding_this = embedding_list[data_index.squeeze(-1)]
embedding_this = embedding_this.mean(0)
proto_list.append(embedding_this)
proto_list = torch.stack(proto_list, dim=0)
model.module.fc.weight.data[:args.base_class] = proto_list
return model
def test(model, testloader, epoch, args, session):
test_class = args.base_class + session * args.way
model = model.eval()
vl = Averager()
va = Averager()
with torch.no_grad():
tqdm_gen = tqdm(testloader)
for i, batch in enumerate(tqdm_gen, 1):
data, test_label = [_.cuda() for _ in batch]
logits = model(data)
logits = logits[:, :test_class]
loss = F.cross_entropy(logits, test_label)
acc = count_acc(logits, test_label)
vl.add(loss.item())
va.add(acc)
vl = vl.item()
va = va.item()
print('epo {}, test, loss={:.4f} acc={:.4f}'.format(epoch, vl, va))
return vl, va
|
[
"tqdm.tqdm",
"torch.nn.functional.cross_entropy"
] |
[((357, 374), 'tqdm.tqdm', 'tqdm', (['trainloader'], {}), '(trainloader)\n', (361, 374), False, 'from tqdm import tqdm\n'), ((563, 599), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'train_label'], {}), '(logits, train_label)\n', (578, 599), True, 'import torch.nn.functional as F\n'), ((2514, 2530), 'tqdm.tqdm', 'tqdm', (['testloader'], {}), '(testloader)\n', (2518, 2530), False, 'from tqdm import tqdm\n'), ((2732, 2767), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['logits', 'test_label'], {}), '(logits, test_label)\n', (2747, 2767), True, 'import torch.nn.functional as F\n')]
|
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import pyplot
import matplotlib
import seaborn as sns
"""
Visualization functions for processing graphs, hypergraphs and simplicial
complexes. TODOs add options for saving visuals.
"""
def visualize_q_percolation(Q):
"""
Takes a python dictionary of variables (keys) and values
:param Q:
:return:
"""
values = Q
plt.ylabel('Number of Components for Q-Dimension')
plt.title('Q-Value Percolation')
plt.plot(values)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_p_percolation(P):
"""
Takes a python dictionary of variables (keys) and values
:param P:
:return:
"""
values = P
plt.ylabel('Number of Simplicies for Q-Dimension')
plt.title('P-Value Percolation')
plt.plot(values)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_q(Q, simplex_set):
"""
:param Q:
:param simplex_set:
:return:
"""
labels = simplex_set
values = Q
pos = np.arange(len(labels))
plt.bar(pos, values, align='center')
plt.xticks(pos, labels)
plt.ylabel('Dimensions of Simplex')
plt.title('Q Structure Vector')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_q_slice(Q, simplex_set, theta=1):
terms = {}
count = 0
for i in Q:
if i > theta:
terms[simplex_set[count]] = i
else:
pass
count = count + 1
values = []
labels = []
for i in terms.items():
values.append(i[1])
labels.append(i[0])
y_pos = np.arange(len(labels))
plt.bar(y_pos, values, align='center')
plt.xticks(y_pos, labels)
plt.ylabel('Number of Shared Vertices')
plt.title('Top Simplicies')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
return terms
def visualize_eccentricity(ecc):
"""
Takes a vector of eccentricity values
:param ecc: numpy array or list
:return:
"""
labels = range(len(ecc))
values = ecc
y_pos = np.arange(len(labels))
plt.bar(y_pos, values, align='center')
plt.xticks(y_pos, labels)
plt.ylabel('Eccentricity 0 to 1')
plt.title('Simplex Eccentricity')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_retained_ecc(ecc):
labels = range(len(ecc))
values = ecc
y_pos = np.arange(len(labels))
plt.bar(y_pos, values, align='center')
plt.xticks(y_pos, labels)
plt.ylabel('Eccentricity 0 to 1')
plt.title('Retained Topics By Simplex Eccentricity')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
return
def visualize_simple_graph(I, IT):
am = (np.dot(I.as_matrix(), IT.as_matrix()) > 0).astype(int)
# np.fill_diagonal(am, 0)
G = nx.from_numpy_matrix(am)
# Draw simplex graph
pos = nx.shell_layout(G)
nx.draw(G, pos)
# show graph
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_weighted_graph(edges):
G = nx.Graph()
G.add_weighted_edges_from(edges)
pos = nx.spring_layout(G)
nx.draw(G, pos=pos, with_labels=True)
# show graph
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_incidence_graph(edges):
G = nx.Graph()
G.add_weighted_edges_from(edges)
pos = nx.spring_layout(G)
nx.draw(G, pos=pos, with_labels=True)
# show graph
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_bipart_graph(bigraph):
l, r = nx.bipartite.sets(bigraph)
pos = {}
# Update position for node from each group
pos.update((node, (1, i)) for i, node in enumerate(l))
pos.update((node, (2, i)) for i, node in enumerate(r))
nx.draw(bigraph, pos=pos, with_labels=True)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_qmatrix(qmatrix):
sns.set()
ax = sns.heatmap(qmatrix)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_conjugate(conj):
sns.set()
ax = sns.heatmap(conj)
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_pri_histogram(ranking):
labels = range(len(ranking))
values = ranking
y_pos = np.arange(len(labels))
plt.bar(y_pos, values, align='center')
plt.xticks(y_pos, labels)
plt.ylabel('Preference Ranking Score')
plt.title('Preference Ranking Index')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_pri_line(ranking, theta):
labels = range(len(ranking))
values = ranking
y_pos = np.arange(len(labels))
plt.plot(y_pos, values, "s-")
plt.xticks(y_pos, labels)
plt.ylabel('Preference Ranking Score')
plt.title('Preference Ranking Index')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_psi_histogram(psi, theta):
labels = range(len(psi))
values = psi
y_pos = np.arange(len(labels))
plt.bar(y_pos, values, align='center')
plt.xticks(y_pos, labels)
plt.ylabel('Preference Satisfaction Score')
plt.title('Preference Satisfaction Index')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
def visualize_psi_line(psi, theta):
labels = range(len(psi))
values = psi
y_pos = np.arange(len(labels))
plt.plot(y_pos, values, "o-")
plt.xticks(y_pos, labels)
plt.ylabel('Preference Satisfaction Score')
plt.title('Preference Satisfaction Index')
fig = matplotlib.pyplot.gcf()
fig.set_size_inches(10, 8)
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"networkx.from_numpy_matrix",
"matplotlib.pyplot.plot",
"seaborn.heatmap",
"matplotlib.pyplot.bar",
"networkx.bipartite.sets",
"networkx.spring_layout",
"networkx.draw",
"matplotlib.pyplot.xticks",
"networkx.Graph",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.gcf",
"seaborn.set",
"networkx.shell_layout"
] |
[((434, 484), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Components for Q-Dimension"""'], {}), "('Number of Components for Q-Dimension')\n", (444, 484), True, 'import matplotlib.pyplot as plt\n'), ((489, 521), 'matplotlib.pyplot.title', 'plt.title', (['"""Q-Value Percolation"""'], {}), "('Q-Value Percolation')\n", (498, 521), True, 'import matplotlib.pyplot as plt\n'), ((526, 542), 'matplotlib.pyplot.plot', 'plt.plot', (['values'], {}), '(values)\n', (534, 542), True, 'import matplotlib.pyplot as plt\n'), ((553, 576), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (574, 576), False, 'import matplotlib\n'), ((612, 622), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (620, 622), True, 'import matplotlib.pyplot as plt\n'), ((780, 830), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Simplicies for Q-Dimension"""'], {}), "('Number of Simplicies for Q-Dimension')\n", (790, 830), True, 'import matplotlib.pyplot as plt\n'), ((835, 867), 'matplotlib.pyplot.title', 'plt.title', (['"""P-Value Percolation"""'], {}), "('P-Value Percolation')\n", (844, 867), True, 'import matplotlib.pyplot as plt\n'), ((872, 888), 'matplotlib.pyplot.plot', 'plt.plot', (['values'], {}), '(values)\n', (880, 888), True, 'import matplotlib.pyplot as plt\n'), ((899, 922), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (920, 922), False, 'import matplotlib\n'), ((958, 968), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (966, 968), True, 'import matplotlib.pyplot as plt\n'), ((1148, 1184), 'matplotlib.pyplot.bar', 'plt.bar', (['pos', 'values'], {'align': '"""center"""'}), "(pos, values, align='center')\n", (1155, 1184), True, 'import matplotlib.pyplot as plt\n'), ((1189, 1212), 'matplotlib.pyplot.xticks', 'plt.xticks', (['pos', 'labels'], {}), '(pos, labels)\n', (1199, 1212), True, 'import matplotlib.pyplot as plt\n'), ((1217, 1252), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Dimensions of Simplex"""'], {}), 
"('Dimensions of Simplex')\n", (1227, 1252), True, 'import matplotlib.pyplot as plt\n'), ((1257, 1288), 'matplotlib.pyplot.title', 'plt.title', (['"""Q Structure Vector"""'], {}), "('Q Structure Vector')\n", (1266, 1288), True, 'import matplotlib.pyplot as plt\n'), ((1299, 1322), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (1320, 1322), False, 'import matplotlib\n'), ((1358, 1368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1366, 1368), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1778), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values'], {'align': '"""center"""'}), "(y_pos, values, align='center')\n", (1747, 1778), True, 'import matplotlib.pyplot as plt\n'), ((1783, 1808), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (1793, 1808), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1852), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of Shared Vertices"""'], {}), "('Number of Shared Vertices')\n", (1823, 1852), True, 'import matplotlib.pyplot as plt\n'), ((1857, 1884), 'matplotlib.pyplot.title', 'plt.title', (['"""Top Simplicies"""'], {}), "('Top Simplicies')\n", (1866, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1895, 1918), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (1916, 1918), False, 'import matplotlib\n'), ((1954, 1964), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1962, 1964), True, 'import matplotlib.pyplot as plt\n'), ((2209, 2247), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values'], {'align': '"""center"""'}), "(y_pos, values, align='center')\n", (2216, 2247), True, 'import matplotlib.pyplot as plt\n'), ((2252, 2277), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (2262, 2277), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2315), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Eccentricity 0 to 1"""'], {}), "('Eccentricity 0 to 1')\n", (2292, 2315), 
True, 'import matplotlib.pyplot as plt\n'), ((2320, 2353), 'matplotlib.pyplot.title', 'plt.title', (['"""Simplex Eccentricity"""'], {}), "('Simplex Eccentricity')\n", (2329, 2353), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2387), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (2385, 2387), False, 'import matplotlib\n'), ((2423, 2433), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2431, 2433), True, 'import matplotlib.pyplot as plt\n'), ((2554, 2592), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values'], {'align': '"""center"""'}), "(y_pos, values, align='center')\n", (2561, 2592), True, 'import matplotlib.pyplot as plt\n'), ((2597, 2622), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (2607, 2622), True, 'import matplotlib.pyplot as plt\n'), ((2627, 2660), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Eccentricity 0 to 1"""'], {}), "('Eccentricity 0 to 1')\n", (2637, 2660), True, 'import matplotlib.pyplot as plt\n'), ((2665, 2717), 'matplotlib.pyplot.title', 'plt.title', (['"""Retained Topics By Simplex Eccentricity"""'], {}), "('Retained Topics By Simplex Eccentricity')\n", (2674, 2717), True, 'import matplotlib.pyplot as plt\n'), ((2728, 2751), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (2749, 2751), False, 'import matplotlib\n'), ((2787, 2797), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2795, 2797), True, 'import matplotlib.pyplot as plt\n'), ((2949, 2973), 'networkx.from_numpy_matrix', 'nx.from_numpy_matrix', (['am'], {}), '(am)\n', (2969, 2973), True, 'import networkx as nx\n'), ((3009, 3027), 'networkx.shell_layout', 'nx.shell_layout', (['G'], {}), '(G)\n', (3024, 3027), True, 'import networkx as nx\n'), ((3032, 3047), 'networkx.draw', 'nx.draw', (['G', 'pos'], {}), '(G, pos)\n', (3039, 3047), True, 'import networkx as nx\n'), ((3075, 3098), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (3096, 3098), False, 
'import matplotlib\n'), ((3134, 3144), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3142, 3144), True, 'import matplotlib.pyplot as plt\n'), ((3192, 3202), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3200, 3202), True, 'import networkx as nx\n'), ((3250, 3269), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3266, 3269), True, 'import networkx as nx\n'), ((3274, 3311), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'with_labels': '(True)'}), '(G, pos=pos, with_labels=True)\n', (3281, 3311), True, 'import networkx as nx\n'), ((3339, 3362), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (3360, 3362), False, 'import matplotlib\n'), ((3398, 3408), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3406, 3408), True, 'import matplotlib.pyplot as plt\n'), ((3457, 3467), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (3465, 3467), True, 'import networkx as nx\n'), ((3515, 3534), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (3531, 3534), True, 'import networkx as nx\n'), ((3539, 3576), 'networkx.draw', 'nx.draw', (['G'], {'pos': 'pos', 'with_labels': '(True)'}), '(G, pos=pos, with_labels=True)\n', (3546, 3576), True, 'import networkx as nx\n'), ((3604, 3627), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (3625, 3627), False, 'import matplotlib\n'), ((3663, 3673), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3671, 3673), True, 'import matplotlib.pyplot as plt\n'), ((3724, 3750), 'networkx.bipartite.sets', 'nx.bipartite.sets', (['bigraph'], {}), '(bigraph)\n', (3741, 3750), True, 'import networkx as nx\n'), ((3933, 3976), 'networkx.draw', 'nx.draw', (['bigraph'], {'pos': 'pos', 'with_labels': '(True)'}), '(bigraph, pos=pos, with_labels=True)\n', (3940, 3976), True, 'import networkx as nx\n'), ((3987, 4010), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (4008, 4010), False, 'import matplotlib\n'), ((4046, 4056), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4054, 4056), True, 'import matplotlib.pyplot as plt\n'), ((4095, 4104), 'seaborn.set', 'sns.set', ([], {}), '()\n', (4102, 4104), True, 'import seaborn as sns\n'), ((4114, 4134), 'seaborn.heatmap', 'sns.heatmap', (['qmatrix'], {}), '(qmatrix)\n', (4125, 4134), True, 'import seaborn as sns\n'), ((4145, 4168), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (4166, 4168), False, 'import matplotlib\n'), ((4204, 4214), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4212, 4214), True, 'import matplotlib.pyplot as plt\n'), ((4252, 4261), 'seaborn.set', 'sns.set', ([], {}), '()\n', (4259, 4261), True, 'import seaborn as sns\n'), ((4271, 4288), 'seaborn.heatmap', 'sns.heatmap', (['conj'], {}), '(conj)\n', (4282, 4288), True, 'import seaborn as sns\n'), ((4299, 4322), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (4320, 4322), False, 'import matplotlib\n'), ((4358, 4368), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4366, 4368), True, 'import matplotlib.pyplot as plt\n'), ((4502, 4540), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values'], {'align': '"""center"""'}), "(y_pos, values, align='center')\n", (4509, 4540), True, 'import matplotlib.pyplot as plt\n'), ((4545, 4570), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (4555, 4570), True, 'import matplotlib.pyplot as plt\n'), ((4575, 4613), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Preference Ranking Score"""'], {}), "('Preference Ranking Score')\n", (4585, 4613), True, 'import matplotlib.pyplot as plt\n'), ((4618, 4655), 'matplotlib.pyplot.title', 'plt.title', (['"""Preference Ranking Index"""'], {}), "('Preference Ranking Index')\n", (4627, 4655), True, 'import matplotlib.pyplot as plt\n'), ((4666, 4689), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (4687, 4689), False, 'import matplotlib\n'), ((4725, 4735), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4733, 4735), True, 'import matplotlib.pyplot as plt\n'), ((4871, 4900), 'matplotlib.pyplot.plot', 'plt.plot', (['y_pos', 'values', '"""s-"""'], {}), "(y_pos, values, 's-')\n", (4879, 4900), True, 'import matplotlib.pyplot as plt\n'), ((4905, 4930), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (4915, 4930), True, 'import matplotlib.pyplot as plt\n'), ((4935, 4973), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Preference Ranking Score"""'], {}), "('Preference Ranking Score')\n", (4945, 4973), True, 'import matplotlib.pyplot as plt\n'), ((4978, 5015), 'matplotlib.pyplot.title', 'plt.title', (['"""Preference Ranking Index"""'], {}), "('Preference Ranking Index')\n", (4987, 5015), True, 'import matplotlib.pyplot as plt\n'), ((5026, 5049), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (5047, 5049), False, 'import matplotlib\n'), ((5085, 5095), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5093, 5095), True, 'import matplotlib.pyplot as plt\n'), ((5224, 5262), 'matplotlib.pyplot.bar', 'plt.bar', (['y_pos', 'values'], {'align': '"""center"""'}), "(y_pos, values, align='center')\n", (5231, 5262), True, 'import matplotlib.pyplot as plt\n'), ((5267, 5292), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (5277, 5292), True, 'import matplotlib.pyplot as plt\n'), ((5297, 5340), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Preference Satisfaction Score"""'], {}), "('Preference Satisfaction Score')\n", (5307, 5340), True, 'import matplotlib.pyplot as plt\n'), ((5345, 5387), 'matplotlib.pyplot.title', 'plt.title', (['"""Preference Satisfaction Index"""'], {}), "('Preference Satisfaction Index')\n", (5354, 5387), True, 'import matplotlib.pyplot as plt\n'), ((5398, 5421), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (5419, 5421), False, 'import matplotlib\n'), ((5457, 5467), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5465, 5467), True, 'import matplotlib.pyplot as plt\n'), ((5591, 5620), 'matplotlib.pyplot.plot', 'plt.plot', (['y_pos', 'values', '"""o-"""'], {}), "(y_pos, values, 'o-')\n", (5599, 5620), True, 'import matplotlib.pyplot as plt\n'), ((5625, 5650), 'matplotlib.pyplot.xticks', 'plt.xticks', (['y_pos', 'labels'], {}), '(y_pos, labels)\n', (5635, 5650), True, 'import matplotlib.pyplot as plt\n'), ((5655, 5698), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Preference Satisfaction Score"""'], {}), "('Preference Satisfaction Score')\n", (5665, 5698), True, 'import matplotlib.pyplot as plt\n'), ((5703, 5745), 'matplotlib.pyplot.title', 'plt.title', (['"""Preference Satisfaction Index"""'], {}), "('Preference Satisfaction Index')\n", (5712, 5745), True, 'import matplotlib.pyplot as plt\n'), ((5756, 5779), 'matplotlib.pyplot.gcf', 'matplotlib.pyplot.gcf', ([], {}), '()\n', (5777, 5779), False, 'import matplotlib\n'), ((5815, 5825), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5823, 5825), True, 'import matplotlib.pyplot as plt\n')]
|
import logging
logging.basicConfig(
format='%(asctime)s [CTFd-Whale | %(levelname)s] %(message)s',
level=logging.DEBUG,
datefmt='%H:%M:%S'
)
|
[
"logging.basicConfig"
] |
[((16, 149), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s [CTFd-Whale | %(levelname)s] %(message)s"""', 'level': 'logging.DEBUG', 'datefmt': '"""%H:%M:%S"""'}), "(format=\n '%(asctime)s [CTFd-Whale | %(levelname)s] %(message)s', level=logging.\n DEBUG, datefmt='%H:%M:%S')\n", (35, 149), False, 'import logging\n')]
|
# MIT License
#
# Copyright (c) 2018 <NAME>, <EMAIL>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import pytest
from dateutil.parser import parse
from web3 import HTTPProvider, Web3
from ethereumetl.service.eth_service import EthService
from ethereumetl.service.graph_operations import OutOfBoundsError
run_slow_tests = os.environ.get('ETHEREUM_ETL_RUN_SLOW_TESTS', None) == '1'
skip_slow_tests = pytest.mark.skipif(not run_slow_tests, reason='Slow running tests')
@skip_slow_tests
@pytest.mark.parametrize("date,expected_start_block,expected_end_block", [
('2015-07-30', 0, 6911),
('2015-07-31', 6912, 13774),
('2017-01-01', 2912407, 2918517),
('2017-01-02', 2918518, 2924575),
('2018-06-10', 5761663, 5767303)
])
def test_get_block_range_for_date(date, expected_start_block, expected_end_block):
eth_service = get_new_eth_service()
parsed_date = parse(date)
blocks = eth_service.get_block_range_for_date(parsed_date)
assert blocks == (expected_start_block, expected_end_block)
@skip_slow_tests
@pytest.mark.parametrize("date", [
'2015-07-29',
'2030-01-01'
])
def test_get_block_range_for_date_fail(date):
eth_service = get_new_eth_service()
parsed_date = parse(date)
with pytest.raises(OutOfBoundsError):
eth_service.get_block_range_for_date(parsed_date)
@skip_slow_tests
@pytest.mark.parametrize("start_timestamp,end_timestamp,expected_start_block,expected_end_block", [
(1438270128, 1438270128, 10, 10),
(1438270128, 1438270129, 10, 10)
])
def test_get_block_range_for_timestamps(start_timestamp, end_timestamp, expected_start_block, expected_end_block):
eth_service = get_new_eth_service()
blocks = eth_service.get_block_range_for_timestamps(start_timestamp, end_timestamp)
assert blocks == (expected_start_block, expected_end_block)
@skip_slow_tests
@pytest.mark.parametrize("start_timestamp,end_timestamp", [
(1438270129, 1438270131)
])
def test_get_block_range_for_timestamps_fail(start_timestamp, end_timestamp):
eth_service = get_new_eth_service()
with pytest.raises(ValueError):
eth_service.get_block_range_for_timestamps(start_timestamp, end_timestamp)
def get_new_eth_service():
web3 = Web3(HTTPProvider('https://mainnet.infura.io/'))
return EthService(web3)
|
[
"dateutil.parser.parse",
"ethereumetl.service.eth_service.EthService",
"os.environ.get",
"web3.HTTPProvider",
"pytest.raises",
"pytest.mark.skipif",
"pytest.mark.parametrize"
] |
[((1424, 1491), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not run_slow_tests)'], {'reason': '"""Slow running tests"""'}), "(not run_slow_tests, reason='Slow running tests')\n", (1442, 1491), False, 'import pytest\n'), ((1512, 1756), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""date,expected_start_block,expected_end_block"""', "[('2015-07-30', 0, 6911), ('2015-07-31', 6912, 13774), ('2017-01-01', \n 2912407, 2918517), ('2017-01-02', 2918518, 2924575), ('2018-06-10', \n 5761663, 5767303)]"], {}), "('date,expected_start_block,expected_end_block', [(\n '2015-07-30', 0, 6911), ('2015-07-31', 6912, 13774), ('2017-01-01', \n 2912407, 2918517), ('2017-01-02', 2918518, 2924575), ('2018-06-10', \n 5761663, 5767303)])\n", (1535, 1756), False, 'import pytest\n'), ((2064, 2125), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""date"""', "['2015-07-29', '2030-01-01']"], {}), "('date', ['2015-07-29', '2030-01-01'])\n", (2087, 2125), False, 'import pytest\n'), ((2372, 2547), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start_timestamp,end_timestamp,expected_start_block,expected_end_block"""', '[(1438270128, 1438270128, 10, 10), (1438270128, 1438270129, 10, 10)]'], {}), "(\n 'start_timestamp,end_timestamp,expected_start_block,expected_end_block',\n [(1438270128, 1438270128, 10, 10), (1438270128, 1438270129, 10, 10)])\n", (2395, 2547), False, 'import pytest\n'), ((2876, 2965), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""start_timestamp,end_timestamp"""', '[(1438270129, 1438270131)]'], {}), "('start_timestamp,end_timestamp', [(1438270129, \n 1438270131)])\n", (2899, 2965), False, 'import pytest\n'), ((1347, 1398), 'os.environ.get', 'os.environ.get', (['"""ETHEREUM_ETL_RUN_SLOW_TESTS"""', 'None'], {}), "('ETHEREUM_ETL_RUN_SLOW_TESTS', None)\n", (1361, 1398), False, 'import os\n'), ((1905, 1916), 'dateutil.parser.parse', 'parse', (['date'], {}), '(date)\n', (1910, 1916), False, 'from dateutil.parser import parse\n'), 
((2240, 2251), 'dateutil.parser.parse', 'parse', (['date'], {}), '(date)\n', (2245, 2251), False, 'from dateutil.parser import parse\n'), ((3304, 3320), 'ethereumetl.service.eth_service.EthService', 'EthService', (['web3'], {}), '(web3)\n', (3314, 3320), False, 'from ethereumetl.service.eth_service import EthService\n'), ((2261, 2292), 'pytest.raises', 'pytest.raises', (['OutOfBoundsError'], {}), '(OutOfBoundsError)\n', (2274, 2292), False, 'import pytest\n'), ((3094, 3119), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3107, 3119), False, 'import pytest\n'), ((3249, 3291), 'web3.HTTPProvider', 'HTTPProvider', (['"""https://mainnet.infura.io/"""'], {}), "('https://mainnet.infura.io/')\n", (3261, 3291), False, 'from web3 import HTTPProvider, Web3\n')]
|
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
import sys
import mock
import qisys.interact
from qisys.test.fake_interact import FakeInteract
def test_ask_yes_no():
""" Test that you can answer with several types of common answers """
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["y", "yes", "Yes", "n", "no", "No"]
expected_res = [True, True, True, False, False, False]
for res in expected_res:
actual = qisys.interact.ask_yes_no("coffee?")
assert actual == res
def test_ask_yes_no_default():
""" Test that just pressing enter returns the default value """
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["", ""]
assert qisys.interact.ask_yes_no("coffee?", default=True) is True
assert qisys.interact.ask_yes_no("coffee?", default=False) is False
def test_ask_yes_no_wrong_input():
""" Test that we keep asking when answer does not make sense """
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["coffee!", "n"]
assert qisys.interact.ask_yes_no("tea?") is False
assert m.call_count == 2
def test_ask_string():
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["sugar!", ""]
res = qisys.interact.ask_string("coffee with what?")
assert res == "sugar!"
res = qisys.interact.ask_string("coffee with what?", default="milk")
assert res == "milk"
def test_ask_program(record_messages):
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["doesnotexists", "y", __file__, "y", sys.executable]
res = qisys.interact.ask_program("path to program")
assert res == qisys.sh.to_native_path(sys.executable)
assert record_messages.find("does not exist")
assert record_messages.find("is not a valid executable")
def test_get_editor_visual(monkeypatch):
monkeypatch.setenv("VISUAL", "/usr/bin/vim")
assert qisys.interact.get_editor() == "/usr/bin/vim"
def test_get_editor_editor(monkeypatch):
monkeypatch.delenv("VISUAL", raising=False)
monkeypatch.setenv("EDITOR", "/usr/bin/vim")
assert qisys.interact.get_editor() == "/usr/bin/vim"
def test_get_editor_ask(monkeypatch):
monkeypatch.delenv("VISUAL", raising=False)
monkeypatch.delenv("EDITOR", raising=False)
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["/usr/bin/vim"]
res = qisys.interact.get_editor()
assert res == qisys.sh.to_native_path("/usr/bin/vim")
assert m.called
def test_ask_app(tmpdir):
foo_app_path = tmpdir.ensure("Applications/foo.app", dir=True)
with mock.patch('__builtin__.raw_input') as m:
m.side_effect = ["doesnotexists", "y", foo_app_path.strpath]
assert qisys.interact.ask_app("foo") == foo_app_path.strpath
assert len(m.mock_calls) == 3
def test_fake_interact_list():
fake_interact = FakeInteract()
fake_interact.answers = [False, "coffee!"]
with mock.patch('qisys.interact', fake_interact):
assert qisys.interact.ask_yes_no("tea?") is False
assert qisys.interact.ask_string("then what?") == "coffee!"
def test_fake_interact_dict():
fake_interact = FakeInteract()
fake_interact.answers = {"coffee": "y", "tea": "n"}
with mock.patch('qisys.interact', fake_interact):
assert qisys.interact.ask_yes_no("Do you like tea?") == "n"
assert qisys.interact.ask_yes_no("Do you like coffee?") == "y"
def test_questions_are_recorded():
fake_interact = FakeInteract()
fake_interact.answers = {"coffee": "y", "tea": "n"}
with mock.patch('qisys.interact', fake_interact):
assert qisys.interact.ask_yes_no("Do you like tea?") == "n"
assert fake_interact.questions[0]['message'] == "Do you like tea?"
assert fake_interact.questions[0]['default'] is False
|
[
"qisys.test.fake_interact.FakeInteract",
"mock.patch"
] |
[((3088, 3102), 'qisys.test.fake_interact.FakeInteract', 'FakeInteract', ([], {}), '()\n', (3100, 3102), False, 'from qisys.test.fake_interact import FakeInteract\n'), ((3383, 3397), 'qisys.test.fake_interact.FakeInteract', 'FakeInteract', ([], {}), '()\n', (3395, 3397), False, 'from qisys.test.fake_interact import FakeInteract\n'), ((3704, 3718), 'qisys.test.fake_interact.FakeInteract', 'FakeInteract', ([], {}), '()\n', (3716, 3718), False, 'from qisys.test.fake_interact import FakeInteract\n'), ((373, 408), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (383, 408), False, 'import mock\n'), ((773, 808), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (783, 808), False, 'import mock\n'), ((1113, 1148), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (1123, 1148), False, 'import mock\n'), ((1321, 1356), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (1331, 1356), False, 'import mock\n'), ((1650, 1685), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (1660, 1685), False, 'import mock\n'), ((2502, 2537), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (2512, 2537), False, 'import mock\n'), ((2817, 2852), 'mock.patch', 'mock.patch', (['"""__builtin__.raw_input"""'], {}), "('__builtin__.raw_input')\n", (2827, 2852), False, 'import mock\n'), ((3159, 3202), 'mock.patch', 'mock.patch', (['"""qisys.interact"""', 'fake_interact'], {}), "('qisys.interact', fake_interact)\n", (3169, 3202), False, 'import mock\n'), ((3463, 3506), 'mock.patch', 'mock.patch', (['"""qisys.interact"""', 'fake_interact'], {}), "('qisys.interact', fake_interact)\n", (3473, 3506), False, 'import mock\n'), ((3784, 3827), 'mock.patch', 'mock.patch', (['"""qisys.interact"""', 'fake_interact'], {}), 
"('qisys.interact', fake_interact)\n", (3794, 3827), False, 'import mock\n')]
|
import os
import unittest
from flask import current_app
from flask_testing import TestCase
from service import create_app
app = create_app()
class TestProductionConfig(TestCase):
''' Tests for production config. '''
def create_app(self):
''' Create an app instance. '''
app.config.from_object('service.config.ProductionConfig')
return app
def test_app_is_production(self):
''' Test correctness of production config attributes. '''
self.assertTrue(app.config['SECRET_KEY'] == os.environ.get('SECRET_KEY'))
self.assertFalse(app.config['DEBUG'])
self.assertFalse(app.config['TESTING'])
self.assertTrue(app.config['BCRYPT_LOG_ROUNDS'] == 13)
self.assertTrue(app.config['TOKEN_EXPIRATION_DAYS'] == 30)
self.assertTrue(app.config['TOKEN_EXPIRATION_SECONDS'] == 0)
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"os.environ.get",
"service.create_app"
] |
[((137, 149), 'service.create_app', 'create_app', ([], {}), '()\n', (147, 149), False, 'from service import create_app\n'), ((922, 937), 'unittest.main', 'unittest.main', ([], {}), '()\n', (935, 937), False, 'import unittest\n'), ((557, 585), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (571, 585), False, 'import os\n')]
|
# Copyright 2013 Google Inc. All Rights Reserved.
"""config command group."""
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import config
from googlecloudsdk.core import properties
class Config(base.Group):
"""View and edit Google Cloud SDK properties."""
@staticmethod
def PropertiesCompleter(prefix, parsed_args, **kwargs):
all_sections = properties.VALUES.AllSections()
options = []
if '/' in prefix:
# Section has been specified, only return properties under that section.
parts = prefix.split('/', 1)
section = parts[0]
prefix = parts[1]
if section in all_sections:
section_str = section + '/'
props = properties.VALUES.Section(section).AllProperties()
options.extend([section_str + p for p in props if p.startswith(prefix)])
else:
# No section. Return matching sections and properties in the default
# group.
options.extend([s + '/' for s in all_sections if s.startswith(prefix)])
section = properties.VALUES.default_section.name
props = properties.VALUES.Section(section).AllProperties()
options.extend([p for p in props if p.startswith(prefix)])
return options
def ParsePropertyString(self, property_string):
"""Parses a string into a section and property name.
Args:
property_string: str, The property string in the format section/property.
Returns:
(str, str), The section and property. Both will be none if the input
string is empty. Property can be None if the string ends with a slash.
"""
if not property_string:
return None, None
if '/' in property_string:
section, prop = tuple(property_string.split('/', 1))
else:
section = None
prop = property_string
section = section or properties.VALUES.default_section.name
prop = prop or None
return section, prop
def PropertyFromString(self, property_string):
"""Gets the property object corresponding the given string.
Args:
property_string: str, The string to parse. It can be in the format
section/property, or just property if the section is the default one.
Returns:
properties.Property, The property.
"""
section, prop = self.ParsePropertyString(property_string)
if not prop:
raise c_exc.InvalidArgumentException(
'property', 'Must be in the form: [SECTION/]PROPERTY')
return properties.VALUES.Section(section).Property(prop)
|
[
"googlecloudsdk.core.properties.VALUES.Section",
"googlecloudsdk.calliope.exceptions.InvalidArgumentException",
"googlecloudsdk.core.properties.VALUES.AllSections"
] |
[((432, 463), 'googlecloudsdk.core.properties.VALUES.AllSections', 'properties.VALUES.AllSections', ([], {}), '()\n', (461, 463), False, 'from googlecloudsdk.core import properties\n'), ((2392, 2481), 'googlecloudsdk.calliope.exceptions.InvalidArgumentException', 'c_exc.InvalidArgumentException', (['"""property"""', '"""Must be in the form: [SECTION/]PROPERTY"""'], {}), "('property',\n 'Must be in the form: [SECTION/]PROPERTY')\n", (2422, 2481), True, 'from googlecloudsdk.calliope import exceptions as c_exc\n'), ((2500, 2534), 'googlecloudsdk.core.properties.VALUES.Section', 'properties.VALUES.Section', (['section'], {}), '(section)\n', (2525, 2534), False, 'from googlecloudsdk.core import properties\n'), ((1133, 1167), 'googlecloudsdk.core.properties.VALUES.Section', 'properties.VALUES.Section', (['section'], {}), '(section)\n', (1158, 1167), False, 'from googlecloudsdk.core import properties\n'), ((753, 787), 'googlecloudsdk.core.properties.VALUES.Section', 'properties.VALUES.Section', (['section'], {}), '(section)\n', (778, 787), False, 'from googlecloudsdk.core import properties\n')]
|
from ctypes import *
import struct
from io import BytesIO
class Properties:
def __init__(self, properties) -> None:
self.lnk = False
self.embedding = False
self.storage = False
self.default_mode = False
self.compressed_no_except = False
self.compressed_except = False
self.set_properties(properties)
pass
def set_properties(self, properties):
type_prop = properties & 0xF
if type_prop == 0:
self.lnk = True
if type_prop & 0x1:
self.embedding = True
if type_prop & 0x2:
self.storage = True
compress_prop = (properties >> 4)
if compress_prop == 0:
self.default_mode = True
if compress_prop & 0x1:
self.compressed_no_except = True
if compress_prop & 0x2:
self.compressed_except = True
class HWPTAG_binData:
def __init__(self, bytez):
stream = BytesIO(bytez)
self.properties = Properties(struct.unpack("<H", stream.read(2))[0])
if self.properties.lnk:
self.abs_path_len = struct.unpack("<H", stream.read(2))[0]
self.abs_lnk_path = stream.read(self.abs_path_len)
self.rel_path_len = struct.unpack("<H", stream.read(2))[0]
self.rel_lnk_path = stream.read(self.rel_lnk_path)
raise Exception("Please input original .hwp file")
self.bin_id = struct.unpack("<H", stream.read(2))[0]
self.bin_name_len = struct.unpack("<H", stream.read(2))[0]
self.extension = stream.read(self.bin_name_len*2).decode("utf16").replace("\x00","")
|
[
"io.BytesIO"
] |
[((964, 978), 'io.BytesIO', 'BytesIO', (['bytez'], {}), '(bytez)\n', (971, 978), False, 'from io import BytesIO\n')]
|
# Copyright 2017 Politecnico di Torino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
"""
Input packets filter plug-in.
@author: <NAME>
"""
from cybertop.plugins import FilterPlugin
class FilterInputPackets(FilterPlugin):
"""
Filters an attack event based on the input packets.
"""
def filter(self, value, attackEvent):
"""
Filters an attack event.
@param value: The optional value for the filter.
@param attackEvent: The attack event to analyze.
@return: True if the event must be accepted, False if the event must be discarded.
"""
inputBytes = attackEvent.fields["inputPackets"]
groups = re.findall("(==|!=|<|<=|>|>=)(\d+)", value)
relationship = groups[0][0]
number = int(groups[0][1])
if relationship == "==" and inputBytes == number:
return True
elif relationship == "!=" and inputBytes != number:
return True
elif relationship == "<" and inputBytes < number:
return True
elif relationship == "<=" and inputBytes <= number:
return True
elif relationship == ">" and inputBytes > number:
return True
elif relationship == ">=" and inputBytes >= number:
return True
else:
return False
|
[
"re.findall"
] |
[((1192, 1236), 're.findall', 're.findall', (['"""(==|!=|<|<=|>|>=)(\\\\d+)"""', 'value'], {}), "('(==|!=|<|<=|>|>=)(\\\\d+)', value)\n", (1202, 1236), False, 'import re\n')]
|
import argparse
import json
import time
import os
import sys
# import threading
from lib.Utils import Utils
from lib.Constants import Constants
from lib.SocketConnection import SocketConnection
utils = Utils()
constants = Constants()
# Argument parser
parser = argparse.ArgumentParser(description='HTTP Request Smuggling vulnerability detection tool')
parser.add_argument("-u", "--url", help="set the target url")
parser.add_argument("-urls", "--urls", help="set list of target urls, i.e (urls.txt)")
parser.add_argument("-t", "--timeout", default=10, help="set socket timeout, default - 10")
parser.add_argument("-m", "--method", default="POST", help="set HTTP Methods, i.e (GET or POST), default - POST")
parser.add_argument("-r", "--retry", default=2, help="set the retry count to re-execute the payload, default - 2")
parser.add_argument("-o", "--output", dest="reports", default="reports", help="Set output folder")
parser.add_argument("-p", "--payloads", dest="payloads", default="payloads.json", help="Payloads file")
args = parser.parse_args()
def hrs_detection(connection, method, permute_type, content_length_key, te_key, te_value, smuggle_type, content_length, payload, timeout):
host = connection.host
port = connection.port
path = connection.path
headers = ''
headers += '{} {} HTTP/1.1{}'.format(method, path, constants.crlf)
headers += 'Host: {}{}'.format(host, constants.crlf)
headers += '{} {}{}'.format(content_length_key,content_length, constants.crlf)
headers += '{}{}{}'.format(te_key, te_value, constants.crlf)
smuggle_body = headers + payload
permute_type = "["+permute_type+"]"
elapsed_time = "-"
try:
connection.connect(timeout)
# Start
start_time = time.time()
connection.send_payload(smuggle_body)
response = connection.receive_data().decode("utf-8")
end_time = time.time()
# End
connection.close_connection()
elapsed_time = str(round((end_time - start_time) % 60, 2))+"s"
test = f"{host}{path}, {permute_type}, {smuggle_type}, {elapsed_time}"
if time.time() - start_time >= args.timeout:
with open(connection.reports, "rw+") as f:
f.write(test)
print(f"{test}, NOK")
except Exception as e:
print(e)
# There is a delay of 1 second after executing each payload
time.sleep(1)
if __name__ == "__main__":
# If the python version less than 3.x then it will exit
if sys.version_info < (3, 0):
print(constants.python_version_error_msg)
sys.exit(1)
# Both (url/urls) options not allowed at the same time
if args.urls and args.url:
print(constants.invalid_url_options)
sys.exit(1)
target_urls = list()
if args.urls:
urls = utils.read_target_list(args.urls)
if constants.file_not_found in urls:
print(f"[{args.urls}] not found in your local directory")
sys.exit(1)
target_urls = urls
if args.url:
target_urls.append(args.url)
method = args.method.upper()
if method != "POST" and method != "GET":
print(constants.invalid_method_type)
sys.exit(1)
# To detect the HRS it requires at least 1 retry count
if args.retry == 0:
print(constants.invalid_retry_count)
sys.exit(1)
data = []
with open(args.payloads) as payloads:
data = json.load(payloads)
try:
for url in target_urls:
result = utils.url_parser(url)
connection = SocketConnection(result, args.reports)
try:
# Try every permutation
for permute in data[constants.permute]:
# Try every type (TECL, CLTE)
for d in data[constants.detection]:
# Based on the retry value it will re-execute the same payload again
for _ in range(args.retry):
transfer_encoding_obj = permute[constants.transfer_encoding]
hrs_detection(connection,
method,
permute[constants.type],
permute[constants.content_length_key],
transfer_encoding_obj[constants.te_key],
transfer_encoding_obj[constants.te_value],
d[constants.type],
d[constants.content_length],
d[constants.payload],
args.timeout)
except ValueError as _:
print(result)
except KeyboardInterrupt as e:
print(e)
|
[
"json.load",
"argparse.ArgumentParser",
"lib.Utils.Utils",
"lib.Constants.Constants",
"lib.SocketConnection.SocketConnection",
"time.time",
"time.sleep",
"sys.exit"
] |
[((203, 210), 'lib.Utils.Utils', 'Utils', ([], {}), '()\n', (208, 210), False, 'from lib.Utils import Utils\n'), ((223, 234), 'lib.Constants.Constants', 'Constants', ([], {}), '()\n', (232, 234), False, 'from lib.Constants import Constants\n'), ((263, 358), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""HTTP Request Smuggling vulnerability detection tool"""'}), "(description=\n 'HTTP Request Smuggling vulnerability detection tool')\n", (286, 358), False, 'import argparse\n'), ((2406, 2419), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2416, 2419), False, 'import time\n'), ((1762, 1773), 'time.time', 'time.time', ([], {}), '()\n', (1771, 1773), False, 'import time\n'), ((1900, 1911), 'time.time', 'time.time', ([], {}), '()\n', (1909, 1911), False, 'import time\n'), ((2600, 2611), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2608, 2611), False, 'import sys\n'), ((2756, 2767), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2764, 2767), False, 'import sys\n'), ((3227, 3238), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3235, 3238), False, 'import sys\n'), ((3376, 3387), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3384, 3387), False, 'import sys\n'), ((3460, 3479), 'json.load', 'json.load', (['payloads'], {}), '(payloads)\n', (3469, 3479), False, 'import json\n'), ((2997, 3008), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3005, 3008), False, 'import sys\n'), ((3590, 3628), 'lib.SocketConnection.SocketConnection', 'SocketConnection', (['result', 'args.reports'], {}), '(result, args.reports)\n', (3606, 3628), False, 'from lib.SocketConnection import SocketConnection\n'), ((2126, 2137), 'time.time', 'time.time', ([], {}), '()\n', (2135, 2137), False, 'import time\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 13 12:10:41 2019
@author: reiters
"""
import numpy as np
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_541_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t3=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_507_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t4=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/usedTextures/noiseBig_epoch_511_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseBig_epoch_512_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseBig_epoch_529_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t1=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
#t2=np.load('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseBig_epoch_529_fc1.0_ngf80_ndf80_dep5-5.npy',None,'allow_pickle',True)
img1Ratio=np.linspace(0,1,11) # for curtain-rocks
#img1Ratio=np.linspace(0.2,0.35,11) # for curtain-crack
#img1Ratio=np.linspace(0.4,.7,11) # for curtain-sand
intNoise=[]
for x in range( len(img1Ratio)):
intNoise.append(img1Ratio[x]*t1 + [1-img1Ratio[x]]*t2)
#np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_crack2_evaluated/best/noiseImage1',intNoise)
#np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_sand2_evaluated/best/noiseImage1',intNoise)
np.save('/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1',intNoise)
|
[
"numpy.load",
"numpy.save",
"numpy.linspace"
] |
[((689, 867), 'numpy.load', 'np.load', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy"""', 'None', '"""allow_pickle"""', '(True)'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_500_fc1.0_ngf80_ndf80_dep5-5.npy'\n , None, 'allow_pickle', True)\n", (696, 867), True, 'import numpy as np\n'), ((858, 1036), 'numpy.load', 'np.load', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy"""', 'None', '"""allow_pickle"""', '(True)'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseBig_epoch_501_fc1.0_ngf80_ndf80_dep5-5.npy'\n , None, 'allow_pickle', True)\n", (865, 1036), True, 'import numpy as np\n'), ((1734, 1755), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {}), '(0, 1, 11)\n', (1745, 1755), True, 'import numpy as np\n'), ((2232, 2356), 'numpy.save', 'np.save', (['"""/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1"""', 'intNoise'], {}), "(\n '/gpfs/laur/sepia_tools/PSGAN_textures/best_paired_models/curtain_rocks1_evaluated/noiseImage1'\n , intNoise)\n", (2239, 2356), True, 'import numpy as np\n')]
|
#!/usr/bin/env python3
#
# MIT License
#
# Copyright (c) 2020-2021 EntySec
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import os
import sys
import ctypes
import threading
from core.exceptions import exceptions
from core.formatter import formatter
from core.badges import badges
from core.storage import local_storage
from core.modules import modules
class jobs():
def __init__(self):
self.exceptions = exceptions()
self.formatter = formatter()
self.badges = badges()
self.local_storage = local_storage()
self.modules = modules()
self.job_process = None
def stop_dead(self):
jobs = self.local_storage.get("jobs")
if jobs:
for job_id in list(jobs):
if not jobs[job_id]['job_process'].is_alive():
self.delete_job(job_id)
def check_jobs(self):
if not self.local_storage.get("jobs"):
return True
return False
def check_module_job(self, module_name):
jobs = self.local_storage.get("jobs")
if jobs:
for job_id in jobs.keys():
if jobs[job_id]['module_name'] == module_name:
return True
return False
def exit_jobs(self):
if self.check_jobs():
return True
self.badges.output_warning("You have some running jobs.")
if self.badges.input_question("Exit anyway? [y/N] ").lower() in ['yes', 'y']:
self.badges.output_process("Stopping all jobs...")
self.stop_all_jobs()
return True
return False
def stop_all_jobs(self):
if not self.check_jobs():
for job_id in list(self.local_storage.get("jobs").keys()):
self.delete_job(job_id)
def stop_job(self, job):
if job.is_alive():
exc = ctypes.py_object(SystemExit)
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(ctypes.c_long(job.ident), exc)
if res == 0:
raise self.exceptions.GlobalException
if res > 1:
ctypes.pythonapi.PyThreadState_SetAsyncExc(job.ident, None)
raise self.exceptions.GlobalException
def start_job(self, job_function, job_arguments):
self.job_process = threading.Thread(target=job_function, args=job_arguments)
self.job_process.setDaemon(True)
self.job_process.start()
def delete_job(self, job_id):
if not self.check_jobs():
job_id = int(job_id)
if job_id in list(self.local_storage.get("jobs").keys()):
try:
self.stop_job(self.local_storage.get("jobs")[job_id]['job_process'])
self.local_storage.delete_element("jobs", job_id)
except Exception:
self.badges.output_error("Failed to stop job!")
else:
self.badges.output_error("Invalid job id!")
else:
self.badges.output_error("Invalid job id!")
def create_job(self, job_name, module_name, job_function, job_arguments=()):
self.start_job(job_function, job_arguments)
if not self.local_storage.get("jobs"):
self.local_storage.set("jobs", dict())
job_id = len(self.local_storage.get("jobs"))
job_data = {
job_id: {
'job_name': job_name,
'module_name': module_name,
'job_process': self.job_process
}
}
self.local_storage.update("jobs", job_data)
return job_id
|
[
"ctypes.py_object",
"core.modules.modules",
"threading.Thread",
"ctypes.pythonapi.PyThreadState_SetAsyncExc",
"core.formatter.formatter",
"core.exceptions.exceptions",
"ctypes.c_long",
"core.badges.badges",
"core.storage.local_storage"
] |
[((1433, 1445), 'core.exceptions.exceptions', 'exceptions', ([], {}), '()\n', (1443, 1445), False, 'from core.exceptions import exceptions\n'), ((1471, 1482), 'core.formatter.formatter', 'formatter', ([], {}), '()\n', (1480, 1482), False, 'from core.formatter import formatter\n'), ((1505, 1513), 'core.badges.badges', 'badges', ([], {}), '()\n', (1511, 1513), False, 'from core.badges import badges\n'), ((1543, 1558), 'core.storage.local_storage', 'local_storage', ([], {}), '()\n', (1556, 1558), False, 'from core.storage import local_storage\n'), ((1582, 1591), 'core.modules.modules', 'modules', ([], {}), '()\n', (1589, 1591), False, 'from core.modules import modules\n'), ((3337, 3394), 'threading.Thread', 'threading.Thread', ([], {'target': 'job_function', 'args': 'job_arguments'}), '(target=job_function, args=job_arguments)\n', (3353, 3394), False, 'import threading\n'), ((2885, 2913), 'ctypes.py_object', 'ctypes.py_object', (['SystemExit'], {}), '(SystemExit)\n', (2901, 2913), False, 'import ctypes\n'), ((2975, 2999), 'ctypes.c_long', 'ctypes.c_long', (['job.ident'], {}), '(job.ident)\n', (2988, 2999), False, 'import ctypes\n'), ((3125, 3184), 'ctypes.pythonapi.PyThreadState_SetAsyncExc', 'ctypes.pythonapi.PyThreadState_SetAsyncExc', (['job.ident', 'None'], {}), '(job.ident, None)\n', (3167, 3184), False, 'import ctypes\n')]
|
"""
Copyright (c) 2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import networkx as nx
from beta.nncf.api.compression import CompressionAlgorithmBuilder
from beta.nncf.api.compression import CompressionAlgorithmController
from beta.nncf.tensorflow.algorithm_selector import TF_COMPRESSION_ALGORITHMS
from beta.nncf.tensorflow.graph import patterns as p
from beta.nncf.tensorflow.graph.converter import convert_keras_model_to_nxmodel
from beta.nncf.tensorflow.graph.pattern_matching import search_all
from beta.nncf.tensorflow.graph.transformations.layout import TransformationLayout
from beta.nncf.tensorflow.graph.transformations.commands import InsertionCommand
from beta.nncf.tensorflow.graph.transformations.commands import AfterLayer
from beta.nncf.tensorflow.graph.transformations.commands import LayerWeight
from beta.nncf.tensorflow.graph.transformations.commands import TransformationPriority
from beta.nncf.tensorflow.graph.utils import get_original_name_and_instance_index
from beta.nncf.tensorflow.layers.common import ELEMENTWISE_LAYERS
from beta.nncf.tensorflow.layers.common import LAYERS_AGNOSTIC_TO_DATA_PRECISION
from beta.nncf.tensorflow.layers.common import LAYERS_WITH_WEIGHTS
from beta.nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATONS
from beta.nncf.tensorflow.quantization.config import QuantizerConfig
from beta.nncf.tensorflow.quantization.config import QuantizationMode
from beta.nncf.tensorflow.quantization.config import QuantizationConstraints
from beta.nncf.tensorflow.quantization.initializers.minmax import MinMaxInitializer
from beta.nncf.tensorflow.quantization.layers import FakeQuantize
from beta.nncf.utils.logger import logger
from beta.nncf.utils.utils import is_ignored
ACTIVATIONS = "activations"
WEIGHTS = "weights"
QUANTIZER_GROUPS = [
ACTIVATIONS,
WEIGHTS
]
QUANTIZATION_LAYERS = LAYERS_WITH_WEIGHTS
NOT_SUPPORT_LAYERS = [
'Lambda'
]
@TF_COMPRESSION_ALGORITHMS.register('quantization')
class QuantizationBuilder(CompressionAlgorithmBuilder):
def __init__(self, config):
super().__init__(config)
self.quantize_inputs = self.config.get('quantize_inputs', True)
self.quantize_outputs = self.config.get('quantize_outputs', False)
self.global_quantizer_constraints = {}
self.ignored_scopes_per_group = {}
self.target_scopes_per_group = {}
for quantizer_group in QUANTIZER_GROUPS:
self._parse_group_params(self.config, quantizer_group)
def build_controller(self, model):
return QuantizationController(model)
def _parse_group_params(self, config, quantizer_group):
params_dict = config.get(quantizer_group, {})
self.global_quantizer_constraints[quantizer_group] = QuantizationConstraints(
num_bits=params_dict.get('bits'),
mode=params_dict.get('mode'),
signed=params_dict.get('signed'),
per_channel=params_dict.get('per_channel'),
narrow_range=(quantizer_group == WEIGHTS)
)
self.ignored_scopes_per_group[quantizer_group] = config.get('ignored_scopes', []) \
+ params_dict.get('ignored_scopes', [])
self.target_scopes_per_group[quantizer_group] = params_dict.get('target_scopes')
def _get_default_qconfig(self, constraints: QuantizationConstraints = None):
qconfig = QuantizerConfig(num_bits=8,
mode=QuantizationMode.SYMMETRIC,
signed=None,
per_channel=False,
narrow_range=False)
if constraints is not None:
qconfig = constraints.apply_constraints_to(qconfig)
return qconfig
def _create_quantizer(self, qconfig: QuantizerConfig):
quantizer_cls = NNCF_QUANTIZATION_OPERATONS.get(qconfig.mode)
return quantizer_cls(qconfig)
def get_transformation_layout(self, model):
nxmodel = convert_keras_model_to_nxmodel(model)
for node_name, node in nxmodel.nodes.items():
if node['type'] in NOT_SUPPORT_LAYERS:
logger.warning('The layer {} is not supported by the quantization algorithm'
.format(get_original_name_and_instance_index(node_name)[0]))
transformations = TransformationLayout()
qconfig = self._get_default_qconfig(self.global_quantizer_constraints[WEIGHTS])
shared_nodes = set()
for node_name, node in nxmodel.nodes.items():
original_node_name, _ = get_original_name_and_instance_index(node_name)
if node['type'] not in QUANTIZATION_LAYERS \
or is_ignored(node_name, self.ignored_scopes_per_group[WEIGHTS]) \
or original_node_name in shared_nodes:
continue
if node['is_shared']:
shared_nodes.add(original_node_name)
operation = self._create_quantizer(qconfig)
weight_attr_name = QUANTIZATION_LAYERS[node['type']]['weight_attr_name']
transformations.register(
InsertionCommand(
target_point=LayerWeight(original_node_name, weight_attr_name),
callable_object=operation,
priority=TransformationPriority.QUANTIZATION_PRIORITY
))
insertion_points = self._find_insertion_points(nxmodel)
qconfig = self._get_default_qconfig(self.global_quantizer_constraints[ACTIVATIONS])
for original_node_name, instance_index in insertion_points:
fake_quantize_name = self._get_fake_quantize_name(original_node_name, instance_index)
fake_quantize_layer = FakeQuantize(qconfig, name=fake_quantize_name)
transformations.register(
InsertionCommand(
target_point=AfterLayer(original_node_name, instance_index),
callable_object=fake_quantize_layer,
priority=TransformationPriority.QUANTIZATION_PRIORITY
))
return transformations
def _find_insertion_points(self, nxmodel):
def _filter_fn(node_name, node):
return not is_ignored(node_name, self.ignored_scopes_per_group[ACTIVATIONS]) \
and 'float' in node['dtype'].lower()
pattern = p.LINEAR_OPS | p.ELEMENTWISE | p.ANY_BN_ACT_COMBO | \
p.LINEAR_OPS + p.ANY_AG_BN_ACT_COMBO | p.ELEMENTWISE + p.ANY_AG_BN_ACT_COMBO | p.SINGLE_OPS
matches = search_all(nxmodel, pattern)
topological_order = {node: k for k, node in enumerate(nx.topological_sort(nxmodel))}
insertion_points = [max(match, key=topological_order.__getitem__) for match in matches]
if self.quantize_inputs:
for node_name, degree in nxmodel.in_degree:
if degree > 0:
continue
preprocessing_nodes = self._get_input_preprocessing_nodes(nxmodel, node_name)
if preprocessing_nodes:
for n in preprocessing_nodes[:-1]:
if n in insertion_points:
insertion_points.remove(node_name)
elif _filter_fn(node_name, nxmodel.nodes[node_name]):
insertion_points = [node_name] + insertion_points
if not self.quantize_outputs:
outputs = []
for node_name in nxmodel.nodes:
if nxmodel.out_degree(node_name) == 0:
outputs.append(node_name)
for output in outputs:
for quantized_node in self._get_quantized_nodes_for_output(nxmodel, insertion_points, output):
insertion_points.remove(quantized_node)
insertion_points = [point for point in insertion_points
if _filter_fn(point, nxmodel.nodes[point])]
return [get_original_name_and_instance_index(point) for point in insertion_points]
def _get_input_preprocessing_nodes(self, nxmodel, node_name, preprocessing_nodes=None):
if preprocessing_nodes is None:
preprocessing_nodes = []
if nxmodel.out_degree(node_name) == 1:
successor = next(nxmodel.successors(node_name))
if nxmodel.nodes[successor]['type'] in ELEMENTWISE_LAYERS and nxmodel.in_degree(successor) == 1:
preprocessing_nodes.append(successor)
return self._get_input_preprocessing_nodes(nxmodel, successor, preprocessing_nodes)
return preprocessing_nodes
def _get_quantized_nodes_for_output(self, nxmodel, insetrion_points, node_name, quantized_nodes_for_output=None):
if quantized_nodes_for_output is None:
if node_name in insetrion_points:
return [node_name]
quantized_nodes_for_output = []
for predecessor in nxmodel.predecessors(node_name):
if nxmodel.out_degree(predecessor) > 1:
logger.warning('Removing of FakeQuantize after layer {} '
'with multiple outputs is not fully supported'.format(predecessor))
if nxmodel.nodes[predecessor]['type'] in LAYERS_AGNOSTIC_TO_DATA_PRECISION:
self._get_quantized_nodes_for_output(nxmodel, insetrion_points,
predecessor, quantized_nodes_for_output)
elif predecessor in insetrion_points:
quantized_nodes_for_output.append(predecessor)
return quantized_nodes_for_output
def _get_fake_quantize_name(self, node_name, instance_index):
if instance_index == 0:
return '{}/fake_quantize'.format(node_name)
return '{}/fake_quantize_{}'.format(node_name, instance_index)
class QuantizationController(CompressionAlgorithmController):
    """Controller for the quantization compression algorithm.

    Exposes initialization of the inserted FakeQuantize layers' quantization
    ranges from statistics collected over a representative dataset.
    """

    def __init__(self, target_model):
        super().__init__(target_model)
        # Min/max statistics collector used to seed the quantizer ranges.
        self._initializer = MinMaxInitializer()

    def initialize(self, dataset=None, loss=None):
        """Run the range initializer over the compressed model."""
        self._initializer(self._model, dataset, loss)
|
[
"beta.nncf.tensorflow.quantization.initializers.minmax.MinMaxInitializer",
"beta.nncf.tensorflow.graph.utils.get_original_name_and_instance_index",
"networkx.topological_sort",
"beta.nncf.tensorflow.graph.pattern_matching.search_all",
"beta.nncf.tensorflow.graph.converter.convert_keras_model_to_nxmodel",
"beta.nncf.tensorflow.graph.transformations.commands.LayerWeight",
"beta.nncf.tensorflow.algorithm_selector.TF_COMPRESSION_ALGORITHMS.register",
"beta.nncf.tensorflow.quantization.layers.FakeQuantize",
"beta.nncf.utils.utils.is_ignored",
"beta.nncf.tensorflow.graph.transformations.layout.TransformationLayout",
"beta.nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATONS.get",
"beta.nncf.tensorflow.graph.transformations.commands.AfterLayer",
"beta.nncf.tensorflow.quantization.config.QuantizerConfig"
] |
[((2436, 2486), 'beta.nncf.tensorflow.algorithm_selector.TF_COMPRESSION_ALGORITHMS.register', 'TF_COMPRESSION_ALGORITHMS.register', (['"""quantization"""'], {}), "('quantization')\n", (2470, 2486), False, 'from beta.nncf.tensorflow.algorithm_selector import TF_COMPRESSION_ALGORITHMS\n'), ((3924, 4040), 'beta.nncf.tensorflow.quantization.config.QuantizerConfig', 'QuantizerConfig', ([], {'num_bits': '(8)', 'mode': 'QuantizationMode.SYMMETRIC', 'signed': 'None', 'per_channel': '(False)', 'narrow_range': '(False)'}), '(num_bits=8, mode=QuantizationMode.SYMMETRIC, signed=None,\n per_channel=False, narrow_range=False)\n', (3939, 4040), False, 'from beta.nncf.tensorflow.quantization.config import QuantizerConfig\n'), ((4380, 4425), 'beta.nncf.tensorflow.layers.custom_objects.NNCF_QUANTIZATION_OPERATONS.get', 'NNCF_QUANTIZATION_OPERATONS.get', (['qconfig.mode'], {}), '(qconfig.mode)\n', (4411, 4425), False, 'from beta.nncf.tensorflow.layers.custom_objects import NNCF_QUANTIZATION_OPERATONS\n'), ((4531, 4568), 'beta.nncf.tensorflow.graph.converter.convert_keras_model_to_nxmodel', 'convert_keras_model_to_nxmodel', (['model'], {}), '(model)\n', (4561, 4568), False, 'from beta.nncf.tensorflow.graph.converter import convert_keras_model_to_nxmodel\n'), ((4886, 4908), 'beta.nncf.tensorflow.graph.transformations.layout.TransformationLayout', 'TransformationLayout', ([], {}), '()\n', (4906, 4908), False, 'from beta.nncf.tensorflow.graph.transformations.layout import TransformationLayout\n'), ((7096, 7124), 'beta.nncf.tensorflow.graph.pattern_matching.search_all', 'search_all', (['nxmodel', 'pattern'], {}), '(nxmodel, pattern)\n', (7106, 7124), False, 'from beta.nncf.tensorflow.graph.pattern_matching import search_all\n'), ((10517, 10536), 'beta.nncf.tensorflow.quantization.initializers.minmax.MinMaxInitializer', 'MinMaxInitializer', ([], {}), '()\n', (10534, 10536), False, 'from beta.nncf.tensorflow.quantization.initializers.minmax import MinMaxInitializer\n'), ((5116, 5163), 
'beta.nncf.tensorflow.graph.utils.get_original_name_and_instance_index', 'get_original_name_and_instance_index', (['node_name'], {}), '(node_name)\n', (5152, 5163), False, 'from beta.nncf.tensorflow.graph.utils import get_original_name_and_instance_index\n'), ((6276, 6322), 'beta.nncf.tensorflow.quantization.layers.FakeQuantize', 'FakeQuantize', (['qconfig'], {'name': 'fake_quantize_name'}), '(qconfig, name=fake_quantize_name)\n', (6288, 6322), False, 'from beta.nncf.tensorflow.quantization.layers import FakeQuantize\n'), ((8477, 8520), 'beta.nncf.tensorflow.graph.utils.get_original_name_and_instance_index', 'get_original_name_and_instance_index', (['point'], {}), '(point)\n', (8513, 8520), False, 'from beta.nncf.tensorflow.graph.utils import get_original_name_and_instance_index\n'), ((5244, 5305), 'beta.nncf.utils.utils.is_ignored', 'is_ignored', (['node_name', 'self.ignored_scopes_per_group[WEIGHTS]'], {}), '(node_name, self.ignored_scopes_per_group[WEIGHTS])\n', (5254, 5305), False, 'from beta.nncf.utils.utils import is_ignored\n'), ((6770, 6835), 'beta.nncf.utils.utils.is_ignored', 'is_ignored', (['node_name', 'self.ignored_scopes_per_group[ACTIVATIONS]'], {}), '(node_name, self.ignored_scopes_per_group[ACTIVATIONS])\n', (6780, 6835), False, 'from beta.nncf.utils.utils import is_ignored\n'), ((7188, 7216), 'networkx.topological_sort', 'nx.topological_sort', (['nxmodel'], {}), '(nxmodel)\n', (7207, 7216), True, 'import networkx as nx\n'), ((5728, 5777), 'beta.nncf.tensorflow.graph.transformations.commands.LayerWeight', 'LayerWeight', (['original_node_name', 'weight_attr_name'], {}), '(original_node_name, weight_attr_name)\n', (5739, 5777), False, 'from beta.nncf.tensorflow.graph.transformations.commands import LayerWeight\n'), ((6428, 6474), 'beta.nncf.tensorflow.graph.transformations.commands.AfterLayer', 'AfterLayer', (['original_node_name', 'instance_index'], {}), '(original_node_name, instance_index)\n', (6438, 6474), False, 'from 
beta.nncf.tensorflow.graph.transformations.commands import AfterLayer\n'), ((4806, 4853), 'beta.nncf.tensorflow.graph.utils.get_original_name_and_instance_index', 'get_original_name_and_instance_index', (['node_name'], {}), '(node_name)\n', (4842, 4853), False, 'from beta.nncf.tensorflow.graph.utils import get_original_name_and_instance_index\n')]
|
import abc
import csv
import uuid
import json
import os
import numpy as np
import requests
import joblib
import pandas
from scipy.sparse import csr_matrix
from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel
from tworaven_solver import Dataset
from collections import defaultdict
from sklearn import model_selection
from tworaven_solver.model import BaseModelWrapper
class Model(object):
    """Abstract wrapper around a fitted model produced by one of the supported
    AutoML backends (auto_sklearn, tpot, mlbox, mljar-supervised, ludwig, h2o,
    caret, TwoRavens).

    Subclasses implement describe/score/produce/save for their backend;
    `Model.load` reconstructs the right subclass from a saved metadata.json.
    """

    def __init__(self, model, system, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
        if model_id is None:
            # Register a database row to obtain a unique persistent model id.
            db_model = StatisticalModel.objects.create()
            model_id = 'oss-' + str(db_model.model_id)

        self.model = model
        self.system = system
        self.model_id = model_id
        self.search_id = search_id
        self.predictors = predictors
        self.targets = targets

        # which dataset model is currently trained on
        self.train_specification = train_specification
        self.task = task

    @abc.abstractmethod
    def describe(self):
        """Return a JSON-serializable description of the fitted model."""
        pass

    @abc.abstractmethod
    def score(self, specification):
        """Evaluate the model under the requested performance metrics."""
        pass

    @abc.abstractmethod
    def produce(self, specification):
        """Write predictions for the given input dataset and return a pointer."""
        pass

    @abc.abstractmethod
    def save(self):
        """Persist the model and its metadata under SAVED_MODELS_PATH."""
        pass

    @staticmethod
    def load(model_id):
        """Reconstruct a saved Model subclass instance from its metadata.json.

        Raises FileNotFoundError if no metadata exists for `model_id`, and
        ValueError if the recorded system type is not recognized.
        """
        model_folder_path = os.path.join(SAVED_MODELS_PATH, model_id)
        metadata_path = os.path.join(model_folder_path, 'metadata.json')

        if not os.path.exists(metadata_path):
            # include the missing path so callers can see what was looked up
            raise FileNotFoundError(metadata_path)

        with open(metadata_path, 'r') as metadata_file:
            metadata = json.load(metadata_file)

        if metadata['system'] in ['auto_sklearn', 'tpot', 'mlbox', 'mljar-supervised']:
            # optional fitted preprocessor (e.g. one-hot encoder) saved alongside
            preprocess = None
            if os.path.exists(os.path.join(model_folder_path, 'preprocess.joblib')):
                preprocess = joblib.load(os.path.join(model_folder_path, 'preprocess.joblib'))

            return ModelSklearn(
                model=joblib.load(os.path.join(model_folder_path, 'model.joblib')),
                predictors=metadata['predictors'],
                targets=metadata['targets'],
                system=metadata['system'],
                model_id=model_id,
                search_id=metadata['search_id'],
                train_specification=metadata['train_specification'],
                preprocess=preprocess,
                task=metadata['task'])

        if metadata['system'] == 'ludwig':
            # imported lazily: ludwig is heavy and only needed for its own models
            from ludwig.api import LudwigModel
            return ModelLudwig(
                model=LudwigModel.load(model_folder_path),
                predictors=metadata['predictors'],
                targets=metadata['targets'],
                model_id=model_id,
                search_id=metadata['search_id'],
                task=metadata['task'])

        if metadata['system'] == 'h2o':
            import h2o
            # the h2o server must be running before a model can be loaded
            h2o.init()
            return ModelH2O(
                model=h2o.load_model(os.path.join(model_folder_path, metadata['model_filename'])),
                model_id=model_id,
                predictors=metadata['predictors'],
                targets=metadata['targets'],
                search_id=metadata['search_id'],
                train_specification=metadata['train_specification'],
                task=metadata['task'])

        if metadata['system'] == 'TwoRavens':
            return ModelTwoRavens(
                model=BaseModelWrapper.load(model_folder_path, metadata),
                system='TwoRavens',
                predictors=metadata['predictors'],
                targets=metadata['targets'],
                model_id=metadata['model_id'],
                search_id=metadata['search_id'],
                task=metadata['task']
            )

        raise ValueError(f'System type "{metadata["system"]}" is not recognized.')

    def make_splits(self, configuration, data):
        """Yield (train, test) dataframe pairs per the evaluation configuration.

        Supports 'K_FOLD' (optionally stratified) and 'HOLDOUT'; raises
        ValueError for any other method.
        """
        if configuration['method'] == 'K_FOLD':
            split_arguments = {
                'n_splits': configuration.get('folds', 10),
                'shuffle': configuration.get('shuffle', False),
                'random_state': configuration.get('randomSeed')
            }
            if configuration['stratified']:
                return ((data.iloc[train_indices], data.iloc[test_indices]) for train_indices, test_indices in
                        model_selection.StratifiedKFold(**split_arguments).split(data, data[self.targets[0]]))
            else:
                return ((data.iloc[train_indices], data.iloc[test_indices]) for train_indices, test_indices in
                        model_selection.KFold(**split_arguments).split(data))
        elif configuration['method'] == 'HOLDOUT':
            # progressively relax the split: stratified -> unstratified -> defaults,
            # because stratification fails on rare classes / continuous targets
            try:
                return [model_selection.train_test_split(
                    data,
                    test_size=float(configuration.get('trainTestRatio', 0.35)),
                    stratify=data[self.targets[0]] if configuration.get('stratified') else None,
                    random_state=configuration.get('randomSeed'))]
            except (TypeError, ValueError):
                try:
                    return [model_selection.train_test_split(
                        data,
                        test_size=float(configuration.get('trainTestRatio', 0.35)),
                        stratify=None,
                        random_state=configuration.get('randomSeed'))]
                except (TypeError, ValueError):
                    return [model_selection.train_test_split(
                        data,
                        random_state=configuration.get('randomSeed'))]
        else:
            # BUG FIX: `configuration` is a dict; the original `configuration.method`
            # raised AttributeError instead of this intended ValueError
            raise ValueError(f'Invalid evaluation method: {configuration["method"]}')
class ModelSklearn(Model):
    """Wrapper for sklearn-API backends: auto_sklearn, tpot, mlbox and
    mljar-supervised. Handles per-backend quirks in input formatting,
    fitting, prediction decoding and persistence.
    """

    def __init__(self, model, system, predictors, targets, model_id=None, search_id=None, preprocess=None, train_specification=None, task=None):
        super().__init__(model, system, predictors, targets, model_id, search_id, train_specification, task)
        # categorical one hot encoding
        self.preprocess = preprocess

    def make_stimulus(self, data):
        """Select predictor columns and reshape them into the form the backend expects."""
        stimulus = data[self.predictors]
        if self.preprocess:
            stimulus = self.preprocess.transform(stimulus)
        if self.system == 'mlbox':
            # must have a dense pandas array
            if issubclass(type(stimulus), csr_matrix):
                stimulus = stimulus.toarray()
            stimulus = pandas.DataFrame(stimulus)
        if self.system == 'mljar-supervised':
            # must have a pandas array with formatted column names (so they don't get modified by the solver)
            stimulus = pandas.DataFrame(stimulus)
            stimulus.columns = [str(i).strip() for i in stimulus.columns]
        return stimulus

    def describe(self):
        """Return the backend-specific estimator name and repr."""
        model_name = self.model.__class__.__name__
        description = str(self.model)
        if self.system == 'mljar-supervised':
            model_name = self.model.get_name()
        if self.system == 'mlbox':
            # mlbox wraps the actual estimator; report the inner one
            model_name = self.model.get_estimator().__class__.__name__
            description = str(self.model.get_estimator())
        return {
            "model": model_name,
            "description": description,
            "model_id": self.model_id,
            "search_id": self.search_id,
            "system": self.system
        }

    def score(self, specification):
        """Cross-validate the model per `specification` and aggregate metric values,
        weighted by test-split size. Metrics that fail to evaluate are skipped."""
        dataframe = Dataset(specification['input']).get_dataframe()[self.predictors + self.targets].dropna()
        dataframe.reset_index(drop=True, inplace=True)

        configuration = specification['configuration']
        splits = self.make_splits(configuration, dataframe)
        split_scores = defaultdict(list)
        split_weights = defaultdict(list)
        for train_split, test_split in splits:
            self.fit(self.make_stimulus(train_split), train_split[self.targets[0]])

            actual = np.array(test_split[self.targets[0]])
            predicted = self.model.predict(self.make_stimulus(test_split))

            if 'CLASSIFICATION' in self.task:
                actual = actual.astype(int)

            if self.system == 'mljar-supervised':
                # decode mljar's probability frame into a hard 0/1 label column
                predicted = pandas.DataFrame((predicted.idxmax(axis=1) == 'p_1').astype(int))
                predicted.columns = [self.targets[0]]

            for metric in specification['performanceMetrics']:
                try:
                    # keyed by the metric's JSON so different configs stay separate
                    split_scores[json.dumps(metric)].append(get_metric(metric)(actual, predicted))
                    split_weights[json.dumps(metric)].append(test_split.size)
                except Exception:
                    # best-effort: a metric incompatible with this task is skipped
                    pass

        scores = []
        for metric in split_scores:
            if split_scores[metric]:
                scores.append({
                    'value': np.average(split_scores[metric], weights=split_weights[metric]),
                    'metric': json.loads(metric),
                    'target': self.targets[0]
                })

        return {
            'search_id': self.search_id,
            'model_id': self.model_id,
            'scores': scores,
            'system': self.system
        }

    def fit(self, stimulus, target, specification=None):
        """Fit (or refit) on `stimulus`/`target`, skipping work when already
        trained on the same specification; persists the model afterwards."""
        # check if model has already been trained for the same dataset
        specification_str = json.dumps(specification) if specification else None
        if self.train_specification and self.train_specification == specification_str:
            return
        self.train_specification = specification_str

        if self.system == 'auto_sklearn':
            self.model.refit(stimulus, target)
        elif self.system == 'mljar-supervised':
            self.model.train({"train": {
                "X": stimulus, 'y': target
            }})
        else:
            self.model.fit(stimulus, target)

        self.save()

    def produce(self, specification):
        """Refit on the train split, predict on the input split, and write the
        predictions (labels or probabilities) to a CSV in the output resource."""
        configuration = specification.get('configuration', {})
        predict_type = configuration.get('predict_type', 'RAW')

        # REFIT
        dataframe_train = Dataset(specification['train']).get_dataframe().dropna()
        stimulus = self.make_stimulus(dataframe_train[self.predictors])
        self.fit(stimulus, dataframe_train[self.targets[0]], specification['train'])

        # PRODUCE
        dataframe = Dataset(specification['input']).get_dataframe().dropna()
        dataframe.reset_index(drop=True, inplace=True)
        stimulus = self.make_stimulus(dataframe[self.predictors])

        output_directory_path = specification['output']['resource_uri'].replace('file://', '')
        output_path = '/' + os.path.join(
            *output_directory_path.split('/'),
            str(uuid.uuid4()) + '.csv')

        if self.system == 'mljar-supervised':
            predictions = self.model.predict(stimulus)
            if predict_type == 'RAW':
                predictions = pandas.DataFrame((predictions.idxmax(axis=1) == 'p_1').astype(int))
                predictions.columns = [self.targets[0]]
        else:
            if predict_type == 'RAW':
                predictions = self.model.predict(stimulus)
                if len(predictions.shape) > 1:
                    # one-hot / per-class outputs: collapse to the argmax label
                    predictions = np.argmax(predictions, axis=-1)
                predictions = pandas.DataFrame(predictions, columns=[self.targets[0]]).astype(int)
            else:
                predictions = self.model.predict_proba(stimulus)
                # TODO: standardize probability column names
                predictions = pandas.DataFrame(predictions, columns=[f'p_{i}' for i in range(predictions.shape[1])])

        predictions.reset_index(drop=True, inplace=True)
        predictions.insert(0, 'd3mIndex', dataframe['d3mIndex'])

        os.makedirs(output_directory_path, exist_ok=True)

        # chdir to root so the absolute output path resolves regardless of cwd
        cwd = os.getcwd()
        try:
            os.chdir('/')
            predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
        finally:
            os.chdir(cwd)

        return {
            'produce': {
                'input': specification['input'],
                'configuration': configuration,
                'data_pointer': output_path
            },
            'search_id': self.search_id,
            'model_id': self.model_id,
            'system': self.system
        }

    def save(self):
        """Write metadata.json, the joblib'd model, and (if present) the preprocessor."""
        model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
        metadata_path = os.path.join(model_folder_path, 'metadata.json')
        # BUG FIX: the old guard checked metadata_path before makedirs, which
        # raised FileExistsError when the folder existed without metadata.json
        os.makedirs(model_folder_path, exist_ok=True)

        with open(metadata_path, 'w') as metadata_file:
            json.dump({
                'system': str(self.system),
                'model_id': str(self.model_id),
                'predictors': self.predictors,
                'targets': self.targets,
                'train_specification': self.train_specification,
                'search_id': self.search_id,
                'task': self.task
            }, metadata_file)

        joblib.dump(self.model, os.path.join(model_folder_path, 'model.joblib'))
        if self.preprocess:
            joblib.dump(self.preprocess, os.path.join(model_folder_path, 'preprocess.joblib'))
class ModelCaret(Model):
    """Proxy for a model hosted by the remote caret R service.

    All operations are forwarded over HTTP; nothing is held locally
    beyond the model id.
    """

    def __init__(self, model, predictors, targets, model_id=None, search_id=None):
        super().__init__(model, 'caret', predictors, targets, model_id, search_id)

    def _call_app(self, app_name, payload):
        # POST to the caret R service and unwrap its {success, message, data} envelope.
        response = requests.post(R_SERVICE + app_name, json=payload).json()
        if not response['success']:
            raise ValueError(response['message'])
        return response['data']

    def describe(self):
        """Ask the remote service to describe the model."""
        return self._call_app('caretDescribe.app', {'model_id': self.model_id})

    def score(self, specification):
        """Ask the remote service to score the model under `specification`."""
        return self._call_app('caretScore.app', {
            'model_id': self.model_id,
            'specification': specification
        })

    def produce(self, specification):
        """Ask the remote service to produce predictions under `specification`."""
        return self._call_app('caretProduce.app', {
            'model_id': self.model_id,
            'specification': specification
        })

    def save(self):
        # ignore, model is only present in remote caret.app
        raise ValueError('Caret model is not saveable in Python.')
class ModelH2O(Model):
    """Wrapper for a model trained on a running h2o server.

    Data is moved through h2o frames (`h2o.import_file`); classification
    targets must be coerced to factors before train/predict.
    """

    def __init__(self, model, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
        super().__init__(model, 'h2o', predictors, targets, model_id, search_id, train_specification, task=task)

    def describe(self):
        """Return the h2o algorithm/type identifiers for this model."""
        return {
            "model": f'{self.model.algo}-{self.model.type}',
            "description": f'{self.model.algo}-{self.model.type}',
            "model_id": self.model_id,
            'search_id': self.search_id,
            "system": self.system,
        }

    def fit(self, data, specification=None):
        """Train on an h2o frame unless already trained on the same specification,
        then persist the model."""
        # check if model has already been trained for the same dataset
        specification_str = json.dumps(specification) if specification else None
        if self.train_specification and self.train_specification == specification_str:
            return
        self.train_specification = specification_str
        self.model.train(y=self.targets[0], x=self.predictors, training_frame=data)
        self.save()

    def score(self, specification):
        """Predict on the input split and evaluate the requested metrics.

        NOTE(review): cross-validation support is commented out below, so
        scoring currently evaluates on the input frame as-is and
        `configuration` is unused.
        """
        import h2o
        configuration = specification['configuration']
        resource_uri = Dataset(specification['input']).get_resource_uri()
        data = h2o.import_file(resource_uri)

        y = self.targets[0]
        if 'CLASSIFICATION' in self.task:
            # h2o requires a factor (categorical) target for classification;
            # real-valued columns must pass through character first
            if data.types[y] == u'real':
                data[y] = data[y].ascharacter()
            data[y] = data[y].asfactor()

        results = pandas.DataFrame({
            'predict': self.model.predict(data).as_data_frame()['predict'],
            'actual': data[y].as_data_frame()[y]
        }).dropna()

        if 'CLASSIFICATION' in self.task:
            if data.types[y] == u'real':
                data[y] = data[y].ascharacter()
            results['actual'] = results['actual'].astype(int)

        scores = []
        for metric_schema in specification['performanceMetrics']:
            try:
                scores.append({
                    'value': get_metric(metric_schema)(
                        results['actual'],
                        results['predict']),
                    'metric': metric_schema,
                    'target': y
                })
            except ValueError as err:
                # best-effort: report and skip metrics incompatible with this task
                print(f'Could not evaluate metric: {str(metric_schema)}')
                print(err)

        # if configuration.get('stratified'):
        #     # how does h2o know which column to stratify for? weirdness here
        #     folds = data.stratified_kfold_column(n_folds=configuration['folds'])
        # else:
        #     folds = data.kfold_column(n_folds=configuration['folds'])
        #
        # split_scores = defaultdict(list)
        # split_weights = defaultdict(list)
        # for split_id in range(configuration['folds']):
        #     train, test = data[folds != split_id], data[folds == split_id]
        #     self.fit(train)
        #     results = pandas.DataFrame({
        #         'predict': self.model.predict(test).as_data_frame()['predict'],
        #         'actual': test[self.targets[0]].as_data_frame()[self.targets[0]]
        #     }).dropna()
        #
        #     if 'CLASSIFICATION' in self.task:
        #         results['actual'] = results['actual'].astype(int)
        #
        #     for metric_schema in specification['performanceMetrics']:
        #         try:
        #             split_scores[json.dumps(metric_schema)].append(get_metric(metric_schema)(
        #                 results['actual'],
        #                 results['predict']))
        #             split_weights[json.dumps(metric_schema)].append(results.size)
        #         except ValueError as err:
        #             print(f'Could not evaluate metric: {str(metric_schema)}')
        #             print(err)

        # for metric in split_scores:
        #     scores.append({
        #         'value': np.average(split_scores[metric], weights=split_weights[metric]),
        #         'metric': json.loads(metric),
        #         'target': self.targets[0]
        #     })

        return {
            'search_id': self.search_id,
            'model_id': self.model_id,
            'scores': scores,
            "system": self.system
        }

    def produce(self, specification):
        """Refit on the train split, predict on the input split, and write
        predictions to a CSV in the output resource."""
        import h2o
        configuration = specification.get('configuration', {})
        predict_type = configuration.get('predict_type', 'RAW')

        train = h2o.import_file(Dataset(specification['train']).get_resource_uri())
        y = self.targets[0]
        if 'CLASSIFICATION' in self.task:
            # classification targets must be factors (real values via character)
            if train.types[y] == u'real':
                train[y] = train[y].ascharacter()
            train[self.targets[0]] = train[self.targets[0]].asfactor()
        self.fit(train, specification['train'])

        test_dataset = Dataset(specification['input'])
        data = h2o.import_file(test_dataset.get_resource_uri())
        if 'CLASSIFICATION' in self.task:
            if data.types[y] == u'real':
                data[y] = data[y].ascharacter()
            data[y] = data[y].asfactor()

        # retry once — the first h2o predict call occasionally fails transiently
        try:
            predictions = self.model.predict(data).as_data_frame()
        except Exception as err:
            predictions = self.model.predict(data).as_data_frame()

        if predict_type == 'RAW':
            if 'CLASSIFICATION' in self.task:
                if data.types[y] == u'real':
                    data[y] = data[y].ascharacter()

            # keep only the hard label column, renamed to the target
            predictions = predictions[['predict']]
            predictions.columns = [y]
        else:
            # TODO: standardize probability column names
            # NOTE(review): positional `axis` in DataFrame.drop is deprecated
            # in newer pandas; consider drop(columns='predict')
            predictions.drop('predict', 1, inplace=True)

        predictions['d3mIndex'] = test_dataset.get_dataframe()['d3mIndex']

        output_directory_path = specification['output']['resource_uri'].replace('file://', '')
        output_path = '/' + os.path.join(
            *output_directory_path.split('/'),
            str(uuid.uuid4()) + '.csv')

        if not os.path.exists(output_directory_path):
            os.makedirs(output_directory_path)

        # chdir to root so the absolute output path resolves regardless of cwd
        cwd = os.getcwd()
        try:
            os.chdir('/')
            predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
        finally:
            os.chdir(cwd)

        return {
            'produce': {
                'input': specification['input'],
                'configuration': configuration,
                'data_pointer': output_path
            },
            'search_id': self.search_id,
            'model_id': self.model_id,
            "system": self.system
        }

    def save(self):
        """Persist the h2o model binary plus a metadata.json describing it."""
        import h2o
        model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
        metadata_path = os.path.join(model_folder_path, 'metadata.json')
        # NOTE(review): raises FileExistsError if the folder exists without
        # metadata.json; consider os.makedirs(..., exist_ok=True)
        if not os.path.exists(metadata_path):
            os.makedirs(model_folder_path)

        model_path = h2o.save_model(self.model, path=model_folder_path, force=True)

        with open(metadata_path, 'w') as metadata_file:
            json.dump({
                'system': self.system,
                'model_id': self.model_id,
                'search_id': self.search_id,
                # only the basename is stored; load() rejoins it with the folder
                'model_filename': os.path.basename(model_path),
                'predictors': self.predictors,
                'targets': self.targets,
                'train_specification': self.train_specification,
                'task': self.task
            }, metadata_file)
class ModelLudwig(Model):
    """Wrapper for a Ludwig (multilayer feedforward network) model.

    Ludwig prediction frames expose per-target columns named
    '<target>_predictions' and '<target>_probabilities_<class>'.
    """

    def __init__(self, model, predictors, targets, model_id=None, search_id=None, train_specification=None, task=None):
        super().__init__(model, 'ludwig', predictors, targets, model_id, search_id, train_specification, task)

    def describe(self):
        return {
            # TODO: extract more relevant description of model algorithm
            "model": 'multilayer feedforward network',
            "description": str(self.model),
            "model_id": self.model_id,
            "search_id": self.search_id,
            "system": self.system
        }

    def score(self, specification):
        """Predict on the input split and evaluate the requested metrics
        against the '<target>_predictions' column."""
        # TODO: refitting -> respect configuration
        configuration = specification['configuration']
        dataframe = Dataset(specification['input']).get_dataframe()

        target = self.targets[0]
        if self.task == 'CLASSIFICATION':
            # ludwig expects string-typed categorical targets
            dataframe[target] = dataframe[target].astype(str)
        predicted = self.model.predict(dataframe)

        scores = []
        for metric in specification['performanceMetrics']:
            scores.append({
                'value': get_metric(metric)(dataframe[target], predicted[f'{target}_predictions']),
                'metric': metric,
                'target': target
            })

        return {
            'search_id': self.search_id,
            'model_id': self.model_id,
            'scores': scores,
            'system': self.system
        }

    def produce(self, specification):
        """Predict on the input split (labels or class probabilities) and
        write the result to a CSV in the output resource."""
        configuration = specification.get('configuration', {})
        predict_type = configuration.get('predict_type', 'RAW')
        dataset = Dataset(specification['input'])
        dataframe = dataset.get_dataframe()
        predictions = self.model.predict(dataframe)

        if predict_type == 'RAW':
            predictions = predictions[[f'{self.targets[0]}_predictions']]
            predictions.columns = [self.targets[0]]
        if predict_type == 'PROBABILITIES':
            # BUG FIX: the original interpolated the whole `self.targets` list
            # (f'{self.targets}_probabilities_'), which never matched any column;
            # the probability columns are prefixed with the target's name
            predictions = predictions[[i for i in predictions.columns.values
                                       if i.startswith(f'{self.targets[0]}_probabilities_')]]
        predictions.insert(0, 'd3mIndex', dataframe['d3mIndex'])

        output_directory_path = specification['output']['resource_uri'].replace('file://', '')
        output_path = '/' + os.path.join(
            *output_directory_path.split('/'),
            str(uuid.uuid4()) + '.csv')

        os.makedirs(output_directory_path, exist_ok=True)

        # chdir to root so the absolute output path resolves regardless of cwd
        cwd = os.getcwd()
        try:
            os.chdir('/')
            predictions.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
        finally:
            os.chdir(cwd)

        return {
            'produce': {
                'input': specification['input'],
                'configuration': configuration,
                'data_pointer': output_path
            },
            'search_id': self.search_id,
            'model_id': self.model_id,
            "system": self.system
        }

    def save(self):
        """Persist the ludwig model directory plus a metadata.json describing it."""
        model_folder_path = os.path.join(SAVED_MODELS_PATH, self.model_id)
        metadata_path = os.path.join(model_folder_path, 'metadata.json')
        # exist_ok avoids FileExistsError when the folder survives without metadata
        os.makedirs(model_folder_path, exist_ok=True)
        self.model.save(model_folder_path)

        with open(metadata_path, 'w') as metadata_file:
            json.dump({
                'system': self.system,
                'model_id': self.model_id,
                'search_id': self.search_id,
                'model_filename': model_folder_path,
                'predictors': self.predictors,
                'targets': self.targets,
                'task': self.task
            }, metadata_file)
class ModelTwoRavens(Model):
    """Wrapper for TwoRavens' own BaseModelWrapper models.

    Supports FORECASTING (via `model.forecast`) as well as
    CLASSIFICATION/REGRESSION (via `model.predict`/`predict_proba`).
    """

    def describe(self):
        """Return the pipeline/problem specifications alongside the model repr."""
        description = self.model.describe() or {}
        # print(description)
        return {
            "model": self.model.pipeline_specification['model']['strategy'].lower(),
            "description": str(self.model.model),
            **description,
            "pipeline_specification": self.model.pipeline_specification,
            "problem_specification": self.model.problem_specification,
            "model_id": self.model_id,
            "search_id": self.search_id,
            "system": self.system
        }

    def score(self, score_specification):
        """Evaluate the requested metrics per target; ROC-style metrics are
        scored against predicted probabilities rather than hard labels."""
        # Looks like this function will only be called when encounter a test split
        dataframe = Dataset(score_specification['input']).get_dataframe()

        # any ROC* metric requires class probabilities in addition to labels
        prob_flag = False
        for eachMetric in score_specification['performanceMetrics']:
            if eachMetric.get('metric', '').startswith('ROC'):
                prob_flag = True

        if self.task == "FORECASTING":
            # For score computation, we only take the given "forecastingHorizon" into account
            forecast_length = self.model.problem_specification.get('forecastingHorizon', {"value": 10})
            forecast_length = forecast_length.get('value', 10)
            predicted = self.model.forecast(dataframe, forecast_length, forecast_mode='test')
        elif self.task in ['CLASSIFICATION', 'REGRESSION']:
            # TODO: respect configuration on holdout vs cross-validation, do refitting, etc.
            if self.task == 'CLASSIFICATION':
                # targets are compared as strings on both sides
                for target in self.targets:
                    dataframe[target] = dataframe[target].astype(str)
            predicted = self.model.predict(dataframe)
            if prob_flag:
                # Compute score if it's a classification problem
                predicted_prob = self.model.predict_proba(dataframe)
                if len(predicted_prob.shape) > 1 and predicted_prob.shape[1] == 2:
                    # Binary Classification, keep the probability of greater class only
                    predicted_prob = predicted_prob[:, [1]].ravel()
            if self.task == 'CLASSIFICATION':
                for target in self.targets:
                    predicted[target] = predicted[target].astype(str)
        else:
            raise NotImplementedError

        scores = []
        for target in self.targets:
            results = pandas.DataFrame({'actual': dataframe[target], 'predicted': predicted[target]})
            results.dropna(inplace=True)
            for eachMetric in score_specification['performanceMetrics']:
                try:
                    if eachMetric.get('metric', '').startswith('ROC'):
                        # ROC metrics consume probabilities, not labels
                        tmp_value = get_metric(eachMetric, self.model.problem_specification)(results['actual'], predicted_prob)
                    else:
                        tmp_value = get_metric(eachMetric, self.model.problem_specification)(results['actual'], results['predicted'])
                    scores.append({
                        'value': tmp_value,
                        'metric': eachMetric,
                        'target': target
                    })
                except ValueError:
                    # best-effort: skip metrics incompatible with this task
                    pass

        return {
            'search_id': self.search_id,
            'model_id': self.model_id,
            'scores': scores,
            'system': self.system
        }

    def fit(self, dataframe=None, data_specification=None):
        """Refit the wrapped model in place on the given data."""
        self.model.refit(
            dataframe=dataframe,
            data_specification=data_specification)

    def produce(self, produce_specification):
        """Predict (or forecast) on the input split and write the result to a
        CSV in the output resource."""
        # Looks like produce_specification contains input.name -- [train|test|all]
        configuration = produce_specification.get('configuration', {})
        predict_type = configuration.get('predict_type', 'RAW')
        dataframe = Dataset(produce_specification['input']).get_dataframe()
        data_type = produce_specification['input'].get('name', 'test')

        if self.task in ['REGRESSION', 'CLASSIFICATION']:
            # NOTE(review): train split is loaded but the refit call is disabled
            dataframe_train = Dataset(produce_specification['train']).get_dataframe().dropna()
            # self.fit(dataframe=dataframe_train, data_specification=produce_specification['train'])

        if predict_type == 'RAW':
            if "FORECASTING" == self.task:
                predicted = self.model.forecast(dataframe, len(dataframe.index), data_type)
            else:
                # NOTE(review): this branch appears unreachable in practice
                predicted = self.model.predict(dataframe)
        else:
            predicted = self.model.predict_proba(dataframe)
            # TODO: standardize probability column names
            predicted = pandas.DataFrame(predicted, columns=[f'p_{i}' for i in range(predicted.shape[1])])

        output_directory_path = produce_specification['output']['resource_uri'].replace('file://', '')
        output_path = '/' + os.path.join(
            *output_directory_path.split('/'),
            str(uuid.uuid4()) + '.csv')

        if 'd3mIndex' not in predicted.columns.values:
            predicted.insert(0, 'd3mIndex', dataframe['d3mIndex'])

        if not os.path.exists(output_directory_path):
            os.makedirs(output_directory_path)

        # chdir to root so the absolute output path resolves regardless of cwd
        cwd = os.getcwd()
        try:
            os.chdir('/')
            predicted.to_csv(output_path, index=False, quoting=csv.QUOTE_NONNUMERIC)
        finally:
            os.chdir(cwd)

        return {
            'produce': {
                'input': produce_specification['input'],
                'configuration': configuration,
                'data_pointer': output_path
            },
            'search_id': self.search_id,
            'model_id': self.model_id,
            'system': self.system
        }

    def save(self):
        """Persist the wrapped model plus a metadata.json describing it."""
        model_folder_dir = os.path.join(SAVED_MODELS_PATH, self.model_id)
        metadata_path = os.path.join(model_folder_dir, 'metadata.json')
        os.makedirs(model_folder_dir, exist_ok=True)

        with open(metadata_path, 'w') as metadata_file:
            json.dump({
                'system': str(self.system),
                'model_id': str(self.model_id),
                'predictors': self.predictors,
                'targets': self.targets,
                'search_id': self.search_id,
                'task': self.task
            }, metadata_file)

        self.model.save(model_folder_dir)
|
[
"numpy.argmax",
"json.dumps",
"collections.defaultdict",
"tworaven_solver.model.BaseModelWrapper.load",
"h2o.import_file",
"requests.post",
"os.path.join",
"os.chdir",
"pandas.DataFrame",
"h2o.save_model",
"json.loads",
"h2o.init",
"os.path.exists",
"tworaven_apps.solver_interfaces.models.StatisticalModel.objects.create",
"json.dump",
"numpy.average",
"os.path.basename",
"ludwig.api.LudwigModel.load",
"json.load",
"uuid.uuid4",
"os.makedirs",
"os.getcwd",
"sklearn.model_selection.KFold",
"tworaven_solver.Dataset",
"numpy.array",
"sklearn.model_selection.StratifiedKFold",
"tworaven_apps.solver_interfaces.models.get_metric"
] |
[((1390, 1431), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'model_id'], {}), '(SAVED_MODELS_PATH, model_id)\n', (1402, 1431), False, 'import os\n'), ((1456, 1504), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (1468, 1504), False, 'import os\n'), ((7754, 7771), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7765, 7771), False, 'from collections import defaultdict\n'), ((7796, 7813), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7807, 7813), False, 'from collections import defaultdict\n'), ((11870, 11881), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (11879, 11881), False, 'import os\n'), ((12423, 12469), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (12435, 12469), False, 'import os\n'), ((12494, 12542), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (12506, 12542), False, 'import os\n'), ((15839, 15868), 'h2o.import_file', 'h2o.import_file', (['resource_uri'], {}), '(resource_uri)\n', (15854, 15868), False, 'import h2o\n'), ((19430, 19461), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (19437, 19461), False, 'from tworaven_solver import Dataset\n'), ((20716, 20727), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (20725, 20727), False, 'import os\n'), ((21288, 21334), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (21300, 21334), False, 'import os\n'), ((21359, 21407), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (21371, 21407), False, 'import os\n'), ((21520, 21582), 'h2o.save_model', 'h2o.save_model', (['self.model'], {'path': 'model_folder_path', 'force': '(True)'}), 
'(self.model, path=model_folder_path, force=True)\n', (21534, 21582), False, 'import h2o\n'), ((23716, 23747), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (23723, 23747), False, 'from tworaven_solver import Dataset\n'), ((24587, 24598), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (24596, 24598), False, 'import os\n'), ((25140, 25186), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (25152, 25186), False, 'import os\n'), ((25211, 25259), 'os.path.join', 'os.path.join', (['model_folder_path', '"""metadata.json"""'], {}), "(model_folder_path, 'metadata.json')\n", (25223, 25259), False, 'import os\n'), ((31089, 31100), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (31098, 31100), False, 'import os\n'), ((31647, 31693), 'os.path.join', 'os.path.join', (['SAVED_MODELS_PATH', 'self.model_id'], {}), '(SAVED_MODELS_PATH, self.model_id)\n', (31659, 31693), False, 'import os\n'), ((31718, 31765), 'os.path.join', 'os.path.join', (['model_folder_dir', '"""metadata.json"""'], {}), "(model_folder_dir, 'metadata.json')\n", (31730, 31765), False, 'import os\n'), ((31775, 31819), 'os.makedirs', 'os.makedirs', (['model_folder_dir'], {'exist_ok': '(True)'}), '(model_folder_dir, exist_ok=True)\n', (31786, 31819), False, 'import os\n'), ((632, 665), 'tworaven_apps.solver_interfaces.models.StatisticalModel.objects.create', 'StatisticalModel.objects.create', ([], {}), '()\n', (663, 665), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((1521, 1550), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (1535, 1550), False, 'import os\n'), ((1668, 1692), 'json.load', 'json.load', (['metadata_file'], {}), '(metadata_file)\n', (1677, 1692), False, 'import json\n'), ((2958, 2968), 'h2o.init', 'h2o.init', ([], {}), '()\n', (2966, 2968), False, 'import h2o\n'), ((6494, 
6520), 'pandas.DataFrame', 'pandas.DataFrame', (['stimulus'], {}), '(stimulus)\n', (6510, 6520), False, 'import pandas\n'), ((6701, 6727), 'pandas.DataFrame', 'pandas.DataFrame', (['stimulus'], {}), '(stimulus)\n', (6717, 6727), False, 'import pandas\n'), ((7967, 8004), 'numpy.array', 'np.array', (['test_split[self.targets[0]]'], {}), '(test_split[self.targets[0]])\n', (7975, 8004), True, 'import numpy as np\n'), ((9355, 9380), 'json.dumps', 'json.dumps', (['specification'], {}), '(specification)\n', (9365, 9380), False, 'import json\n'), ((11769, 11806), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (11783, 11806), False, 'import os\n'), ((11820, 11854), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (11831, 11854), False, 'import os\n'), ((11907, 11920), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (11915, 11920), False, 'import os\n'), ((12037, 12050), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (12045, 12050), False, 'import os\n'), ((12559, 12588), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (12573, 12588), False, 'import os\n'), ((12602, 12632), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (12613, 12632), False, 'import os\n'), ((13101, 13148), 'os.path.join', 'os.path.join', (['model_folder_path', '"""model.joblib"""'], {}), "(model_folder_path, 'model.joblib')\n", (13113, 13148), False, 'import os\n'), ((15322, 15347), 'json.dumps', 'json.dumps', (['specification'], {}), '(specification)\n', (15332, 15347), False, 'import json\n'), ((20615, 20652), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (20629, 20652), False, 'import os\n'), ((20666, 20700), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (20677, 20700), False, 'import os\n'), ((20753, 20766), 'os.chdir', 'os.chdir', 
(['"""/"""'], {}), "('/')\n", (20761, 20766), False, 'import os\n'), ((20883, 20896), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (20891, 20896), False, 'import os\n'), ((21424, 21453), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (21438, 21453), False, 'import os\n'), ((21467, 21497), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (21478, 21497), False, 'import os\n'), ((24486, 24523), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (24500, 24523), False, 'import os\n'), ((24537, 24571), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (24548, 24571), False, 'import os\n'), ((24624, 24637), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (24632, 24637), False, 'import os\n'), ((24754, 24767), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (24762, 24767), False, 'import os\n'), ((25276, 25305), 'os.path.exists', 'os.path.exists', (['metadata_path'], {}), '(metadata_path)\n', (25290, 25305), False, 'import os\n'), ((25319, 25349), 'os.makedirs', 'os.makedirs', (['model_folder_path'], {}), '(model_folder_path)\n', (25330, 25349), False, 'import os\n'), ((25463, 25689), 'json.dump', 'json.dump', (["{'system': self.system, 'model_id': self.model_id, 'search_id': self.\n search_id, 'model_filename': model_folder_path, 'predictors': self.\n predictors, 'targets': self.targets, 'task': self.task}", 'metadata_file'], {}), "({'system': self.system, 'model_id': self.model_id, 'search_id':\n self.search_id, 'model_filename': model_folder_path, 'predictors': self\n .predictors, 'targets': self.targets, 'task': self.task}, metadata_file)\n", (25472, 25689), False, 'import json\n'), ((28237, 28316), 'pandas.DataFrame', 'pandas.DataFrame', (["{'actual': dataframe[target], 'predicted': predicted[target]}"], {}), "({'actual': dataframe[target], 'predicted': predicted[target]})\n", (28253, 28316), False, 
'import pandas\n'), ((30988, 31025), 'os.path.exists', 'os.path.exists', (['output_directory_path'], {}), '(output_directory_path)\n', (31002, 31025), False, 'import os\n'), ((31039, 31073), 'os.makedirs', 'os.makedirs', (['output_directory_path'], {}), '(output_directory_path)\n', (31050, 31073), False, 'import os\n'), ((31126, 31139), 'os.chdir', 'os.chdir', (['"""/"""'], {}), "('/')\n", (31134, 31139), False, 'import os\n'), ((31254, 31267), 'os.chdir', 'os.chdir', (['cwd'], {}), '(cwd)\n', (31262, 31267), False, 'import os\n'), ((1843, 1895), 'os.path.join', 'os.path.join', (['model_folder_path', '"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (1855, 1895), False, 'import os\n'), ((13220, 13272), 'os.path.join', 'os.path.join', (['model_folder_path', '"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (13232, 13272), False, 'import os\n'), ((13511, 13596), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretDescribe.app')"], {'json': "{'model_id': self.model_id}"}), "(R_SERVICE + 'caretDescribe.app', json={'model_id': self.model_id}\n )\n", (13524, 13596), False, 'import requests\n'), ((13800, 13913), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretScore.app')"], {'json': "{'model_id': self.model_id, 'specification': specification}"}), "(R_SERVICE + 'caretScore.app', json={'model_id': self.model_id,\n 'specification': specification})\n", (13813, 13913), False, 'import requests\n'), ((14166, 14282), 'requests.post', 'requests.post', (["(R_SERVICE + 'caretProduce.app')"], {'json': "{'model_id': self.model_id, 'specification': specification}"}), "(R_SERVICE + 'caretProduce.app', json={'model_id': self.\n model_id, 'specification': specification})\n", (14179, 14282), False, 'import requests\n'), ((15773, 15804), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (15780, 15804), False, 'from tworaven_solver import Dataset\n'), ((22832, 22863), 
'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (22839, 22863), False, 'from tworaven_solver import Dataset\n'), ((26534, 26571), 'tworaven_solver.Dataset', 'Dataset', (["score_specification['input']"], {}), "(score_specification['input'])\n", (26541, 26571), False, 'from tworaven_solver import Dataset\n'), ((29692, 29731), 'tworaven_solver.Dataset', 'Dataset', (["produce_specification['input']"], {}), "(produce_specification['input'])\n", (29699, 29731), False, 'from tworaven_solver import Dataset\n'), ((1939, 1991), 'os.path.join', 'os.path.join', (['model_folder_path', '"""preprocess.joblib"""'], {}), "(model_folder_path, 'preprocess.joblib')\n", (1951, 1991), False, 'import os\n'), ((2626, 2661), 'ludwig.api.LudwigModel.load', 'LudwigModel.load', (['model_folder_path'], {}), '(model_folder_path)\n', (2642, 2661), False, 'from ludwig.api import LudwigModel\n'), ((3489, 3539), 'tworaven_solver.model.BaseModelWrapper.load', 'BaseModelWrapper.load', (['model_folder_path', 'metadata'], {}), '(model_folder_path, metadata)\n', (3510, 3539), False, 'from tworaven_solver.model import BaseModelWrapper\n'), ((11238, 11269), 'numpy.argmax', 'np.argmax', (['predictions'], {'axis': '(-1)'}), '(predictions, axis=-1)\n', (11247, 11269), True, 'import numpy as np\n'), ((19072, 19103), 'tworaven_solver.Dataset', 'Dataset', (["specification['train']"], {}), "(specification['train'])\n", (19079, 19103), False, 'from tworaven_solver import Dataset\n'), ((21824, 21852), 'os.path.basename', 'os.path.basename', (['model_path'], {}), '(model_path)\n', (21840, 21852), False, 'import os\n'), ((2061, 2108), 'os.path.join', 'os.path.join', (['model_folder_path', '"""model.joblib"""'], {}), "(model_folder_path, 'model.joblib')\n", (2073, 2108), False, 'import os\n'), ((3035, 3094), 'os.path.join', 'os.path.join', (['model_folder_path', "metadata['model_filename']"], {}), "(model_folder_path, metadata['model_filename'])\n", (3047, 3094), 
False, 'import os\n'), ((8846, 8909), 'numpy.average', 'np.average', (['split_scores[metric]'], {'weights': 'split_weights[metric]'}), '(split_scores[metric], weights=split_weights[metric])\n', (8856, 8909), True, 'import numpy as np\n'), ((8941, 8959), 'json.loads', 'json.loads', (['metric'], {}), '(metric)\n', (8951, 8959), False, 'import json\n'), ((10094, 10125), 'tworaven_solver.Dataset', 'Dataset', (["specification['train']"], {}), "(specification['train'])\n", (10101, 10125), False, 'from tworaven_solver import Dataset\n'), ((10348, 10379), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (10355, 10379), False, 'from tworaven_solver import Dataset\n'), ((10727, 10739), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (10737, 10739), False, 'import uuid\n'), ((11300, 11356), 'pandas.DataFrame', 'pandas.DataFrame', (['predictions'], {'columns': '[self.targets[0]]'}), '(predictions, columns=[self.targets[0]])\n', (11316, 11356), False, 'import pandas\n'), ((20575, 20587), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (20585, 20587), False, 'import uuid\n'), ((23202, 23220), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric'], {}), '(metric)\n', (23212, 23220), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((24446, 24458), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (24456, 24458), False, 'import uuid\n'), ((30825, 30837), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (30835, 30837), False, 'import uuid\n'), ((7470, 7501), 'tworaven_solver.Dataset', 'Dataset', (["specification['input']"], {}), "(specification['input'])\n", (7477, 7501), False, 'from tworaven_solver import Dataset\n'), ((8515, 8533), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric'], {}), '(metric)\n', (8525, 8533), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, 
StatisticalModel\n'), ((16611, 16636), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['metric_schema'], {}), '(metric_schema)\n', (16621, 16636), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((28560, 28616), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['eachMetric', 'self.model.problem_specification'], {}), '(eachMetric, self.model.problem_specification)\n', (28570, 28616), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((28714, 28770), 'tworaven_apps.solver_interfaces.models.get_metric', 'get_metric', (['eachMetric', 'self.model.problem_specification'], {}), '(eachMetric, self.model.problem_specification)\n', (28724, 28770), False, 'from tworaven_apps.solver_interfaces.models import SAVED_MODELS_PATH, R_SERVICE, get_metric, StatisticalModel\n'), ((29908, 29947), 'tworaven_solver.Dataset', 'Dataset', (["produce_specification['train']"], {}), "(produce_specification['train'])\n", (29915, 29947), False, 'from tworaven_solver import Dataset\n'), ((4416, 4466), 'sklearn.model_selection.StratifiedKFold', 'model_selection.StratifiedKFold', ([], {}), '(**split_arguments)\n', (4447, 4466), False, 'from sklearn import model_selection\n'), ((4656, 4696), 'sklearn.model_selection.KFold', 'model_selection.KFold', ([], {}), '(**split_arguments)\n', (4677, 4696), False, 'from sklearn import model_selection\n'), ((8488, 8506), 'json.dumps', 'json.dumps', (['metric'], {}), '(metric)\n', (8498, 8506), False, 'import json\n'), ((8588, 8606), 'json.dumps', 'json.dumps', (['metric'], {}), '(metric)\n', (8598, 8606), False, 'import json\n')]
|
#!/usr/bin/env python3
# CGI login endpoint: on a POST request, checks the submitted name against a
# credential file and a hard-coded password, then establishes a "session" by
# emitting client-side JavaScript that sets a cookie.
# NOTE(review): the `cgi`/`cgitb` modules are deprecated since Python 3.11 and
# removed in 3.13 — confirm the deployment interpreter version.
print('Content-type:text/html\n\n')
import os
import cgi, cgitb
cgitb.enable()  # render tracebacks as HTML pages while debugging
if os.environ['REQUEST_METHOD'] == 'POST':
    form = cgi.FieldStorage()
    credentials_path = './interrogar-ud/credenciais.txt'
    # Create an empty credential file on first use. Context managers close
    # the handles; the original leaked them via bare open(...).write('') and
    # open(...).read() calls.
    if not os.path.isfile(credentials_path):
        with open(credentials_path, 'w') as handle:
            handle.write('')
    with open(credentials_path, 'r') as handle:
        credenciais = handle.read().splitlines()
    # SECURITY NOTE(review): the password is compared in plaintext against a
    # hard-coded literal; consider storing salted hashes instead.
    if form['nome'].value in credenciais and form['senha'].value == 'unic<PASSWORD>io':
        # Mirror the cookie into the CGI environment so any later code in
        # this same request sees the session as already established.
        if not "HTTP_COOKIE" in os.environ: os.environ["HTTP_COOKIE"] = "conectado=true"
        print('<script>\ndocument.cookie = "conectado=true; expires=2030; path=./cgi-bin/inquerito.py";\nwindow.alert("Credenciado com sucesso!\\nBem-vindo/a, ' + form['nome'].value + '.");\nwindow.close();\n</script>')
    else:
        print('Acesso não autorizado.')
|
[
"os.path.isfile",
"cgi.FieldStorage",
"cgitb.enable"
] |
[((88, 102), 'cgitb.enable', 'cgitb.enable', ([], {}), '()\n', (100, 102), False, 'import cgi, cgitb\n'), ((155, 173), 'cgi.FieldStorage', 'cgi.FieldStorage', ([], {}), '()\n', (171, 173), False, 'import cgi, cgitb\n'), ((182, 231), 'os.path.isfile', 'os.path.isfile', (['"""./interrogar-ud/credenciais.txt"""'], {}), "('./interrogar-ud/credenciais.txt')\n", (196, 231), False, 'import os\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Face detection loss."""
import numpy as np
import mindspore.nn as nn
from mindspore.ops import operations as P
from mindspore.nn.loss.loss import _Loss
from mindspore.nn import Dense, Cell
from mindspore import Tensor
from mindspore.common import dtype as mstype
class PtLinspace(Cell):
    """Graph-friendly linspace: yields `steps` evenly spaced integer values
    from `start` to `end` (inclusive) and converts the tuple to a tensor.

    Only exact integer spacing is supported: (end - start + 1) must be
    divisible by `steps`. All call sites in this file pass
    steps == end - start + 1, i.e. a step of 1.
    """

    def __init__(self):
        super(PtLinspace, self).__init__()
        self.TupleToArray = P.TupleToArray()

    def construct(self, start, end, steps):
        lin_x = ()
        # BUG FIX: `/` is true division in Python 3 and yields a float, which
        # range() rejects with a TypeError. Floor division keeps the step
        # integral; values are identical for the exactly-divisible inputs
        # this class is called with.
        step = (end - start + 1) // steps
        for i in range(start, end + 1, step):
            lin_x += (i,)
        lin_x = self.TupleToArray(lin_x)
        return lin_x
class MSELoss(_Loss):
    """Mean Euclidean distance between two batches of embeddings.

    Per row, computes the L2 norm of (nembeddings1 - nembeddings2)
    (square, sum over axis 1, sqrt) and averages the resulting
    distances over axis 0.
    """

    def __init__(self):
        super(MSELoss, self).__init__()
        # Primitive ops are instantiated once here and reused in construct().
        self.pow = P.Pow()
        self.sum = P.Sum()
        self.sqrt = P.Sqrt()
        self.mean = P.ReduceMean(keepdims=False)

    def construct(self, nembeddings1, nembeddings2):
        diff = nembeddings1 - nembeddings2
        squared_norm = self.sum(self.pow(diff, 2.0), 1)
        distances = self.sqrt(squared_norm)
        return self.mean(distances, 0)
class YoloLoss(Cell):
    """ Computes yolo loss from darknet network output and target annotation.
    Args:
        num_classes (int): number of categories
        anchors (list): 2D list representing anchor boxes
        anchors_mask (list): indices into `anchors` selecting the boxes used by this head
        coord_scale (float): weight of bounding box coordinates
        no_object_scale (float): weight of regions without target boxes
        object_scale (float): weight of regions with target boxes
        class_scale (float): weight of categorical predictions
        thresh (float): minimum iou between a predicted box and ground truth for them to be considered matching
        seen (int): How many images the network has already been trained on.
        reduction (int): network stride; anchors are divided by it to express them in feature-map cells
        head_idx (float): index of this detection head; scales some loss terms via (1 + head_idx * 0.5)
    """
    def __init__(self, num_classes, anchors, anchors_mask, reduction=32, seen=0, coord_scale=1.0, no_object_scale=1.0,
                 object_scale=1.0, class_scale=1.0, thresh=0.5, head_idx=0.0):
        super(YoloLoss, self).__init__()
        self.num_classes = num_classes
        self.num_anchors = len(anchors_mask)
        self.anchor_step = len(anchors[0])  # each scale has step anchors
        self.anchors = np.array(anchors, dtype=np.float32) / reduction  # scale every anchor for every scale
        self.tensor_anchors = Tensor(self.anchors, mstype.float32)
        self.anchors_mask = anchors_mask
        # Split the selected anchors into per-anchor width/height column vectors.
        anchors_w = []
        anchors_h = []
        for i in range(len(anchors_mask)):
            anchors_w.append(self.anchors[self.anchors_mask[i]][0])
            anchors_h.append(self.anchors[self.anchors_mask[i]][1])
        self.anchors_w = Tensor(np.array(anchors_w).reshape(len(self.anchors_mask), 1))
        self.anchors_h = Tensor(np.array(anchors_h).reshape(len(self.anchors_mask), 1))
        self.reduction = reduction
        self.seen = seen
        self.head_idx = head_idx
        self.zero = Tensor(0)
        self.coord_scale = coord_scale
        self.no_object_scale = no_object_scale
        self.object_scale = object_scale
        self.class_scale = class_scale
        self.thresh = thresh
        # Running statistics dictionary (not used inside construct()).
        self.info = {'avg_iou': 0, 'class': 0, 'obj': 0, 'no_obj': 0,
                     'recall50': 0, 'recall75': 0, 'obj_cur': 0, 'obj_all': 0,
                     'coord_xy': 0, 'coord_wh': 0}
        # Primitive graph ops, instantiated once for reuse in construct().
        self.Shape = P.Shape()
        self.Reshape = P.Reshape()
        self.Sigmoid = P.Sigmoid()
        self.ZerosLike = P.ZerosLike()
        self.ScatterNd = P.ScatterNd()
        self.ScatterNdUpdate = P.ScatterNdUpdate()
        self.concat0 = P.Concat(0)
        self.concat0_2 = P.Concat(0)
        self.concat0_3 = P.Concat(0)
        self.concat0_4 = P.Concat(0)
        self.concat1 = P.Concat(1)
        self.concat1_2 = P.Concat(1)
        self.concat1_3 = P.Concat(1)
        self.concat1_4 = P.Concat(1)
        self.concat2 = P.Concat(2)
        self.concat2_2 = P.Concat(2)
        self.concat2_3 = P.Concat(2)
        self.concat2_4 = P.Concat(2)
        self.Tile = P.Tile()
        self.Transpose = P.Transpose()
        self.TupleToArray = P.TupleToArray()
        self.ScalarToArray = P.ScalarToArray()
        self.Cast = P.Cast()
        self.Exp = P.Exp()
        self.Sum = P.ReduceSum()
        self.Log = P.Log()
        self.TensorAdd = P.TensorAdd()
        self.RealDiv = P.RealDiv()
        self.Div = P.Div()
        self.SmoothL1Loss = P.SmoothL1Loss()
        self.Sub = P.Sub()
        self.Greater = P.Greater()
        self.GreaterEqual = P.GreaterEqual()
        self.Minimum = P.Minimum()
        self.Maximum = P.Maximum()
        self.Less = P.Less()
        self.OnesLike = P.OnesLike()
        self.Fill = P.Fill()
        self.Equal = P.Equal()
        self.BCE = P.SigmoidCrossEntropyWithLogits()
        self.CE = P.SoftmaxCrossEntropyWithLogits()
        self.DType = P.DType()
        self.PtLinspace = PtLinspace()
        self.OneHot = nn.OneHot(-1, self.num_classes, 1.0, 0.0)
        self.Squeeze2 = P.Squeeze(2)
        self.ArgMax = P.Argmax()
        self.ArgMaxWithValue1 = P.ArgMaxWithValue(1)
        self.ReduceSum = P.ReduceSum()
        # NOTE(review): self.Log is assigned twice (also above); harmless but
        # redundant.
        self.Log = P.Log()
        self.GatherNd = P.GatherNd()
        self.Abs = P.Abs()
        self.Select = P.Select()
        self.IOU = P.IOU()

    def construct(self, output, coord_mask, conf_pos_mask, conf_neg_mask, cls_mask, t_coord, t_conf, t_cls, gt_list):
        """
        Compute Yolo loss.

        Decodes raw network output into predicted boxes, suppresses the
        no-object mask where a prediction overlaps any ground truth above
        `thresh`, and returns the weighted sum of coordinate, confidence,
        and classification losses.
        """
        # Unpack output shape: (batch, anchors * channels, height, width).
        output_d = self.Shape(output)
        num_batch = output_d[0]
        num_anchors = self.num_anchors
        num_classes = self.num_classes
        num_channels = output_d[1] / num_anchors
        height = output_d[2]
        width = output_d[3]
        output = self.Reshape(output, (num_batch, num_anchors, num_channels, height * width))
        # Channel layout per anchor: 0-1 = tx,ty; 2-3 = tw,th; 4 = objectness;
        # 5.. = class logits.
        coord_01 = output[:, :, :2]  # tx,ty
        coord_23 = output[:, :, 2:4]  # tw,th
        coord = self.concat2((coord_01, coord_23))
        conf = self.Squeeze2(output[:, :, 4:5, :])
        cls = output[:, :, 5:]
        cls = self.Reshape(cls, (num_batch*num_anchors, num_classes, height*width))
        perm = (0, 2, 1)
        cls = self.Transpose(cls, perm)
        cls_shp = self.Shape(cls)
        cls = self.Reshape(cls, (cls_shp[0] * cls_shp[1] * cls_shp[2] / num_classes, num_classes))
        # Build per-cell x/y offsets (the grid) to decode box centers.
        lin_x = self.PtLinspace(0, width - 1, width)
        lin_x = self.Tile(lin_x, (height, ))
        lin_x = self.Cast(lin_x, mstype.float32)
        lin_y = self.PtLinspace(0, height - 1, height)
        lin_y = self.Reshape(lin_y, (height, 1))
        lin_y = self.Tile(lin_y, (1, width))
        lin_y = self.Reshape(lin_y, (self.Shape(lin_y)[0] * self.Shape(lin_y)[1], ))
        lin_y = self.Cast(lin_y, mstype.float32)
        anchor_w = self.anchors_w
        anchor_h = self.anchors_h
        anchor_w = self.Cast(anchor_w, mstype.float32)
        anchor_h = self.Cast(anchor_h, mstype.float32)
        # Decode predictions: center = sigmoid(txy) + grid offset,
        # size = exp(twh) * anchor, each flattened to a column vector.
        coord_x = self.Sigmoid(coord[:, :, 0:1, :])
        pred_boxes_0 = self.Squeeze2(coord_x) + lin_x
        shape_pb0 = self.Shape(pred_boxes_0)
        pred_boxes_0 = self.Reshape(pred_boxes_0, (shape_pb0[0] * shape_pb0[1] * shape_pb0[2], 1))
        coord_y = self.Sigmoid(coord[:, :, 1:2, :])
        pred_boxes_1 = self.Squeeze2(coord_y) + lin_y
        shape_pb1 = self.Shape(pred_boxes_1)
        pred_boxes_1 = self.Reshape(pred_boxes_1, (shape_pb1[0] * shape_pb1[1] * shape_pb1[2], 1))
        pred_boxes_2 = self.Exp(self.Squeeze2(coord[:, :, 2:3, :])) * anchor_w
        shape_pb2 = self.Shape(pred_boxes_2)
        pred_boxes_2 = self.Reshape(pred_boxes_2, (shape_pb2[0] * shape_pb2[1] * shape_pb2[2], 1))
        pred_boxes_3 = self.Exp(self.Squeeze2(coord[:, :, 3:4, :])) * anchor_h
        shape_pb3 = self.Shape(pred_boxes_3)
        pred_boxes_3 = self.Reshape(pred_boxes_3, (shape_pb3[0] * shape_pb3[1] * shape_pb3[2], 1))
        # Convert center/size to corner coordinates (x1, y1, x2, y2).
        pred_boxes_x1 = pred_boxes_0 - pred_boxes_2 / 2
        pred_boxes_y1 = pred_boxes_1 - pred_boxes_3 / 2
        pred_boxes_x2 = pred_boxes_0 + pred_boxes_2 / 2
        pred_boxes_y2 = pred_boxes_1 + pred_boxes_3 / 2
        pred_boxes_points = self.concat1_4((pred_boxes_x1, pred_boxes_y1, pred_boxes_x2, pred_boxes_y2))
        total_anchors = num_anchors * height * width
        mask_concat = None
        conf_neg_mask_zero = self.ZerosLike(conf_neg_mask)
        # Scale boxes before IOU; 64 presumably relates to input resolution /
        # reduction — TODO confirm against the data pipeline.
        pred_boxes_points = pred_boxes_points * 64
        gt_list = gt_list * 64
        # Per image: zero out the negative-confidence mask wherever a
        # predicted box overlaps any ground-truth box above `thresh`, so
        # such predictions are not penalized as background.
        for b in range(num_batch):
            cur_pred_boxes = pred_boxes_points[b * total_anchors:(b + 1) * total_anchors]
            iou_gt_pred = self.IOU(self.Cast(cur_pred_boxes, mstype.float16), self.Cast(gt_list[b], mstype.float16))
            mask = self.Cast((iou_gt_pred > self.thresh), mstype.float16)
            mask = self.ReduceSum(mask, 0)
            mask = mask > 0
            shape_neg = self.Shape(conf_neg_mask[0])
            mask = self.Reshape(mask, (1, shape_neg[0], shape_neg[1]))
            if b == 0:
                mask_concat = mask
            else:
                mask_concat = self.concat0_2((mask_concat, mask))
        conf_neg_mask = self.Select(mask_concat, conf_neg_mask_zero, conf_neg_mask)
        coord_mask = self.Tile(coord_mask, (1, 1, 4, 1))
        coord_mask = coord_mask[:, :, :2]
        coord_center = coord[:, :, :2]
        t_coord_center = t_coord[:, :, :2]
        coord_wh = coord[:, :, 2:]
        t_coord_wh = t_coord[:, :, 2:]
        one_hot_label = None
        shape_cls_mask = None
        if num_classes > 1:
            # Multi-class: flatten targets and one-hot encode for softmax CE.
            shape_t_cls = self.Shape(t_cls)
            t_cls = self.Reshape(t_cls, (shape_t_cls[0] * shape_t_cls[1] * shape_t_cls[2],))
            one_hot_label = self.OneHot(self.Cast(t_cls, mstype.int32))
            shape_cls_mask = self.Shape(cls_mask)
            cls_mask = self.Reshape(cls_mask, (1, shape_cls_mask[0] * shape_cls_mask[1] * shape_cls_mask[2]))
        # Loss terms; `added_scale` up-weights later heads via head_idx.
        added_scale = 1.0 + self.head_idx * 0.5
        loss_coord_center = added_scale * 2.0 * 1.0 * self.coord_scale * self.Sum(
            coord_mask * self.BCE(coord_center, t_coord_center), ())
        loss_coord_wh = added_scale * 2.0 * 1.5 * self.coord_scale * self.Sum(
            coord_mask * self.SmoothL1Loss(coord_wh, t_coord_wh), ())
        loss_coord = 1.0 * (loss_coord_center + loss_coord_wh)
        loss_conf_pos = added_scale * 2.0 * self.object_scale * self.Sum(conf_pos_mask * self.BCE(conf, t_conf), ())
        loss_conf_neg = 1.0 * self.no_object_scale * self.Sum(conf_neg_mask * self.BCE(conf, t_conf), ())
        loss_conf = loss_conf_pos + loss_conf_neg
        loss_cls = None
        if num_classes > 1:
            loss_cls = self.class_scale * 1.0 * self.Sum(cls_mask * self.CE(cls, one_hot_label)[0], ())
        else:
            # Single class: treat the lone class logit like a second
            # objectness channel with sigmoid cross-entropy.
            loss_cls = 0.0
            cls = self.Squeeze2(output[:, :, 5:6, :])
            loss_cls_pos = added_scale * 2.0 * self.object_scale * self.Sum(conf_pos_mask * self.BCE(cls, t_conf), ())
            loss_cls_neg = 1.0 * self.no_object_scale * self.Sum(conf_neg_mask * self.BCE(cls, t_conf), ())
            loss_cls = loss_cls_pos + loss_cls_neg
        loss_tot = loss_coord + 0.5 * loss_conf + 0.5 * loss_cls
        return loss_tot
|
[
"mindspore.ops.operations.SigmoidCrossEntropyWithLogits",
"mindspore.ops.operations.GatherNd",
"mindspore.Tensor",
"mindspore.ops.operations.Cast",
"mindspore.ops.operations.DType",
"mindspore.ops.operations.IOU",
"mindspore.nn.OneHot",
"mindspore.ops.operations.Fill",
"mindspore.ops.operations.Transpose",
"mindspore.ops.operations.Concat",
"mindspore.ops.operations.Maximum",
"mindspore.ops.operations.ReduceSum",
"mindspore.ops.operations.ReduceMean",
"mindspore.ops.operations.TensorAdd",
"mindspore.ops.operations.Select",
"mindspore.ops.operations.Argmax",
"mindspore.ops.operations.TupleToArray",
"mindspore.ops.operations.Log",
"mindspore.ops.operations.ArgMaxWithValue",
"mindspore.ops.operations.Sum",
"mindspore.ops.operations.Exp",
"mindspore.ops.operations.Reshape",
"mindspore.ops.operations.Squeeze",
"mindspore.ops.operations.ScalarToArray",
"mindspore.ops.operations.Minimum",
"mindspore.ops.operations.Sqrt",
"mindspore.ops.operations.ScatterNdUpdate",
"mindspore.ops.operations.Abs",
"mindspore.ops.operations.Less",
"mindspore.ops.operations.Equal",
"mindspore.ops.operations.ScatterNd",
"mindspore.ops.operations.Greater",
"mindspore.ops.operations.Sigmoid",
"mindspore.ops.operations.OnesLike",
"mindspore.ops.operations.RealDiv",
"mindspore.ops.operations.ZerosLike",
"mindspore.ops.operations.SoftmaxCrossEntropyWithLogits",
"mindspore.ops.operations.Pow",
"mindspore.ops.operations.Shape",
"mindspore.ops.operations.Sub",
"numpy.array",
"mindspore.ops.operations.GreaterEqual",
"mindspore.ops.operations.Tile",
"mindspore.ops.operations.SmoothL1Loss",
"mindspore.ops.operations.Div"
] |
[((1055, 1071), 'mindspore.ops.operations.TupleToArray', 'P.TupleToArray', ([], {}), '()\n', (1069, 1071), True, 'from mindspore.ops import operations as P\n'), ((1419, 1426), 'mindspore.ops.operations.Sum', 'P.Sum', ([], {}), '()\n', (1424, 1426), True, 'from mindspore.ops import operations as P\n'), ((1447, 1475), 'mindspore.ops.operations.ReduceMean', 'P.ReduceMean', ([], {'keepdims': '(False)'}), '(keepdims=False)\n', (1459, 1475), True, 'from mindspore.ops import operations as P\n'), ((1495, 1502), 'mindspore.ops.operations.Pow', 'P.Pow', ([], {}), '()\n', (1500, 1502), True, 'from mindspore.ops import operations as P\n'), ((1523, 1531), 'mindspore.ops.operations.Sqrt', 'P.Sqrt', ([], {}), '()\n', (1529, 1531), True, 'from mindspore.ops import operations as P\n'), ((3026, 3062), 'mindspore.Tensor', 'Tensor', (['self.anchors', 'mstype.float32'], {}), '(self.anchors, mstype.float32)\n', (3032, 3062), False, 'from mindspore import Tensor\n'), ((3619, 3628), 'mindspore.Tensor', 'Tensor', (['(0)'], {}), '(0)\n', (3625, 3628), False, 'from mindspore import Tensor\n'), ((4047, 4056), 'mindspore.ops.operations.Shape', 'P.Shape', ([], {}), '()\n', (4054, 4056), True, 'from mindspore.ops import operations as P\n'), ((4080, 4091), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (4089, 4091), True, 'from mindspore.ops import operations as P\n'), ((4115, 4126), 'mindspore.ops.operations.Sigmoid', 'P.Sigmoid', ([], {}), '()\n', (4124, 4126), True, 'from mindspore.ops import operations as P\n'), ((4152, 4165), 'mindspore.ops.operations.ZerosLike', 'P.ZerosLike', ([], {}), '()\n', (4163, 4165), True, 'from mindspore.ops import operations as P\n'), ((4191, 4204), 'mindspore.ops.operations.ScatterNd', 'P.ScatterNd', ([], {}), '()\n', (4202, 4204), True, 'from mindspore.ops import operations as P\n'), ((4236, 4255), 'mindspore.ops.operations.ScatterNdUpdate', 'P.ScatterNdUpdate', ([], {}), '()\n', (4253, 4255), True, 'from mindspore.ops import operations as 
P\n'), ((4279, 4290), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4287, 4290), True, 'from mindspore.ops import operations as P\n'), ((4316, 4327), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4324, 4327), True, 'from mindspore.ops import operations as P\n'), ((4353, 4364), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4361, 4364), True, 'from mindspore.ops import operations as P\n'), ((4390, 4401), 'mindspore.ops.operations.Concat', 'P.Concat', (['(0)'], {}), '(0)\n', (4398, 4401), True, 'from mindspore.ops import operations as P\n'), ((4425, 4436), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4433, 4436), True, 'from mindspore.ops import operations as P\n'), ((4462, 4473), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4470, 4473), True, 'from mindspore.ops import operations as P\n'), ((4499, 4510), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4507, 4510), True, 'from mindspore.ops import operations as P\n'), ((4536, 4547), 'mindspore.ops.operations.Concat', 'P.Concat', (['(1)'], {}), '(1)\n', (4544, 4547), True, 'from mindspore.ops import operations as P\n'), ((4571, 4582), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4579, 4582), True, 'from mindspore.ops import operations as P\n'), ((4608, 4619), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4616, 4619), True, 'from mindspore.ops import operations as P\n'), ((4645, 4656), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4653, 4656), True, 'from mindspore.ops import operations as P\n'), ((4682, 4693), 'mindspore.ops.operations.Concat', 'P.Concat', (['(2)'], {}), '(2)\n', (4690, 4693), True, 'from mindspore.ops import operations as P\n'), ((4715, 4723), 'mindspore.ops.operations.Tile', 'P.Tile', ([], {}), '()\n', (4721, 4723), True, 'from mindspore.ops import operations as P\n'), 
((4749, 4762), 'mindspore.ops.operations.Transpose', 'P.Transpose', ([], {}), '()\n', (4760, 4762), True, 'from mindspore.ops import operations as P\n'), ((4791, 4807), 'mindspore.ops.operations.TupleToArray', 'P.TupleToArray', ([], {}), '()\n', (4805, 4807), True, 'from mindspore.ops import operations as P\n'), ((4837, 4854), 'mindspore.ops.operations.ScalarToArray', 'P.ScalarToArray', ([], {}), '()\n', (4852, 4854), True, 'from mindspore.ops import operations as P\n'), ((4875, 4883), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (4881, 4883), True, 'from mindspore.ops import operations as P\n'), ((4903, 4910), 'mindspore.ops.operations.Exp', 'P.Exp', ([], {}), '()\n', (4908, 4910), True, 'from mindspore.ops import operations as P\n'), ((4930, 4943), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (4941, 4943), True, 'from mindspore.ops import operations as P\n'), ((4963, 4970), 'mindspore.ops.operations.Log', 'P.Log', ([], {}), '()\n', (4968, 4970), True, 'from mindspore.ops import operations as P\n'), ((4996, 5009), 'mindspore.ops.operations.TensorAdd', 'P.TensorAdd', ([], {}), '()\n', (5007, 5009), True, 'from mindspore.ops import operations as P\n'), ((5033, 5044), 'mindspore.ops.operations.RealDiv', 'P.RealDiv', ([], {}), '()\n', (5042, 5044), True, 'from mindspore.ops import operations as P\n'), ((5064, 5071), 'mindspore.ops.operations.Div', 'P.Div', ([], {}), '()\n', (5069, 5071), True, 'from mindspore.ops import operations as P\n'), ((5100, 5116), 'mindspore.ops.operations.SmoothL1Loss', 'P.SmoothL1Loss', ([], {}), '()\n', (5114, 5116), True, 'from mindspore.ops import operations as P\n'), ((5136, 5143), 'mindspore.ops.operations.Sub', 'P.Sub', ([], {}), '()\n', (5141, 5143), True, 'from mindspore.ops import operations as P\n'), ((5167, 5178), 'mindspore.ops.operations.Greater', 'P.Greater', ([], {}), '()\n', (5176, 5178), True, 'from mindspore.ops import operations as P\n'), ((5207, 5223), 
'mindspore.ops.operations.GreaterEqual', 'P.GreaterEqual', ([], {}), '()\n', (5221, 5223), True, 'from mindspore.ops import operations as P\n'), ((5247, 5258), 'mindspore.ops.operations.Minimum', 'P.Minimum', ([], {}), '()\n', (5256, 5258), True, 'from mindspore.ops import operations as P\n'), ((5282, 5293), 'mindspore.ops.operations.Maximum', 'P.Maximum', ([], {}), '()\n', (5291, 5293), True, 'from mindspore.ops import operations as P\n'), ((5314, 5322), 'mindspore.ops.operations.Less', 'P.Less', ([], {}), '()\n', (5320, 5322), True, 'from mindspore.ops import operations as P\n'), ((5347, 5359), 'mindspore.ops.operations.OnesLike', 'P.OnesLike', ([], {}), '()\n', (5357, 5359), True, 'from mindspore.ops import operations as P\n'), ((5380, 5388), 'mindspore.ops.operations.Fill', 'P.Fill', ([], {}), '()\n', (5386, 5388), True, 'from mindspore.ops import operations as P\n'), ((5410, 5419), 'mindspore.ops.operations.Equal', 'P.Equal', ([], {}), '()\n', (5417, 5419), True, 'from mindspore.ops import operations as P\n'), ((5439, 5472), 'mindspore.ops.operations.SigmoidCrossEntropyWithLogits', 'P.SigmoidCrossEntropyWithLogits', ([], {}), '()\n', (5470, 5472), True, 'from mindspore.ops import operations as P\n'), ((5491, 5524), 'mindspore.ops.operations.SoftmaxCrossEntropyWithLogits', 'P.SoftmaxCrossEntropyWithLogits', ([], {}), '()\n', (5522, 5524), True, 'from mindspore.ops import operations as P\n'), ((5546, 5555), 'mindspore.ops.operations.DType', 'P.DType', ([], {}), '()\n', (5553, 5555), True, 'from mindspore.ops import operations as P\n'), ((5617, 5658), 'mindspore.nn.OneHot', 'nn.OneHot', (['(-1)', 'self.num_classes', '(1.0)', '(0.0)'], {}), '(-1, self.num_classes, 1.0, 0.0)\n', (5626, 5658), True, 'import mindspore.nn as nn\n'), ((5683, 5695), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', (['(2)'], {}), '(2)\n', (5692, 5695), True, 'from mindspore.ops import operations as P\n'), ((5718, 5728), 'mindspore.ops.operations.Argmax', 'P.Argmax', ([], {}), '()\n', 
(5726, 5728), True, 'from mindspore.ops import operations as P\n'), ((5761, 5781), 'mindspore.ops.operations.ArgMaxWithValue', 'P.ArgMaxWithValue', (['(1)'], {}), '(1)\n', (5778, 5781), True, 'from mindspore.ops import operations as P\n'), ((5807, 5820), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (5818, 5820), True, 'from mindspore.ops import operations as P\n'), ((5840, 5847), 'mindspore.ops.operations.Log', 'P.Log', ([], {}), '()\n', (5845, 5847), True, 'from mindspore.ops import operations as P\n'), ((5872, 5884), 'mindspore.ops.operations.GatherNd', 'P.GatherNd', ([], {}), '()\n', (5882, 5884), True, 'from mindspore.ops import operations as P\n'), ((5904, 5911), 'mindspore.ops.operations.Abs', 'P.Abs', ([], {}), '()\n', (5909, 5911), True, 'from mindspore.ops import operations as P\n'), ((5934, 5944), 'mindspore.ops.operations.Select', 'P.Select', ([], {}), '()\n', (5942, 5944), True, 'from mindspore.ops import operations as P\n'), ((5964, 5971), 'mindspore.ops.operations.IOU', 'P.IOU', ([], {}), '()\n', (5969, 5971), True, 'from mindspore.ops import operations as P\n'), ((2910, 2945), 'numpy.array', 'np.array', (['anchors'], {'dtype': 'np.float32'}), '(anchors, dtype=np.float32)\n', (2918, 2945), True, 'import numpy as np\n'), ((3361, 3380), 'numpy.array', 'np.array', (['anchors_w'], {}), '(anchors_w)\n', (3369, 3380), True, 'import numpy as np\n'), ((3449, 3468), 'numpy.array', 'np.array', (['anchors_h'], {}), '(anchors_h)\n', (3457, 3468), True, 'import numpy as np\n')]
|
from chat.tests.common import *
from chat.models import Message
from chat.utils import datetime_to_timestamp, timestamp_to_datetime
def create_message(message_content, timestamp, username, channel, message_type):
    """
    Persist and return a new Message built from the given content,
    timestamp, username, channel and message type.

    The raw timestamp is converted to a datetime for ``datetime_start``,
    and ``typing`` is always stored as True.
    """
    fields = {
        'message_content': message_content,
        'datetime_start': timestamp_to_datetime(timestamp),
        'username': username,
        'typing': True,
        'channel': channel,
        'message_type': message_type,
    }
    return Message.objects.create(**fields)
|
[
"chat.utils.timestamp_to_datetime"
] |
[((434, 466), 'chat.utils.timestamp_to_datetime', 'timestamp_to_datetime', (['timestamp'], {}), '(timestamp)\n', (455, 466), False, 'from chat.utils import datetime_to_timestamp, timestamp_to_datetime\n')]
|
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class Attribute(models.Model):
    # Named attribute for the cms_articles app; only a display name is stored.
    name = models.CharField(_('name'), max_length=255)
    class Meta:
        app_label = 'cms_articles'
        verbose_name = _('attribute')
        verbose_name_plural = _('attributes')
    def __str__(self):
        # Coerce to text explicitly (Python 2/3 compatible via force_text).
        return force_text(self.name)
|
[
"django.utils.translation.ugettext_lazy",
"django.utils.encoding.force_text"
] |
[((249, 258), 'django.utils.translation.ugettext_lazy', '_', (['"""name"""'], {}), "('name')\n", (250, 258), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((351, 365), 'django.utils.translation.ugettext_lazy', '_', (['"""attribute"""'], {}), "('attribute')\n", (352, 365), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((396, 411), 'django.utils.translation.ugettext_lazy', '_', (['"""attributes"""'], {}), "('attributes')\n", (397, 411), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((451, 472), 'django.utils.encoding.force_text', 'force_text', (['self.name'], {}), '(self.name)\n', (461, 472), False, 'from django.utils.encoding import force_text, python_2_unicode_compatible\n')]
|
"""
Tests for helper functions for processing the images for OCR
"""
from typing import Any
import os
import cv2
from src.extractor.extractor import get_boxes
def test_get_boxes() -> None:
    """Smoke test: get_boxes should run without error on the sample image.

    Fixes two defects of the original:
    - the return annotation ``tuple((Any, list))`` was evaluated eagerly and
      was wrong anyway — the test returns nothing, so it is ``None``;
    - the path embedded Windows separators (``r"data\test\test2.png"``)
      inside ``os.path.join``, breaking the test on POSIX systems.
    """
    path = os.path.join(os.getcwd(), "data", "test", "test2.png")
    img = cv2.imread(path, 0)  # 0 -> load as grayscale
    get_boxes(img)
|
[
"os.getcwd",
"cv2.imread",
"src.extractor.extractor.get_boxes"
] |
[((277, 296), 'cv2.imread', 'cv2.imread', (['path', '(0)'], {}), '(path, 0)\n', (287, 296), False, 'import cv2\n'), ((301, 315), 'src.extractor.extractor.get_boxes', 'get_boxes', (['img'], {}), '(img)\n', (310, 315), False, 'from src.extractor.extractor import get_boxes\n'), ((230, 241), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (239, 241), False, 'import os\n')]
|
"""
test_hmap.py
Copyright 2012 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.core.controllers.ci.moth import get_moth_http, get_moth_https
from w3af.plugins.tests.helper import PluginTest, PluginConfig
@attr('ci_fails')
class TestHmap(PluginTest):
    """Integration tests for the ``hmap`` infrastructure plugin."""

    base_url = get_moth_http()

    _run_configs = {
        'cfg': {
            'target': base_url,
            'plugins': {'infrastructure': (PluginConfig('hmap'),)}
        }
    }

    def _assert_server_identified(self):
        # Exactly one 'server' info item should have been recorded by hmap,
        # and it should identify the moth WSGI server.
        info_items = self.kb.get('hmap', 'server')
        self.assertEqual(len(info_items), 1, info_items)
        desc = info_items[0].get_desc()
        self.assertIn('WSGIServer/0.1', desc, desc)

    def test_hmap_http(self):
        run_config = self._run_configs['cfg']
        self._scan(run_config['target'], run_config['plugins'])
        self._assert_server_identified()

    def test_hmap_https(self):
        run_config = self._run_configs['cfg']
        self._scan(get_moth_https(), run_config['plugins'])
        self._assert_server_identified()
|
[
"w3af.core.controllers.ci.moth.get_moth_http",
"nose.plugins.attrib.attr",
"w3af.core.controllers.ci.moth.get_moth_https",
"w3af.plugins.tests.helper.PluginConfig"
] |
[((874, 890), 'nose.plugins.attrib.attr', 'attr', (['"""ci_fails"""'], {}), "('ci_fails')\n", (878, 890), False, 'from nose.plugins.attrib import attr\n'), ((935, 950), 'w3af.core.controllers.ci.moth.get_moth_http', 'get_moth_http', ([], {}), '()\n', (948, 950), False, 'from w3af.core.controllers.ci.moth import get_moth_http, get_moth_https\n'), ((1500, 1516), 'w3af.core.controllers.ci.moth.get_moth_https', 'get_moth_https', ([], {}), '()\n', (1514, 1516), False, 'from w3af.core.controllers.ci.moth import get_moth_http, get_moth_https\n'), ((1057, 1077), 'w3af.plugins.tests.helper.PluginConfig', 'PluginConfig', (['"""hmap"""'], {}), "('hmap')\n", (1069, 1077), False, 'from w3af.plugins.tests.helper import PluginTest, PluginConfig\n')]
|
import os
import sys
# Command-line arguments:
#   1: metadata file with "|"-separated fields (first field is a file path)
#   2: directory that will hold the rewritten wav paths
#   3: destination file for the rewritten metadata
input_filename = sys.argv[1]
wav_location = sys.argv[2]
output_filename = sys.argv[3]
def replace_character( inp, index, final_char ):
    """Return a copy of ``inp`` with the character at ``index`` replaced
    by ``final_char`` (negative indices count from the end)."""
    chars = list(inp)
    chars[index] = final_char
    return "".join(chars)
def find_last_occurence( inp, to_find ):
    """
    Return the index of the last occurrence of ``to_find`` in ``inp``.

    Keeps the original reverse-search formula, so when ``to_find`` is
    absent the result is ``len(inp)`` (not -1 as ``str.rfind`` would give).
    """
    pos_from_end = inp[::-1].find(to_find)
    return len(inp) - 1 - pos_from_end
# Read the "|"-separated metadata rows and split each into fields.
lines = open(input_filename).read().split("\n")
lines = [line.split("|") for line in lines]
outlines = []
for line in lines:
    # Keep only the file name, re-root it under wav_location, turn the last
    # "_" into a path separator, and drop any ".npy" suffix.
    wav_filename = os.path.basename(line[0])
    final_filename = os.path.join( wav_location, replace_character( wav_filename, find_last_occurence(wav_filename, "_"), "/" ).replace(".npy","") )
    outlines.append( "|".join( [ final_filename, line[1], line[2] ] ) )
# Write the rewritten rows back out.
f = open(output_filename, "w")
f.write("\n".join(outlines))
f.close()
|
[
"os.path.basename"
] |
[((450, 475), 'os.path.basename', 'os.path.basename', (['line[0]'], {}), '(line[0])\n', (466, 475), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
"""
Basic and Monitor-Curve Exponent Transfer Functions
===================================================
Defines the exponent transfer functions:
- :func:`colour.models.exponent_function_basic`
- :func:`colour.models.exponent_function_monitor_curve`
References
----------
- :cite: `TheAcademyofMotionPictureArtsandSciences2020` : The Academy of
Motion Picture Arts and Sciences, Science and Technology Council, & Academy
Color Encoding System (ACES) Project Subcommittee. (2020). Specification
S-2014-006 - Common LUT Format (CLF) - A Common File Format for Look-Up
Tables. Retrieved June 24, 2020, from http://j.mp/S-2014-006
"""
import numpy as np
from colour.utilities import as_float, as_float_array, suppress_warnings
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '<EMAIL>'
__status__ = 'Production'
__all__ = ['exponent_function_basic', 'exponent_function_monitor_curve']
def exponent_function_basic(x, exponent=1, style='basicFwd'):
    """
    Defines the *basic* exponent transfer function.

    Parameters
    ----------
    x : numeric or array_like
        Data to undergo the basic exponent conversion.
    exponent : numeric or array_like, optional
        Exponent value used for the conversion.
    style : unicode, optional
        **{'basicFwd', 'basicRev', 'basicMirrorFwd', 'basicMirrorRev',
        'basicPassThruFwd', 'basicPassThruRev'}**,
        Behaviour of the transfer function:

        -   *Fwd* styles raise the input to ``exponent``; *Rev* styles
            raise it to ``1 / exponent``.
        -   *basicFwd* / *basicRev*: values less than zero are clamped
            to zero.
        -   *basicMirrorFwd* / *basicMirrorRev*: values less than zero are
            mirrored through the origin (rotationally symmetric).
        -   *basicPassThruFwd* / *basicPassThruRev*: values less than zero
            pass through unchanged.

    Returns
    -------
    numeric or ndarray
        Exponentially converted data.

    Raises
    ------
    ValueError
        If the *style* is not defined.

    Examples
    --------
    >>> exponent_function_basic(0.18, 2.2)  # doctest: +ELLIPSIS
    0.0229932...
    >>> exponent_function_basic(0.18, 2.2, 'basicRev')  # doctest: +ELLIPSIS
    0.4586564...
    >>> exponent_function_basic(-0.18, 2.2, 'basicPassThruFwd')
    ... # doctest: +ELLIPSIS
    -0.1799999...
    """

    x = as_float_array(x)
    exponent = as_float_array(exponent)

    def _forward(v):
        """Raise *v* to the exponent."""
        return v ** exponent

    def _reverse(v):
        """Raise *v* to the inverse exponent."""
        return v ** (1 / exponent)

    # Each style maps to a callable applied to the whole array; negative
    # inputs are clamped, mirrored or passed through depending on style.
    dispatch = {
        'basicfwd': lambda v: np.where(v >= 0, _forward(v), 0),
        'basicrev': lambda v: np.where(v >= 0, _reverse(v), 0),
        'basicmirrorfwd': lambda v: np.where(v >= 0, _forward(v),
                                             -_forward(-v)),
        'basicmirrorrev': lambda v: np.where(v >= 0, _reverse(v),
                                             -_reverse(-v)),
        'basicpassthrufwd': lambda v: np.where(v >= 0, _forward(v), v),
        'basicpassthrurev': lambda v: np.where(v >= 0, _reverse(v), v),
    }

    style = style.lower()
    if style not in dispatch:
        raise ValueError(
            'Undefined style used: "{0}", must be one of the following: '
            '"{1}".'.format(
                style, ', '.join([
                    'basicFwd', 'basicRev', 'basicMirrorFwd', 'basicMirrorRev',
                    'basicPassThruFwd', 'basicPassThruRev'
                ])))

    return as_float(dispatch[style](x))
def exponent_function_monitor_curve(x,
                                    exponent=1,
                                    offset=0,
                                    style='monCurveFwd'):
    """
    Defines the *Monitor Curve* exponent transfer function.
    Parameters
    ----------
    x : numeric or array_like
        Data to undergo the monitor curve exponential conversion.
    exponent : numeric or array_like, optional
        Exponent value used for the conversion.
    offset: numeric or array_like, optional
        Offset value used for the conversion.
    style : unicode, optional
        **{'monCurveFwd', 'monCurveRev', 'monCurveMirrorFwd',
        'monCurveMirrorRev'}**,
        Defines the behaviour for the transfer function to operate:
        -   *monCurveFwd*: *Monitor Curve Forward* exponential behaviour
            where the definition applies a power law function with a linear
            segment near the origin.
        -   *monCurveRev*: *Monitor Curve Reverse* exponential behaviour
            where the definition applies a power law function with a linear
            segment near the origin.
        -   *monCurveMirrorFwd*: *Monitor Curve Mirror Forward* exponential
            behaviour where the definition applies a power law function with a
            linear segment near the origin and mirrors the function for values
            less than zero (i.e. rotationally symmetric around the origin).
        -   *monCurveMirrorRev*: *Monitor Curve Mirror Reverse* exponential
            behaviour where the definition applies a power law function with a
            linear segment near the origin and mirrors the function for values
            less than zero (i.e. rotationally symmetric around the origin).
    Returns
    -------
    numeric or ndarray
        Exponentially converted data.
    Raises
    ------
    ValueError
        If the *style* is not defined.
    Examples
    --------
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001)
    0.0232240...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001)
    -0.0002054...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001, 'monCurveRev')
    0.4581151...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveRev')
    -157.7302795...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 2, 'monCurveMirrorFwd')
    0.1679399...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveMirrorFwd')
    -0.0232240...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     0.18, 2.2, 0.001, 'monCurveMirrorRev')
    0.4581151...
    >>> exponent_function_monitor_curve(  # doctest: +ELLIPSIS
    ...     -0.18, 2.2, 0.001, 'monCurveMirrorRev')
    -0.4581151...
    """
    x = as_float_array(x)
    exponent = as_float_array(exponent)
    offset = as_float_array(offset)
    with suppress_warnings(python_warnings=True):
        # Slope of the linear segment near the origin; the suppression
        # context silences the divide-by-zero warning when offset is 0.
        s = as_float_array(((exponent - 1) / offset) * ((exponent * offset) / (
            (exponent - 1) * (offset + 1))) ** exponent)
    # A zero offset makes the slope expression 0/0 (NaN); fall back to a
    # slope of 1 so the linear segment degenerates to the identity.
    s[np.isnan(s)] = 1
    def monitor_curve_forward(x):
        """
        Defines the *Monitor Curve Forward* function.
        """
        # Breakpoint between the linear and power-law segments.
        x_break = offset / (exponent - 1)
        return np.where(
            x >= x_break,
            ((x + offset) / (1 + offset)) ** exponent,
            x * s,
        )
    def monitor_curve_reverse(y):
        """
        Defines the *Monitor Curve Reverse* function.
        """
        # Image of x_break under the forward curve: breakpoint in y-space.
        y_break = ((exponent * offset) / (
            (exponent - 1) * (1 + offset))) ** exponent
        return np.where(
            y >= y_break,
            ((1 + offset) * (y ** (1 / exponent))) - offset,
            y / s,
        )
    style = style.lower()
    if style == 'moncurvefwd':
        return as_float(monitor_curve_forward(x))
    elif style == 'moncurverev':
        return as_float(monitor_curve_reverse(x))
    elif style == 'moncurvemirrorfwd':
        # Mirror: apply the forward curve to |x| and restore the sign.
        return as_float(
            np.where(
                x >= 0,
                monitor_curve_forward(x),
                -monitor_curve_forward(-x),
            ))
    elif style == 'moncurvemirrorrev':
        return as_float(
            np.where(
                x >= 0,
                monitor_curve_reverse(x),
                -monitor_curve_reverse(-x),
            ))
    else:
        raise ValueError(
            'Undefined style used: "{0}", must be one of the following: '
            '"{1}".'.format(
                style, ', '.join([
                    'monCurveFwd', 'monCurveRev', 'monCurveMirrorFwd',
                    'monCurveMirrorRev'
                ])))
|
[
"colour.utilities.suppress_warnings",
"numpy.where",
"numpy.isnan",
"colour.utilities.as_float_array"
] |
[((4646, 4663), 'colour.utilities.as_float_array', 'as_float_array', (['x'], {}), '(x)\n', (4660, 4663), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((4679, 4703), 'colour.utilities.as_float_array', 'as_float_array', (['exponent'], {}), '(exponent)\n', (4693, 4703), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((8992, 9009), 'colour.utilities.as_float_array', 'as_float_array', (['x'], {}), '(x)\n', (9006, 9009), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9025, 9049), 'colour.utilities.as_float_array', 'as_float_array', (['exponent'], {}), '(exponent)\n', (9039, 9049), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9063, 9085), 'colour.utilities.as_float_array', 'as_float_array', (['offset'], {}), '(offset)\n', (9077, 9085), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9096, 9135), 'colour.utilities.suppress_warnings', 'suppress_warnings', ([], {'python_warnings': '(True)'}), '(python_warnings=True)\n', (9113, 9135), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9149, 9261), 'colour.utilities.as_float_array', 'as_float_array', (['((exponent - 1) / offset * (exponent * offset / ((exponent - 1) * (offset +\n 1))) ** exponent)'], {}), '((exponent - 1) / offset * (exponent * offset / ((exponent - \n 1) * (offset + 1))) ** exponent)\n', (9163, 9261), False, 'from colour.utilities import as_float, as_float_array, suppress_warnings\n'), ((9474, 9546), 'numpy.where', 'np.where', (['(x >= x_break)', '(((x + offset) / (1 + offset)) ** exponent)', '(x * s)'], {}), '(x >= x_break, ((x + offset) / (1 + offset)) ** exponent, x * s)\n', (9482, 9546), True, 'import numpy as np\n'), ((9823, 9897), 'numpy.where', 'np.where', (['(y >= y_break)', '((1 + offset) * y ** (1 / exponent) - offset)', '(y / s)'], {}), '(y >= y_break, (1 + 
offset) * y ** (1 / exponent) - offset, y / s)\n', (9831, 9897), True, 'import numpy as np\n'), ((9285, 9296), 'numpy.isnan', 'np.isnan', (['s'], {}), '(s)\n', (9293, 9296), True, 'import numpy as np\n')]
|
from django.db import models
class Item(models.Model):
    # Optional display name; blank allowed so items can be unnamed.
    name = models.CharField(max_length=100, blank=True)
    # Self-referential tree link; reverse accessor is ``children``.
    parent = models.ForeignKey('Item', on_delete=models.SET_NULL, null=True, related_name='children')
    # Second self-reference with the default reverse accessor.
    item = models.ForeignKey('Item', on_delete=models.SET_NULL, null=True)
    # Class-level discriminator; DetailedItem shadows this with a DB field.
    item_type = 'simple'
    @property
    def title(self):
        # Alias for ``name``.
        return self.name
    @property
    def unoptimized_title(self):
        # Indirect access through the ``title`` property.
        return self.title
    def all_children(self):
        # All Items whose ``parent`` is this instance.
        return self.children.all()
class DetailedItem(Item):
    detail = models.TextField(null=True)
    # Shadows the parent's class-level ``item_type`` string with a DB field.
    item_type = models.CharField(max_length=100, null=True)
class RelatedItem(Item):
    # Many-to-many link from this item to arbitrary Items.
    related_items = models.ManyToManyField(Item)
class ExtraDetailedItem(DetailedItem):
    # Second level of multi-table inheritance on top of DetailedItem.
    extra_detail = models.TextField()
class UnrelatedModel(models.Model):
    # Standalone model with no relation to the Item hierarchy.
    detail = models.TextField(null=True)
class SomeOtherItem(models.Model):
    # Standalone named model; referenced by OtherItem below.
    name = models.CharField(max_length=100, blank=True)
class OtherItem(models.Model):
    name = models.CharField(max_length=100, blank=True)
    # Required link; PROTECT blocks deletion of a referenced SomeOtherItem.
    some_other_item = models.ForeignKey('SomeOtherItem', on_delete=models.PROTECT, null=False)
|
[
"django.db.models.CharField",
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.ForeignKey"
] |
[((68, 112), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (84, 112), False, 'from django.db import models\n'), ((126, 218), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Item"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'related_name': '"""children"""'}), "('Item', on_delete=models.SET_NULL, null=True,\n related_name='children')\n", (143, 218), False, 'from django.db import models\n'), ((226, 289), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""Item"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)'}), "('Item', on_delete=models.SET_NULL, null=True)\n", (243, 289), False, 'from django.db import models\n'), ((556, 583), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (572, 583), False, 'from django.db import models\n'), ((600, 643), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'null': '(True)'}), '(max_length=100, null=True)\n', (616, 643), False, 'from django.db import models\n'), ((691, 719), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['Item'], {}), '(Item)\n', (713, 719), False, 'from django.db import models\n'), ((780, 798), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (796, 798), False, 'from django.db import models\n'), ((850, 877), 'django.db.models.TextField', 'models.TextField', ([], {'null': '(True)'}), '(null=True)\n', (866, 877), False, 'from django.db import models\n'), ((926, 970), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (942, 970), False, 'from django.db import models\n'), ((1015, 1059), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(100)', 'blank': '(True)'}), '(max_length=100, blank=True)\n', (1031, 1059), False, 'from django.db import models\n'), ((1082, 1154), 
'django.db.models.ForeignKey', 'models.ForeignKey', (['"""SomeOtherItem"""'], {'on_delete': 'models.PROTECT', 'null': '(False)'}), "('SomeOtherItem', on_delete=models.PROTECT, null=False)\n", (1099, 1154), False, 'from django.db import models\n')]
|
# Generated by Django 2.2.5 on 2019-10-12 13:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text "accept/reject" status column to Order.
    dependencies = [
        ('home', '0010_order_name'),
    ]
    operations = [
        migrations.AddField(
            model_name='order',
            name='status',
            field=models.CharField(blank=True, max_length=20, null=True, verbose_name='accept/reject'),
        ),
    ]
|
[
"django.db.models.CharField"
] |
[((322, 411), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(20)', 'null': '(True)', 'verbose_name': '"""accept/reject"""'}), "(blank=True, max_length=20, null=True, verbose_name=\n 'accept/reject')\n", (338, 411), False, 'from django.db import migrations, models\n')]
|
from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE
from BUILD_Graph import build_graph
from flask import Flask, render_template, request
from flask_bootstrap import Bootstrap
# Season data source used by every view below.
file = "./CSV/1_Bundesliga_2018_2019.csv"
app = Flask(__name__)
# 1-second static-file cache, presumably so regenerated graphs are
# re-fetched promptly (see add_header below) -- confirm intent.
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 1
Bootstrap(app)
# landing Page
@app.route('/', methods=['GET'])
def index():
    """Landing page: club dropdown plus an optional name/age echo form."""
    context = {
        'site1': "Index",
        'name': request.args.get("name"),
        'age': request.args.get("age"),
        'dropdown_list': GET_ALL_CLUBS(file),
    }
    # The template only receives ``check`` when the checkbox was ticked,
    # mirroring the original's two separate render calls.
    if request.args.get('checkbox') == "on":
        context['check'] = True
    return render_template('index.html', **context)
# club information
@app.route('/club', methods=['GET', 'POST'])
def club():
    """
    Club information page.

    GET renders the page for the club chosen in the dropdown.  POST
    dispatches on the submitted button:

    - Button_1: season match plan
    - Button_2: points-per-matchday graph
    - Button_3: attack/defence analysis (goals/hits series + ratings)
    - Button_4: real vs. predicted win/lost comparison
    - Button_5: Poisson season prediction

    Fix over the original: Button_3 and Button_4 each called
    GET_ATT_DEF_ANALYSE seven times for the same (club, file) pair; the
    analysis is now computed once per branch and indexed.
    """
    club_name = request.args.get("club")
    dropdown_list = GET_ALL_CLUBS(file)
    # input dropdown (add value to the url)
    if request.method == "GET":
        if club_name is not None:
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   )
    # input buttons
    if request.method == "POST":
        # match plan
        if 'Button_1' in request.form:
            season = GET_SEASON(club_name, file)
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   season=season
                                   )
        # points
        if 'Button_2' in request.form:
            points = GET_POINTS(club_name, file)[3]
            matchday_axis = list(range(1, len(points) + 1))
            graph_table = build_graph((matchday_axis, points), "plot")
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   graph_table=graph_table
                                   )
        # analyse ATT / DEF
        if 'Button_3' in request.form:
            # Run the analysis once and index the cached result instead of
            # re-running GET_ATT_DEF_ANALYSE for every series.
            analyse = GET_ATT_DEF_ANALYSE(club_name, file)
            matchday_axis = analyse[4]
            graph_goals = build_graph((matchday_axis, analyse[0], analyse[1]),
                                      "plot")
            graph_hits = build_graph((matchday_axis, analyse[2], analyse[3]),
                                     "plot")
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   att_value=round(analyse[5], 3),
                                   def_value=round(analyse[6], 3),
                                   graph_goals=graph_goals,
                                   graph_hits=graph_hits
                                   )
        # win / lost
        if 'Button_4' in request.form:
            analyse = GET_ATT_DEF_ANALYSE(club_name, file)
            matchday_axis = analyse[4]
            graph_real = build_graph((matchday_axis, analyse[1], analyse[3]),
                                     "plot")
            graph_pred = build_graph((matchday_axis, analyse[0], analyse[2]),
                                     "plot")
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   graph_real=graph_real,
                                   graph_pred=graph_pred
                                   )
        # poisson
        if 'Button_5' in request.form:
            poisson = CALC_SEASON_POISSON(club_name, file)
            return render_template('club.html',
                                   site1="Index",
                                   club_name=club_name,
                                   dropdown_list=dropdown_list,
                                   poisson=poisson
                                   )
    # NOTE(review): when no branch matches (GET without ?club=..., or a POST
    # with an unknown button) the view falls through and returns None, which
    # Flask turns into a 500 -- behavior preserved; confirm before changing.
# update diagram
@app.after_request
def add_header(response):
    """Ensure every response carries a Cache-Control header (no-store)."""
    # setdefault only writes when the header is absent, matching the
    # original membership-check-then-assign pattern.
    response.headers.setdefault('Cache-Control', 'no-store')
    return response
if __name__ == '__main__':
    # Run the development server with the interactive debugger enabled.
    app.debug = True
    app.run()
|
[
"GET_Calc.GET_SEASON",
"BUILD_Graph.build_graph",
"flask.request.args.get",
"GET_Calc.GET_POINTS",
"flask.Flask",
"GET_Calc.GET_ATT_DEF_ANALYSE",
"GET_Calc.CALC_SEASON_POISSON",
"GET_Calc.GET_ALL_CLUBS",
"flask.render_template",
"flask_bootstrap.Bootstrap"
] |
[((294, 309), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (299, 309), False, 'from flask import Flask, render_template, request\n'), ((354, 368), 'flask_bootstrap.Bootstrap', 'Bootstrap', (['app'], {}), '(app)\n', (363, 368), False, 'from flask_bootstrap import Bootstrap\n'), ((453, 472), 'GET_Calc.GET_ALL_CLUBS', 'GET_ALL_CLUBS', (['file'], {}), '(file)\n', (466, 472), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((485, 509), 'flask.request.args.get', 'request.args.get', (['"""name"""'], {}), "('name')\n", (501, 509), False, 'from flask import Flask, render_template, request\n'), ((521, 544), 'flask.request.args.get', 'request.args.get', (['"""age"""'], {}), "('age')\n", (537, 544), False, 'from flask import Flask, render_template, request\n'), ((957, 1054), 'flask.render_template', 'render_template', (['"""index.html"""'], {'site1': '"""Index"""', 'name': 'name', 'age': 'age', 'dropdown_list': 'dropdown_list'}), "('index.html', site1='Index', name=name, age=age,\n dropdown_list=dropdown_list)\n", (972, 1054), False, 'from flask import Flask, render_template, request\n'), ((1327, 1351), 'flask.request.args.get', 'request.args.get', (['"""club"""'], {}), "('club')\n", (1343, 1351), False, 'from flask import Flask, render_template, request\n'), ((1376, 1395), 'GET_Calc.GET_ALL_CLUBS', 'GET_ALL_CLUBS', (['file'], {}), '(file)\n', (1389, 1395), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((588, 616), 'flask.request.args.get', 'request.args.get', (['"""checkbox"""'], {}), "('checkbox')\n", (604, 616), False, 'from flask import Flask, render_template, request\n'), ((644, 753), 'flask.render_template', 'render_template', (['"""index.html"""'], {'site1': '"""Index"""', 'name': 'name', 'age': 'age', 'check': '(True)', 'dropdown_list': 'dropdown_list'}), "('index.html', site1='Index', 
name=name, age=age, check=True,\n dropdown_list=dropdown_list)\n", (659, 753), False, 'from flask import Flask, render_template, request\n'), ((1537, 1634), 'flask.render_template', 'render_template', (['"""club.html"""'], {'site1': '"""Index"""', 'club_name': 'club_name', 'dropdown_list': 'dropdown_list'}), "('club.html', site1='Index', club_name=club_name,\n dropdown_list=dropdown_list)\n", (1552, 1634), False, 'from flask import Flask, render_template, request\n'), ((1919, 1946), 'GET_Calc.GET_SEASON', 'GET_SEASON', (['club_name', 'file'], {}), '(club_name, file)\n', (1929, 1946), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((1968, 2080), 'flask.render_template', 'render_template', (['"""club.html"""'], {'site1': '"""Index"""', 'club_name': 'club_name', 'dropdown_list': 'dropdown_list', 'season': 'season'}), "('club.html', site1='Index', club_name=club_name,\n dropdown_list=dropdown_list, season=season)\n", (1983, 2080), False, 'from flask import Flask, render_template, request\n'), ((2486, 2513), 'BUILD_Graph.build_graph', 'build_graph', (['values', '"""plot"""'], {}), "(values, 'plot')\n", (2497, 2513), False, 'from BUILD_Graph import build_graph\n'), ((2536, 2658), 'flask.render_template', 'render_template', (['"""club.html"""'], {'site1': '"""Index"""', 'club_name': 'club_name', 'dropdown_list': 'dropdown_list', 'graph_table': 'graph_table'}), "('club.html', site1='Index', club_name=club_name,\n dropdown_list=dropdown_list, graph_table=graph_table)\n", (2551, 2658), False, 'from flask import Flask, render_template, request\n'), ((2956, 2992), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (2975, 2992), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3468, 3497), 'BUILD_Graph.build_graph', 'build_graph', (['values_1', '"""plot"""'], 
{}), "(values_1, 'plot')\n", (3479, 3497), False, 'from BUILD_Graph import build_graph\n'), ((3523, 3552), 'BUILD_Graph.build_graph', 'build_graph', (['values_2', '"""plot"""'], {}), "(values_2, 'plot')\n", (3534, 3552), False, 'from BUILD_Graph import build_graph\n'), ((4210, 4246), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4229, 4246), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4726, 4755), 'BUILD_Graph.build_graph', 'build_graph', (['values_1', '"""plot"""'], {}), "(values_1, 'plot')\n", (4737, 4755), False, 'from BUILD_Graph import build_graph\n'), ((4781, 4810), 'BUILD_Graph.build_graph', 'build_graph', (['values_2', '"""plot"""'], {}), "(values_2, 'plot')\n", (4792, 4810), False, 'from BUILD_Graph import build_graph\n'), ((4831, 4974), 'flask.render_template', 'render_template', (['"""club.html"""'], {'site1': '"""Index"""', 'club_name': 'club_name', 'dropdown_list': 'dropdown_list', 'graph_real': 'graph_real', 'graph_pred': 'graph_pred'}), "('club.html', site1='Index', club_name=club_name,\n dropdown_list=dropdown_list, graph_real=graph_real, graph_pred=graph_pred)\n", (4846, 4974), False, 'from flask import Flask, render_template, request\n'), ((5328, 5364), 'GET_Calc.CALC_SEASON_POISSON', 'CALC_SEASON_POISSON', (['club_name', 'file'], {}), '(club_name, file)\n', (5347, 5364), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((5386, 5500), 'flask.render_template', 'render_template', (['"""club.html"""'], {'site1': '"""Index"""', 'club_name': 'club_name', 'dropdown_list': 'dropdown_list', 'poisson': 'poisson'}), "('club.html', site1='Index', club_name=club_name,\n dropdown_list=dropdown_list, poisson=poisson)\n", (5401, 5500), False, 'from flask import Flask, render_template, request\n'), ((2334, 2361), 'GET_Calc.GET_POINTS', 
'GET_POINTS', (['club_name', 'file'], {}), '(club_name, file)\n', (2344, 2361), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3013, 3049), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3032, 3049), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3072, 3108), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3091, 3108), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3131, 3167), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3150, 3167), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3191, 3227), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3210, 3227), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3250, 3286), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3269, 3286), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((3309, 3345), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (3328, 3345), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4267, 4303), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4286, 4303), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, 
GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4327, 4363), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4346, 4363), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4388, 4424), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4407, 4424), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4448, 4484), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4467, 4484), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4508, 4544), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4527, 4544), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n'), ((4568, 4604), 'GET_Calc.GET_ATT_DEF_ANALYSE', 'GET_ATT_DEF_ANALYSE', (['club_name', 'file'], {}), '(club_name, file)\n', (4587, 4604), False, 'from GET_Calc import GET_ALL_GOALS, GET_POINTS, GET_SEASON, CALC_SEASON_POISSON, GET_ALL_CLUBS, GET_ATT_DEF_ANALYSE\n')]
|
"""
Copyright 2017-2019 Government of Canada - Public Services and Procurement Canada - buyandsell.gc.ca
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from time import time
from typing import Callable, Sequence, Union
from indy import anoncreds, crypto, did, non_secrets, wallet
from indy.error import IndyError, ErrorCode
from von_anchor.canon import canon_non_secret_wql, canon_pairwise_wql
from von_anchor.error import (
AbsentRecord,
AbsentMessage,
AbsentWallet,
BadAccess,
BadKey,
BadIdentifier,
BadRecord,
ExtantRecord,
ExtantWallet,
WalletState)
from von_anchor.util import ok_did
from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags
from von_anchor.wallet.record import StorageRecord, TYPE_PAIRWISE, TYPE_LINK_SECRET_LABEL
# Module-level logger, named for this module per standard logging convention
LOGGER = logging.getLogger(__name__)
class Wallet:
    """
    Class encapsulating an indy-sdk wallet: lifecycle (create/open/close/remove),
    DID and key management, pairwise relations, link secret, and non-secret
    storage records.
    """
    DEFAULT_CHUNK = 256  # chunk size in searching credentials, non-secret storage records
def __init__(self, indy_config: dict, von_config: dict) -> None:
"""
Initializer for wallet that WalletManager created. Store configuration and access credentials value.
Actuators should prefer WalletManager.get() to calling this initializer directly - the wallet manager
filters wallet configuration through preset defaults.
:param indy_config: configuration for indy-sdk wallet
:param von_config: VON wallet configuration particulars:
- 'seed': (optional) seed to use on creation
- 'did': (optional) anchor DID to use on creation
- 'link_secret_label': (optional) label to use to create link secret
- 'auto_create': whether to create wallet automatically on first open
- 'auto_remove': whether to remove wallet automatically on next close
- 'access': wallet access credentials value
"""
LOGGER.debug('Wallet.__init__ >>> indy_config %s, von_config %s', indy_config, von_config)
self._handle = None
self._indy_config = {**indy_config} # make a copy
self._von_config = {**von_config}
self._did = None
self._verkey = None
LOGGER.debug('Wallet.__init__ <<<')
@property
def name(self) -> str:
"""
Accessor for wallet name, as configuration retains at key 'id'.
:return: wallet name
"""
return self.config['id']
@property
def handle(self) -> int:
"""
Accessor for indy-sdk wallet handle.
:return: indy-sdk wallet handle
"""
return self._handle
@property
def opened(self) -> bool:
"""
Accessor for indy-sdk wallet state: True for open, False for closed.
:return: indy-sdk wallet state
"""
return bool(self._handle)
@property
def config(self) -> dict:
"""
Accessor for wallet config.
:return: wallet config
"""
return self._indy_config
@property
def auto_create(self) -> bool:
"""
Accessor for auto_create wallet config setting.
:return: auto_create wallet config setting
"""
return self._von_config['auto_create']
@auto_create.setter
def auto_create(self, value: bool) -> None:
"""
Set auto_create wallet config behaviour.
:param value: auto_create
"""
self._von_config['auto_create'] = value
@property
def auto_remove(self) -> bool:
"""
Accessor for auto_remove wallet config setting.
:return: auto_remove wallet config setting
"""
return self._von_config['auto_remove']
@auto_remove.setter
def auto_remove(self, value: bool) -> None:
"""
Set auto_remove wallet config behaviour.
:param value: auto_remove
"""
self._von_config['auto_remove'] = value
@property
def access_creds(self) -> dict:
"""
Accessor for wallet access credentials.
:return: wallet access credentials
"""
return {'key': self._von_config['access']}
@property
def access(self) -> str:
"""
Accessor for wallet access credentials value.
:return: wallet access credentials value
"""
return self._von_config['access']
@property
def storage_type(self) -> str:
"""
Accessor for wallet type, as configuration retains at key 'storage_type'.
:return: wallet type
"""
return self.config['storage_type']
@property
def did(self) -> str:
"""
Accessor for anchor DID in wallet.
:return: anchor DID in wallet
"""
return self._did
@did.setter
def did(self, value: str) -> None:
"""
Set anchor DID in wallet.
:param value: anchor DID
"""
self._did = value
@property
def verkey(self) -> str:
"""
Accessor for wallet verification key.
:return: wallet verification key
"""
return self._verkey
@verkey.setter
def verkey(self, value: str) -> None:
"""
Set verification key.
:param value: verification key
"""
self._verkey = value
    async def create_signing_key(self, seed: str = None, metadata: dict = None) -> KeyInfo:
        """
        Create a new signing key pair.
        Raise WalletState if wallet is closed, ExtantRecord if verification key already exists.
        :param seed: optional seed allowing deterministic key creation
        :param metadata: optional metadata to store with key pair
        :return: KeyInfo for new key pair
        """
        LOGGER.debug('Wallet.create_signing_key >>> seed: [SEED], metadata: %s', metadata)  # never log the seed itself
        if not self.handle:
            LOGGER.debug('Wallet.create_signing_key <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        try:
            # deterministic key when seed supplied, random otherwise (empty config)
            verkey = await crypto.create_key(self.handle, json.dumps({'seed': seed} if seed else {}))
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
                LOGGER.debug('Wallet.create_signing_key <!< Verification key already present in wallet %s', self.name)
                raise ExtantRecord('Verification key already present in wallet {}'.format(self.name))
            LOGGER.debug('Wallet.create_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
            raise
        await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {}))  # coerce None to empty
        rv = KeyInfo(verkey, metadata or {})
        LOGGER.debug('Wallet.create_signing_key <<< %s', rv)
        return rv
async def get_signing_key(self, verkey: str) -> KeyInfo:
"""
Get signing key pair for input verification key.
Raise WalletState if wallet is closed, AbsentRecord for no such key pair.
:param verkey: verification key of key pair
:return: KeyInfo for key pair
"""
LOGGER.debug('Wallet.get_signing_key >>> seed: [SEED], verkey: %s', verkey)
if not self.handle:
LOGGER.debug('Wallet.get_signing_key <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
metadata = await crypto.get_key_metadata(self.handle, verkey)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.debug('Wallet.get_signing_key <!< Verification key %s not in wallet %s', verkey, self.name)
raise AbsentRecord('Verification key not in wallet {}'.format(self.name))
LOGGER.debug('Wallet.get_signing_key <!< indy-sdk raised error %s', x_indy.error_code)
raise
rv = KeyInfo(verkey, json.loads(metadata) if metadata else {})
LOGGER.debug('Wallet.get_signing_key <<< %s', rv)
return rv
    async def replace_signing_key_metadata(self, verkey: str, metadata: dict) -> KeyInfo:
        """
        Replace the metadata associated with a signing key pair.
        Raise WalletState if wallet is closed, AbsentRecord for no such key pair.
        :param verkey: verification key of key pair
        :param metadata: new metadata to store
        :return: resulting KeyInfo for key pair
        """
        LOGGER.debug('Wallet.replace_signing_key_metadata >>> verkey: %s, metadata: %s', verkey, metadata)
        if not self.handle:
            LOGGER.debug('Wallet.replace_signing_key_metadata <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        try:
            # probe for existence only; result discarded, raises if key pair absent
            await crypto.get_key_metadata(self.handle, verkey)
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemNotFound:
                LOGGER.debug(
                    'Wallet.replace_signing_key_metadata <!< Verification key %s not in wallet %s',
                    verkey,
                    self.name)
                raise AbsentRecord('Verification key not in wallet {}'.format(self.name))
            LOGGER.debug('Wallet.replace_signing_key_metadata <!< indy-sdk raised error %s', x_indy.error_code)
            raise
        await crypto.set_key_metadata(self.handle, verkey, json.dumps(metadata or {}))  # coerce None to empty
        rv = await self.get_signing_key(verkey)  # read back to report what the wallet actually stores
        LOGGER.debug('Wallet.replace_signing_key_metadata <<< %s', rv)
        return rv
    async def create_local_did(self, seed: str = None, loc_did: str = None, metadata: dict = None) -> DIDInfo:
        """
        Create and store a new local DID for use in pairwise DID relations.
        Raise WalletState if wallet is closed, ExtantRecord if DID already present.
        :param seed: seed from which to create (default random)
        :param loc_did: local DID value (default None to let indy-sdk generate)
        :param metadata: metadata to associate with the local DID
            (operation always sets 'since', 'modified' epoch timestamps)
        :return: DIDInfo for new local DID
        """
        LOGGER.debug('Wallet.create_local_did >>> seed: [SEED] loc_did: %s metadata: %s', loc_did, metadata)  # never log the seed
        cfg = {}
        if seed:
            cfg['seed'] = seed
        if loc_did:
            cfg['did'] = loc_did
        if not self.handle:
            LOGGER.debug('Wallet.create_local_did <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        try:
            (created_did, verkey) = await did.create_and_store_my_did(self.handle, json.dumps(cfg))
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.DidAlreadyExistsError:
                LOGGER.debug('Wallet.create_local_did <!< DID %s already present in wallet %s', loc_did, self.name)
                raise ExtantRecord('Local DID {} already present in wallet {}'.format(loc_did, self.name))
            LOGGER.debug('Wallet.create_local_did <!< indy-sdk raised error %s', x_indy.error_code)
            raise
        now = int(time())
        # creation time: 'since' and 'modified' start out equal
        loc_did_metadata = {**(metadata or {}), 'since': now, 'modified': now}
        await did.set_did_metadata(self.handle, created_did, json.dumps(loc_did_metadata))
        rv = DIDInfo(created_did, verkey, loc_did_metadata)
        LOGGER.debug('Wallet.create_local_did <<< %s', rv)
        return rv
async def replace_local_did_metadata(self, loc_did: str, metadata: dict) -> DIDInfo:
"""
Replace the metadata associated with a local DID.
Raise WalletState if wallet is closed, AbsentRecord for no such local DID.
:param loc_did: local DID of interest
:param metadata: new metadata to store
:return: DIDInfo for local DID after write
"""
LOGGER.debug('Wallet.replace_local_did_metadata >>> loc_did: %s, metadata: %s', loc_did, metadata)
old = await self.get_local_did(loc_did) # raises exceptions if applicable
now = int(time())
loc_did_metadata = {**(metadata or {}), 'since': (old.metadata or {}).get('since', now), 'modified': now}
try:
await did.set_did_metadata(self.handle, loc_did, json.dumps(loc_did_metadata))
except IndyError as x_indy:
LOGGER.debug('Wallet.replace_local_did_metadata <!< indy-sdk raised error %s', x_indy.error_code)
raise
rv = await self.get_local_did(loc_did)
LOGGER.debug('Wallet.replace_local_did_metadata <<< %s', rv)
return rv
async def get_local_dids(self) -> Sequence[DIDInfo]:
"""
Get list of DIDInfos for local DIDs.
:return: list of local DIDInfos
"""
LOGGER.debug('Wallet.get_local_dids >>>')
dids_with_meta = json.loads(did.list_my_dids_with_meta(self.handle)) # list
rv = []
for did_with_meta in dids_with_meta:
meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
if meta.get('anchor', False):
continue # exclude anchor DIDs past and present
rv.append(DIDInfo(did_with_meta['did'], did_with_meta['verkey'], meta))
LOGGER.debug('Wallet.get_local_dids <<< %s', rv)
return rv
    async def get_local_did(self, loc: str) -> DIDInfo:
        """
        Get local DID info by local DID or verification key.
        Raise WalletState if wallet is closed, AbsentRecord for no such local DID.
        :param loc: DID or verification key of interest
        :return: DIDInfo for local DID
        """
        LOGGER.debug('Wallet.get_local_did >>> loc: %s', loc)
        if not self.handle:
            LOGGER.debug('Wallet.get_local_did <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed', self.name) if False else WalletState('Wallet {} is closed'.format(self.name))
        if ok_did(loc):  # it's a DID
            try:
                did_with_meta = json.loads(await did.get_my_did_with_meta(self.handle, loc))
                rv = DIDInfo(
                    did_with_meta['did'],
                    did_with_meta['verkey'],
                    json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})  # nudge None to empty
            except IndyError as x_indy:
                if x_indy.error_code == ErrorCode.WalletItemNotFound:
                    LOGGER.debug('Wallet.get_local_did <!< DID %s not present in wallet %s', loc, self.name)
                    raise AbsentRecord('Local DID {} not present in wallet {}'.format(loc, self.name))
                LOGGER.debug('Wallet.get_local_did <!< indy-sdk raised error %s', x_indy.error_code)
                raise
        else:  # it's a verkey
            # no direct indy lookup by verkey: scan the full listing
            dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle))  # list
            for did_with_meta in dids_with_meta:
                if did_with_meta['verkey'] == loc:
                    rv = DIDInfo(
                        did_with_meta['did'],
                        did_with_meta['verkey'],
                        json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {})
                    break
            else:  # for/else: loop exhausted without break, i.e. no match
                LOGGER.debug('Wallet.get_local_did <!< Wallet %s has no local DID for verkey %s', self.name, loc)
                raise AbsentRecord('Wallet {} has no local DID for verkey {}'.format(self.name, loc))
        LOGGER.debug('Wallet.get_local_did <<< %s', rv)
        return rv
async def get_anchor_did(self) -> str:
"""
Get current anchor DID by metadata, None for not yet set.
:return: DID
"""
LOGGER.debug('Wallet.get_anchor_did >>>')
if not self.handle:
LOGGER.debug('Wallet.get_anchor_did <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = None
dids_with_meta = json.loads(await did.list_my_dids_with_meta(self.handle)) # list
latest = 0
for did_with_meta in dids_with_meta:
try:
meta = json.loads(did_with_meta['metadata']) if did_with_meta['metadata'] else {}
if not meta.get('anchor', False):
continue
if isinstance(meta, dict) and meta.get('since', -1) > latest:
rv = did_with_meta.get('did')
except json.decoder.JSONDecodeError:
continue # it's not an anchor DID, carry on
LOGGER.debug('Wallet.get_anchor_did <<< %s', rv)
return rv
    async def create_link_secret(self, label: str) -> None:
        """
        Create link secret (a.k.a. master secret) used in proofs by HolderProver, if the
        current link secret does not already correspond to the input link secret label.
        Raise WalletState if wallet is closed, or any other IndyError causing failure
        to set link secret in wallet.
        :param label: label for link secret; indy-sdk uses label to generate link secret
        """
        LOGGER.debug('Wallet.create_link_secret >>> label: %s', label)
        if not self.handle:
            LOGGER.debug('Wallet.create_link_secret <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        try:
            await anoncreds.prover_create_master_secret(self.handle, label)
            await self._write_link_secret_label(label)  # record the label in non-secret storage
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.AnoncredsMasterSecretDuplicateNameError:
                # link secret already exists under this label: not an error, but still
                # (re)write the label record so bookkeeping stays current
                LOGGER.warning(
                    'Wallet %s link secret already current: abstaining from updating label record', self.name)
                await self._write_link_secret_label(label)
            else:
                LOGGER.debug(
                    'Wallet.create_link_secret <!< cannot create link secret for wallet %s, indy error code %s',
                    self.name,
                    x_indy.error_code)
                raise
        LOGGER.debug('Wallet.create_link_secret <<<')
async def _write_link_secret_label(self, label) -> None:
"""
Update non-secret storage record with link secret label.
:param label: link secret label
"""
LOGGER.debug('Wallet._write_link_secret_label <<< %s', label)
if await self.get_link_secret_label() == label:
LOGGER.info('Wallet._write_link_secret_label abstaining - already current')
else:
await self.write_non_secret(StorageRecord(
TYPE_LINK_SECRET_LABEL,
label,
tags=None,
ident=str(int(time())))) # indy requires str
LOGGER.debug('Wallet._write_link_secret_label <<<')
async def get_link_secret_label(self) -> str:
"""
Get current link secret label from non-secret storage records; return None for no match.
:return: latest non-secret storage record for link secret label
"""
LOGGER.debug('Wallet.get_link_secret_label >>>')
if not self.handle:
LOGGER.debug('Wallet.get_link_secret <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = None
records = await self.get_non_secret(TYPE_LINK_SECRET_LABEL)
if records:
rv = records[str(max(int(k) for k in records))].value # str to int, max, and back again
LOGGER.debug('Wallet.get_link_secret_label <<< %s', rv)
return rv
async def __aenter__(self) -> 'Wallet':
"""
Context manager entry. Open wallet as configured, for closure on context manager exit.
For use in monolithic call opening, using, and closing wallet.
Raise any IndyError causing failure to open wallet, or AbsentWallet on attempt to enter wallet
not yet created.
:return: current object
"""
LOGGER.debug('Wallet.__aenter__ >>>')
rv = await self.open()
LOGGER.debug('Wallet.__aenter__ <<<')
return rv
async def open(self) -> 'Wallet':
"""
Explicit entry. Open wallet as configured, for later closure via close().
For use when keeping wallet open across multiple calls.
Raise any IndyError causing failure to open wallet, WalletState if wallet already open,
or AbsentWallet on attempt to enter wallet not yet created.
:return: current object
"""
LOGGER.debug('Wallet.open >>>')
created = False
while True:
try:
self._handle = await wallet.open_wallet(
json.dumps(self.config),
json.dumps(self.access_creds))
LOGGER.info('Opened wallet %s on handle %s', self.name, self.handle)
break
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletNotFoundError:
if created:
LOGGER.debug('Wallet.open() <!< Wallet %s not found after creation', self.name)
raise AbsentWallet('Wallet {} not found after creation'.format(self.name))
if self.auto_create:
await self.create()
continue
else:
LOGGER.debug('Wallet.open() <!< Wallet %s not found', self.name)
raise AbsentWallet('Wallet {} not found'.format(self.name))
elif x_indy.error_code == ErrorCode.WalletAlreadyOpenedError:
LOGGER.debug('Wallet.open() <!< Wallet %s is already open', self.name)
raise WalletState('Wallet {} is already open'.format(self.name))
elif x_indy.error_code == ErrorCode.WalletAccessFailed:
LOGGER.debug('Wallet.open() <!< Bad access credentials value for wallet %s', self.name)
raise BadAccess('Bad access credentials value for wallet {}'.format(self.name))
LOGGER.debug('Wallet %s open raised indy error %s', self.name, x_indy.error_code)
raise
self.did = await self.get_anchor_did()
self.verkey = await did.key_for_local_did(self.handle, self.did) if self.did else None
LOGGER.info('Wallet %s got verkey %s for existing DID %s', self.name, self.verkey, self.did)
LOGGER.debug('Wallet.open <<<')
return self
    async def create(self) -> None:
        """
        Persist the wallet. Raise ExtantWallet if it already exists.
        Actuators should prefer WalletManager.create() to calling this method directly - the wallet manager
        filters wallet configuration through preset defaults.
        """
        LOGGER.debug('Wallet.create >>>')
        try:
            await wallet.create_wallet(
                config=json.dumps(self.config),
                credentials=json.dumps(self.access_creds))
            LOGGER.info('Created wallet %s', self.name)
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletAlreadyExistsError:
                LOGGER.debug('Wallet.create <!< Wallet %s already exists', self.name)
                raise ExtantWallet('Wallet {} already exists'.format(self.name))
            LOGGER.debug(
                'Wallet.create <!< indy error code %s on creation of wallet %s',
                x_indy.error_code,
                self.name)
            raise
        auto_remove = self.auto_remove
        self.auto_remove = False  # defer past this creation process
        # open once to seed the anchor DID (and link secret, if configured), then close
        async with self:
            did_info = await self.create_local_did(
                self._von_config.get('seed', None),
                self._von_config.get('did', None),
                {'anchor': True})
            self.did = did_info.did
            self.verkey = did_info.verkey
            if 'link_secret_label' in self._von_config:
                await self.create_link_secret(self._von_config['link_secret_label'])
        self.auto_remove = auto_remove  # restore caller's setting
        LOGGER.debug('Wallet.create <<<')
async def __aexit__(self, exc_type, exc, traceback) -> None:
"""
Context manager exit. Close wallet (and delete if so configured).
For use in monolithic call opening, using, and closing the wallet.
:param exc_type:
:param exc:
:param traceback:
"""
LOGGER.debug('Wallet.__aexit__ >>>')
await self.close()
LOGGER.debug('Wallet.__aexit__ <<<')
async def close(self) -> None:
"""
Explicit exit. Close wallet (and delete if so configured).
"""
LOGGER.debug('Wallet.close >>>')
if not self.handle:
LOGGER.warning('Abstaining from closing wallet %s: already closed', self.name)
else:
LOGGER.debug('Closing wallet %s', self.name)
await wallet.close_wallet(self.handle)
self._handle = None
if self.auto_remove:
LOGGER.info('Automatically removing wallet %s', self.name)
await self.remove()
self._handle = None
LOGGER.debug('Wallet.close <<<')
async def remove(self) -> bool:
"""
Remove serialized wallet, best effort, if it exists. Return whether wallet absent after operation
(removal successful or else not present a priori).
Raise WalletState if wallet is open.
:return: whether wallet gone from persistent storage
"""
LOGGER.debug('Wallet.remove >>>')
if self.handle:
LOGGER.debug('Wallet.remove <!< Wallet %s is open', self.name)
raise WalletState('Wallet {} is open'.format(self.name))
rv = True
try:
LOGGER.info('Attempting to remove wallet: %s', self.name)
await wallet.delete_wallet(
json.dumps(self.config),
json.dumps(self.access_creds))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletNotFoundError:
LOGGER.info('Wallet %s not present; abstaining from removal', self.name)
else:
LOGGER.info('Failed wallet %s removal; indy-sdk error code %s', self.name, x_indy.error_code)
rv = False
LOGGER.debug('Wallet.remove <<< %s', rv)
return rv
    async def write_pairwise(
            self,
            their_did: str,
            their_verkey: str = None,
            my_did: str = None,
            metadata: dict = None,
            replace_meta: bool = False) -> PairwiseInfo:
        """
        Store a pairwise DID for a secure connection. Use verification key for local DID in wallet if
        supplied; otherwise, create one first. If local DID specified but not present, raise AbsentRecord.
        With supplied metadata, replace or augment and overwrite any existing metadata for the pairwise
        relation if one already exists in the wallet. Always include local and remote DIDs and keys in
        metadata to allow for WQL search.
        Raise AbsentRecord on call to update a non-existent record. Raise BadRecord if metadata does not
        coerce into non-secrets API tags specification {str:str}.
        :param their_did: remote DID
        :param their_verkey: remote verification key (default None is OK if updating an existing pairwise DID)
        :param my_did: local DID (default None prompts operation to generate one at random)
        :param metadata: metadata for pairwise connection
        :param replace_meta: whether to (True) replace or (False) augment and overwrite existing metadata
        :return: resulting PairwiseInfo
        """
        LOGGER.debug(
            'Wallet.write_pairwise >>> their_did: %s, their_verkey: %s, my_did: %s, metadata: %s, replace_meta: %s',
            their_did,
            their_verkey,
            my_did,
            metadata,
            replace_meta)
        if their_verkey is None:
            # updating an existing pairwise relation: recover remote verkey from the stored record
            match = await self.get_pairwise(their_did)
            if not match:
                LOGGER.debug(
                    'Wallet.write_pairwise <!< Wallet %s has no pairwise DID on %s to update',
                    self.name,
                    their_did)
                raise AbsentRecord('Wallet {} has no pairwise DID on {} to update'.format(self.name, their_did))
            their_verkey = [pwise for pwise in match.values()][0].their_verkey
        try:
            await did.store_their_did(self.handle, json.dumps({'did': their_did, 'verkey': their_verkey}))
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemAlreadyExists:
                pass  # exists already, carry on
            else:
                LOGGER.debug(
                    'Wallet.write_pairwise <!< Wallet %s write of their_did %s raised indy error code %s',
                    self.name,
                    their_did,
                    x_indy.error_code)
                raise
        if my_did:
            my_did_info = await self.get_local_did(my_did)  # raises AbsentRecord if no such local did
        else:
            my_did_info = await self.create_local_did(None, None, {'pairwise_for': their_did})
        pairwise = PairwiseInfo(their_did, their_verkey, my_did_info.did, my_did_info.verkey, metadata)
        try:
            # persist as a non-secret record keyed on remote DID, metadata flattened into tags
            storec = await self.write_non_secret(
                StorageRecord(TYPE_PAIRWISE, their_verkey, tags=pairwise_info2tags(pairwise), ident=their_did),
                replace_meta)
        except BadRecord:
            LOGGER.debug(
                'Wallet.write_pairwise <!< Pairwise metadata %s does not coerce into flat {str:str} tags dict',
                pairwise.metadata)
            raise
        rv = storage_record2pairwise_info(storec)
        LOGGER.debug('Wallet.write_pairwise <<< %s', rv)
        return rv
async def delete_pairwise(self, their_did: str) -> None:
"""
Remove a pairwise DID record by its remote DID. Silently return if no such record is present.
Raise WalletState for closed wallet, or BadIdentifier for invalid pairwise DID.
:param their_did: remote DID marking pairwise DID to remove
"""
LOGGER.debug('Wallet.delete_pairwise >>> their_did: %s', their_did)
if not ok_did(their_did):
LOGGER.debug('Wallet.delete_pairwise <!< Bad DID %s', their_did)
raise BadIdentifier('Bad DID {}'.format(their_did))
await self.delete_non_secret(TYPE_PAIRWISE, their_did)
LOGGER.debug('Wallet.delete_pairwise <<<')
    async def get_pairwise(self, pairwise_filt: str = None) -> dict:
        """
        Return dict mapping each pairwise DID of interest in wallet to its pairwise info, or,
        for no filter specified, mapping them all. If wallet has no such item, return empty dict.
        Raise WalletState if wallet is closed.
        :param pairwise_filt: remote DID of interest, or WQL json (default all)
        :return: dict mapping remote DIDs to PairwiseInfo
        """
        LOGGER.debug('Wallet.get_pairwise >>> pairwise_filt: %s', pairwise_filt)
        if not self.handle:
            LOGGER.debug('Wallet.get_pairwise <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        # a DID (or None) passes through as record ident filter; anything else is parsed as WQL json
        storecs = await self.get_non_secret(
            TYPE_PAIRWISE,
            pairwise_filt if not pairwise_filt or ok_did(pairwise_filt) else json.loads(pairwise_filt),
            canon_pairwise_wql)
        rv = {k: storage_record2pairwise_info(storecs[k]) for k in storecs}  # touch up tags, mute leading ~
        LOGGER.debug('Wallet.get_pairwise <<< %s', rv)
        return rv
    async def write_non_secret(self, storec: StorageRecord, replace_meta: bool = False) -> StorageRecord:
        """
        Add or update non-secret storage record to the wallet; return resulting wallet non-secret record.

        Raise WalletState for closed wallet, or BadRecord for tags that do not
        form a flat {str: str} dict.

        :param storec: non-secret storage record
        :param replace_meta: whether to replace any existing metadata on matching record or to augment it
        :return: non-secret storage record as it appears in the wallet after write
        """
        LOGGER.debug('Wallet.write_non_secret >>> storec: %s, replace_meta: %s', storec, replace_meta)
        if not self.handle:
            LOGGER.debug('Wallet.write_non_secret <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        if not StorageRecord.ok_tags(storec.tags):
            LOGGER.debug('Wallet.write_non_secret <!< bad storage record tags %s; use flat {str: str} dict', storec)
            raise BadRecord('Bad storage record tags {}; use flat {{str:str}} dict'.format(storec))
        try:
            # look up any existing record by (type, id); raises WalletItemNotFound if absent
            record = json.loads(await non_secrets.get_wallet_record(
                self.handle,
                storec.type,
                storec.id,
                json.dumps({
                    'retrieveType': False,
                    'retrieveValue': True,
                    'retrieveTags': True
                })))
            if record['value'] != storec.value:
                # record exists with a different value: overwrite the value in place
                await non_secrets.update_wallet_record_value(
                    self.handle,
                    storec.type,
                    storec.id,
                    storec.value)
        except IndyError as x_indy:
            if x_indy.error_code == ErrorCode.WalletItemNotFound:
                # no such record yet: create it, tags and all
                await non_secrets.add_wallet_record(
                    self.handle,
                    storec.type,
                    storec.id,
                    storec.value,
                    json.dumps(storec.tags) if storec.tags else None)
            else:
                LOGGER.debug(
                    'Wallet.write_non_secret <!< Wallet lookup raised indy error code %s',
                    x_indy.error_code)
                raise
        else:
            # record existed (lookup succeeded): reconcile tags per replace_meta
            if (record['tags'] or None) != storec.tags:  # record maps no tags to {}, not None
                # replace_meta replaces tags wholesale; otherwise merge, new tags winning on collision
                tags = (storec.tags or {}) if replace_meta else {**record['tags'], **(storec.tags or {})}
                await non_secrets.update_wallet_record_tags(
                    self.handle,
                    storec.type,
                    storec.id,
                    json.dumps(tags))  # indy-sdk takes '{}' instead of None for null tags
        # re-read so the return value reflects what the wallet actually holds after the write
        record = json.loads(await non_secrets.get_wallet_record(
            self.handle,
            storec.type,
            storec.id,
            json.dumps({
                'retrieveType': False,
                'retrieveValue': True,
                'retrieveTags': True
            })))
        rv = StorageRecord(storec.type, record['value'], tags=record.get('tags', None), ident=record['id'])
        LOGGER.debug('Wallet.write_non_secret <<< %s', rv)
        return rv
async def delete_non_secret(self, typ: str, ident: str) -> None:
"""
Remove a non-secret record by its type and identifier. Silently return if no such record is present.
Raise WalletState for closed wallet.
:param typ: non-secret storage record type
:param ident: non-secret storage record identifier
"""
LOGGER.debug('Wallet.delete_non_secret >>> typ: %s, ident: %s', typ, ident)
if not self.handle:
LOGGER.debug('Wallet.delete_non_secret <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
try:
await non_secrets.delete_wallet_record(self.handle, typ, ident)
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.info('Wallet.delete_non_secret <!< no record for type %s on identifier %s', typ, ident)
else:
LOGGER.debug(
'Wallet.delete_non_secret <!< deletion of %s record on identifier %s raised indy error code %s',
typ,
ident,
x_indy.error_code)
raise
LOGGER.debug('Wallet.delete_non_secret <<<')
    async def get_non_secret(
            self,
            typ: str,
            filt: Union[dict, str] = None,
            canon_wql: Callable[[dict], dict] = None,
            limit: int = None) -> dict:
        """
        Return dict mapping each non-secret storage record of interest by identifier or,
        for no filter specified, mapping them all. If wallet has no such item, return empty dict.

        Raise WalletState for closed wallet.

        :param typ: non-secret storage record type
        :param filt: non-secret storage record identifier or WQL json (default all)
        :param canon_wql: WQL canonicalization function (default von_anchor.canon.canon_non_secret_wql())
        :param limit: maximum number of results to return (default no limit)
        :return: dict mapping identifiers to non-secret storage records
        """
        LOGGER.debug('Wallet.get_non_secret >>> typ: %s, filt: %s, canon_wql: %s', typ, filt, canon_wql)
        if not self.handle:
            LOGGER.debug('Wallet.get_non_secret <!< Wallet %s is closed', self.name)
            raise WalletState('Wallet {} is closed'.format(self.name))
        records = []
        if isinstance(filt, str):  # ordinary lookup by value
            try:
                # single-record fetch by identifier; absent record leaves records empty
                records = [json.loads(await non_secrets.get_wallet_record(
                    self.handle,
                    typ,
                    filt,
                    json.dumps({
                        'retrieveType': False,
                        'retrieveValue': True,
                        'retrieveTags': True
                    })))]
            except IndyError as x_indy:
                if x_indy.error_code == ErrorCode.WalletItemNotFound:
                    pass  # no match: return empty dict per contract
                else:
                    LOGGER.debug(
                        'Wallet.get_non_secret <!< Wallet %s lookup raised indy exception %s',
                        self.name,
                        x_indy.error_code)
                    raise
        else:
            # WQL (or no) filter: run a wallet search and page through results
            canon = canon_wql or canon_non_secret_wql
            s_handle = await non_secrets.open_wallet_search(
                self.handle,
                typ,
                json.dumps(canon(filt or {})),
                json.dumps({
                    'retrieveRecords': True,
                    'retrieveTotalCount': True,
                    'retrieveType': False,
                    'retrieveValue': True,
                    'retrieveTags': True
                }))
            records = []
            # fetch of 0 records still reports the total count for the search
            cardinality = int(json.loads(
                await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, 0))['totalCount'])
            chunk = min(cardinality, limit or cardinality, Wallet.DEFAULT_CHUNK)
            if limit:
                cardinality = min(limit, cardinality)
            try:
                # page until the expected count is reached or a short batch signals exhaustion
                while len(records) != cardinality:
                    batch = json.loads(
                        await non_secrets.fetch_wallet_search_next_records(self.handle, s_handle, chunk))['records']
                    records.extend(batch)
                    if len(batch) < chunk:
                        break
                if len(records) != cardinality:
                    LOGGER.warning(
                        'Non-secret search/limit indicated %s results but fetched %s',
                        cardinality,
                        len(records))
            finally:
                # always release the indy search handle
                await non_secrets.close_wallet_search(s_handle)
        rv = {record['id']: StorageRecord(typ, record['value'], record['tags'], record['id']) for record in records}
        LOGGER.debug('Wallet.get_non_secret <<< %s', rv)
        return rv
async def encrypt(
self,
message: bytes,
authn: bool = False,
to_verkey: str = None,
from_verkey: str = None) -> bytes:
"""
Encrypt plaintext for owner of DID, anonymously or via authenticated encryption scheme.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: plaintext, as bytes
:param authn: whether to use authenticated encryption scheme
:param to_verkey: verification key of recipient, None for anchor's own
:param from_verkey: verification key of sender for authenticated encryption, None for anchor's own
:return: ciphertext, as bytes
"""
LOGGER.debug(
'Wallet.encrypt >>> message: %s, authn: %s, to_verkey: %s, from_verkey: %s',
message,
authn,
to_verkey,
from_verkey)
if not message:
LOGGER.debug('Wallet.encrypt <!< No message to encrypt')
raise AbsentMessage('No message to encrypt')
if not self.handle:
LOGGER.debug('Wallet.encrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
if authn:
rv = await crypto.auth_crypt(self.handle, from_verkey or self.verkey, to_verkey or self.verkey, message)
else:
rv = await crypto.anon_crypt(to_verkey or self.verkey, message)
LOGGER.debug('Wallet.auth_encrypt <<< %s', rv)
return rv
async def decrypt(
self,
ciphertext: bytes,
authn_check: bool = None,
to_verkey: str = None,
from_verkey: str = None) -> (bytes, str):
"""
Decrypt ciphertext and optionally authenticate sender.
Raise BadKey if authentication operation checks and reveals sender key distinct from input
sender verification key. Raise AbsentMessage for missing ciphertext, or WalletState if
wallet is closed.
:param ciphertext: ciphertext, as bytes
:param authn_check: True to authenticate and check sender verification key,
False to authenticate and return sender verification key for client to decide fitness, or
None to use anonymous decryption
:param to_verkey: recipient verification key, default anchor's own
:param from_verkey: sender verification key, ignored for anonymous decryption,
default anchor's own for authenticated decryption
:return: decrypted bytes and sender verification key (None for anonymous decryption)
"""
LOGGER.debug(
'Wallet.decrypt >>> ciphertext: %s, authn_check: %s, to_verkey: %s, from_verkey: %s',
ciphertext,
authn_check,
to_verkey,
from_verkey)
if not ciphertext:
LOGGER.debug('Wallet.decrypt <!< No ciphertext to decrypt')
raise AbsentMessage('No ciphertext to decrypt')
if not self.handle:
LOGGER.debug('Wallet.decrypt <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
sender_verkey = None
if authn_check is None:
plaintext = await crypto.anon_decrypt(self.handle, to_verkey or self.verkey, ciphertext)
else:
(sender_verkey, plaintext) = await crypto.auth_decrypt(self.handle, to_verkey or self.verkey, ciphertext)
if authn_check and sender_verkey != (from_verkey or self.verkey):
LOGGER.debug('Wallet.decrypt <!< Authentication revealed unexpected sender key on decryption')
raise BadKey('Authentication revealed unexpected sender key on decryption')
rv = (plaintext, sender_verkey)
LOGGER.debug('Wallet.decrypt <<< %s', rv)
return rv
async def sign(self, message: bytes, verkey: str = None) -> bytes:
"""
Derive signing key and Sign message; return signature. Raise WalletState if wallet is closed.
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param verkey: verification key corresponding to private signing key (default anchor's own)
:return: signature, as bytes
"""
LOGGER.debug('Wallet.sign >>> message: %s, verkey: %s', message, verkey)
if not message:
LOGGER.debug('Wallet.sign <!< No message to sign')
raise AbsentMessage('No message to sign')
if not self.handle:
LOGGER.debug('Wallet.sign <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await crypto.crypto_sign(self.handle, verkey or self.verkey, message)
LOGGER.debug('Wallet.sign <<< %s', rv)
return rv
async def verify(self, message: bytes, signature: bytes, verkey: str = None) -> bool:
"""
Verify signature against input signer verification key (default anchor's own).
Raise AbsentMessage for missing message or signature, or WalletState if wallet is closed.
:param message: Content to sign, as bytes
:param signature: signature, as bytes
:param verkey: signer verification key (default for anchor's own)
:return: whether signature is valid
"""
LOGGER.debug('Wallet.verify >>> message: %s, signature: %s, verkey: %s', message, signature, verkey)
if not message:
LOGGER.debug('Wallet.verify <!< No message to verify')
raise AbsentMessage('No message to verify')
if not signature:
LOGGER.debug('Wallet.verify <!< No signature to verify')
raise AbsentMessage('No signature to verify')
if not self.handle:
LOGGER.debug('Wallet.verify <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await crypto.crypto_verify(verkey or self.verkey, message, signature)
LOGGER.debug('Wallet.verify <<< %s', rv)
return rv
async def pack(
self,
message: str,
recip_verkeys: Union[str, Sequence[str]] = None,
sender_verkey: str = None) -> bytes:
"""
Pack a message for one or more recipients (default anchor only).
Raise AbsentMessage for missing message, or WalletState if wallet is closed.
:param message: message to pack
:param recip_verkeys: verification keys of recipients (default anchor's own, only)
:param sender_verkey: sender verification key (default anonymous encryption)
:return: packed message
"""
LOGGER.debug(
'Wallet.pack >>> message: %s, recip_verkeys: %s, sender_verkey: %s',
message,
recip_verkeys,
sender_verkey)
if message is None:
LOGGER.debug('Wallet.pack <!< No message to pack')
raise AbsentMessage('No message to pack')
rv = await crypto.pack_message(
self.handle,
message,
[recip_verkeys] if isinstance(recip_verkeys, str) else list(recip_verkeys or [self.verkey]),
sender_verkey)
LOGGER.debug('Wallet.pack <<< %s', rv)
return rv
async def unpack(self, ciphertext: bytes) -> (str, str, str):
"""
Unpack a message. Return triple with cleartext, sender verification key, and recipient verification key.
Raise AbsentMessage for missing ciphertext, or WalletState if wallet is closed. Raise AbsentRecord
if wallet has no key to unpack ciphertext.
:param ciphertext: JWE-like formatted message as pack() produces
:return: cleartext, sender verification key, recipient verification key
"""
LOGGER.debug('Wallet.unpack >>> ciphertext: %s', ciphertext)
if not ciphertext:
LOGGER.debug('Wallet.pack <!< No ciphertext to unpack')
raise AbsentMessage('No ciphertext to unpack')
try:
unpacked = json.loads(await crypto.unpack_message(self.handle, ciphertext))
except IndyError as x_indy:
if x_indy.error_code == ErrorCode.WalletItemNotFound:
LOGGER.debug('Wallet.unpack <!< Wallet %s has no local key to unpack ciphertext', self.name)
raise AbsentRecord('Wallet {} has no local key to unpack ciphertext'.format(self.name))
LOGGER.debug('Wallet.unpack <!< Wallet %s unpack() raised indy error code {}', x_indy.error_code)
raise
rv = (unpacked['message'], unpacked.get('sender_verkey', None), unpacked.get('recipient_verkey', None))
LOGGER.debug('Wallet.unpack <<< %s', rv)
return rv
async def reseed_init(self, next_seed: str = None) -> str:
"""
Begin reseed operation: generate new key. Raise WalletState if wallet is closed.
:param next_seed: incoming replacement seed (default random)
:return: new verification key
"""
LOGGER.debug('Wallet.reseed_init >>> next_seed: [SEED]')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
rv = await did.replace_keys_start(self.handle, self.did, json.dumps({'seed': next_seed} if next_seed else {}))
LOGGER.debug('Wallet.reseed_init <<< %s', rv)
return rv
async def reseed_apply(self) -> DIDInfo:
"""
Replace verification key with new verification key from reseed operation.
Raise WalletState if wallet is closed.
:return: DIDInfo with new verification key and metadata for DID
"""
LOGGER.debug('Wallet.reseed_apply >>>')
if not self.handle:
LOGGER.debug('Wallet.reseed_init <!< Wallet %s is closed', self.name)
raise WalletState('Wallet {} is closed'.format(self.name))
await did.replace_keys_apply(self.handle, self.did)
self.verkey = await did.key_for_local_did(self.handle, self.did)
now = int(time())
rv = DIDInfo(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})
await did.set_did_metadata(self.handle, self.did, json.dumps(rv.metadata))
LOGGER.info('Wallet %s set seed hash metadata for DID %s', self.name, self.did)
LOGGER.debug('Wallet.reseed_apply <<< %s', rv)
return rv
def __repr__(self) -> str:
"""
Return representation for current object.
:return: representation for current object
"""
return 'Wallet({}, [ACCESS], {})'.format(self.config, self.auto_remove)
|
[
"indy.wallet.close_wallet",
"indy.non_secrets.delete_wallet_record",
"von_anchor.wallet.PairwiseInfo",
"json.dumps",
"indy.non_secrets.fetch_wallet_search_next_records",
"indy.crypto.auth_decrypt",
"von_anchor.wallet.KeyInfo",
"indy.did.get_my_did_with_meta",
"indy.non_secrets.close_wallet_search",
"json.loads",
"indy.crypto.anon_crypt",
"von_anchor.wallet.record.StorageRecord",
"indy.crypto.anon_decrypt",
"indy.did.list_my_dids_with_meta",
"indy.crypto.get_key_metadata",
"indy.crypto.auth_crypt",
"von_anchor.error.BadKey",
"von_anchor.wallet.storage_record2pairwise_info",
"von_anchor.wallet.DIDInfo",
"indy.did.key_for_local_did",
"indy.anoncreds.prover_create_master_secret",
"indy.crypto.unpack_message",
"indy.crypto.crypto_sign",
"von_anchor.util.ok_did",
"indy.non_secrets.update_wallet_record_value",
"von_anchor.wallet.pairwise_info2tags",
"time.time",
"indy.crypto.crypto_verify",
"von_anchor.error.AbsentMessage",
"indy.did.replace_keys_apply",
"von_anchor.wallet.record.StorageRecord.ok_tags",
"logging.getLogger"
] |
[((1351, 1378), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1368, 1378), False, 'import logging\n'), ((7282, 7313), 'von_anchor.wallet.KeyInfo', 'KeyInfo', (['verkey', '(metadata or {})'], {}), '(verkey, metadata or {})\n', (7289, 7313), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((11899, 11945), 'von_anchor.wallet.DIDInfo', 'DIDInfo', (['created_did', 'verkey', 'loc_did_metadata'], {}), '(created_did, verkey, loc_did_metadata)\n', (11906, 11945), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((14435, 14446), 'von_anchor.util.ok_did', 'ok_did', (['loc'], {}), '(loc)\n', (14441, 14446), False, 'from von_anchor.util import ok_did\n'), ((29861, 29949), 'von_anchor.wallet.PairwiseInfo', 'PairwiseInfo', (['their_did', 'their_verkey', 'my_did_info.did', 'my_did_info.verkey', 'metadata'], {}), '(their_did, their_verkey, my_did_info.did, my_did_info.verkey,\n metadata)\n', (29873, 29949), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((30382, 30418), 'von_anchor.wallet.storage_record2pairwise_info', 'storage_record2pairwise_info', (['storec'], {}), '(storec)\n', (30410, 30418), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((50587, 50666), 'von_anchor.wallet.DIDInfo', 'DIDInfo', (['self.did', 'self.verkey', "{'anchor': True, 'since': now, 'modified': now}"], {}), "(self.did, self.verkey, {'anchor': True, 'since': now, 'modified': now})\n", (50594, 50666), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((11707, 11713), 'time.time', 'time', ([], {}), '()\n', (11711, 11713), False, 'from time import time\n'), ((12635, 12641), 'time.time', 'time', ([], {}), 
'()\n', (12639, 12641), False, 'from time import time\n'), ((13416, 13455), 'indy.did.list_my_dids_with_meta', 'did.list_my_dids_with_meta', (['self.handle'], {}), '(self.handle)\n', (13442, 13455), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((30932, 30949), 'von_anchor.util.ok_did', 'ok_did', (['their_did'], {}), '(their_did)\n', (30938, 30949), False, 'from von_anchor.util import ok_did\n'), ((32124, 32164), 'von_anchor.wallet.storage_record2pairwise_info', 'storage_record2pairwise_info', (['storecs[k]'], {}), '(storecs[k])\n', (32152, 32164), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((33073, 33107), 'von_anchor.wallet.record.StorageRecord.ok_tags', 'StorageRecord.ok_tags', (['storec.tags'], {}), '(storec.tags)\n', (33094, 33107), False, 'from von_anchor.wallet.record import StorageRecord, TYPE_PAIRWISE, TYPE_LINK_SECRET_LABEL\n'), ((40160, 40225), 'von_anchor.wallet.record.StorageRecord', 'StorageRecord', (['typ', "record['value']", "record['tags']", "record['id']"], {}), "(typ, record['value'], record['tags'], record['id'])\n", (40173, 40225), False, 'from von_anchor.wallet.record import StorageRecord, TYPE_PAIRWISE, TYPE_LINK_SECRET_LABEL\n'), ((41364, 41402), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No message to encrypt"""'], {}), "('No message to encrypt')\n", (41377, 41402), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((43323, 43364), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No ciphertext to decrypt"""'], {}), "('No ciphertext to decrypt')\n", (43336, 43364), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((44887, 44922), 'von_anchor.error.AbsentMessage', 
'AbsentMessage', (['"""No message to sign"""'], {}), "('No message to sign')\n", (44900, 44922), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((45118, 45181), 'indy.crypto.crypto_sign', 'crypto.crypto_sign', (['self.handle', '(verkey or self.verkey)', 'message'], {}), '(self.handle, verkey or self.verkey, message)\n', (45136, 45181), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((45983, 46020), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No message to verify"""'], {}), "('No message to verify')\n", (45996, 46020), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((46135, 46174), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No signature to verify"""'], {}), "('No signature to verify')\n", (46148, 46174), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((46372, 46435), 'indy.crypto.crypto_verify', 'crypto.crypto_verify', (['(verkey or self.verkey)', 'message', 'signature'], {}), '(verkey or self.verkey, message, signature)\n', (46392, 46435), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((47399, 47434), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No message to pack"""'], {}), "('No message to pack')\n", (47412, 47434), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((48420, 48460), 'von_anchor.error.AbsentMessage', 'AbsentMessage', (['"""No ciphertext to unpack"""'], {}), "('No ciphertext to unpack')\n", (48433, 48460), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, 
AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((50429, 50474), 'indy.did.replace_keys_apply', 'did.replace_keys_apply', (['self.handle', 'self.did'], {}), '(self.handle, self.did)\n', (50451, 50474), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((50503, 50547), 'indy.did.key_for_local_did', 'did.key_for_local_did', (['self.handle', 'self.did'], {}), '(self.handle, self.did)\n', (50524, 50547), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((50566, 50572), 'time.time', 'time', ([], {}), '()\n', (50570, 50572), False, 'from time import time\n'), ((7216, 7242), 'json.dumps', 'json.dumps', (['(metadata or {})'], {}), '(metadata or {})\n', (7226, 7242), False, 'import json\n'), ((8024, 8068), 'indy.crypto.get_key_metadata', 'crypto.get_key_metadata', (['self.handle', 'verkey'], {}), '(self.handle, verkey)\n', (8047, 8068), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((8523, 8543), 'json.loads', 'json.loads', (['metadata'], {}), '(metadata)\n', (8533, 8543), False, 'import json\n'), ((9391, 9435), 'indy.crypto.get_key_metadata', 'crypto.get_key_metadata', (['self.handle', 'verkey'], {}), '(self.handle, verkey)\n', (9414, 9435), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((10007, 10033), 'json.dumps', 'json.dumps', (['(metadata or {})'], {}), '(metadata or {})\n', (10017, 10033), False, 'import json\n'), ((11855, 11883), 'json.dumps', 'json.dumps', (['loc_did_metadata'], {}), '(loc_did_metadata)\n', (11865, 11883), False, 'import json\n'), ((13546, 13583), 'json.loads', 'json.loads', (["did_with_meta['metadata']"], {}), "(did_with_meta['metadata'])\n", (13556, 13583), False, 'import json\n'), ((13750, 13810), 'von_anchor.wallet.DIDInfo', 'DIDInfo', (["did_with_meta['did']", "did_with_meta['verkey']", 'meta'], {}), "(did_with_meta['did'], did_with_meta['verkey'], meta)\n", (13757, 13810), False, 
'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((16497, 16536), 'indy.did.list_my_dids_with_meta', 'did.list_my_dids_with_meta', (['self.handle'], {}), '(self.handle)\n', (16523, 16536), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((17889, 17946), 'indy.anoncreds.prover_create_master_secret', 'anoncreds.prover_create_master_secret', (['self.handle', 'label'], {}), '(self.handle, label)\n', (17926, 17946), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((22809, 22853), 'indy.did.key_for_local_did', 'did.key_for_local_did', (['self.handle', 'self.did'], {}), '(self.handle, self.did)\n', (22830, 22853), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((25494, 25526), 'indy.wallet.close_wallet', 'wallet.close_wallet', (['self.handle'], {}), '(self.handle)\n', (25513, 25526), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((36079, 36136), 'indy.non_secrets.delete_wallet_record', 'non_secrets.delete_wallet_record', (['self.handle', 'typ', 'ident'], {}), '(self.handle, typ, ident)\n', (36111, 36136), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((41623, 41720), 'indy.crypto.auth_crypt', 'crypto.auth_crypt', (['self.handle', '(from_verkey or self.verkey)', '(to_verkey or self.verkey)', 'message'], {}), '(self.handle, from_verkey or self.verkey, to_verkey or\n self.verkey, message)\n', (41640, 41720), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((41754, 41806), 'indy.crypto.anon_crypt', 'crypto.anon_crypt', (['(to_verkey or self.verkey)', 'message'], {}), '(to_verkey or self.verkey, message)\n', (41771, 41806), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((43635, 43705), 'indy.crypto.anon_decrypt', 'crypto.anon_decrypt', (['self.handle', '(to_verkey or self.verkey)', 'ciphertext'], {}), '(self.handle, 
to_verkey or self.verkey, ciphertext)\n', (43654, 43705), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((43767, 43837), 'indy.crypto.auth_decrypt', 'crypto.auth_decrypt', (['self.handle', '(to_verkey or self.verkey)', 'ciphertext'], {}), '(self.handle, to_verkey or self.verkey, ciphertext)\n', (43786, 43837), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((44049, 44118), 'von_anchor.error.BadKey', 'BadKey', (['"""Authentication revealed unexpected sender key on decryption"""'], {}), "('Authentication revealed unexpected sender key on decryption')\n", (44055, 44118), False, 'from von_anchor.error import AbsentRecord, AbsentMessage, AbsentWallet, BadAccess, BadKey, BadIdentifier, BadRecord, ExtantRecord, ExtantWallet, WalletState\n'), ((49785, 49837), 'json.dumps', 'json.dumps', (["({'seed': next_seed} if next_seed else {})"], {}), "({'seed': next_seed} if next_seed else {})\n", (49795, 49837), False, 'import json\n'), ((50725, 50748), 'json.dumps', 'json.dumps', (['rv.metadata'], {}), '(rv.metadata)\n', (50735, 50748), False, 'import json\n'), ((6664, 6706), 'json.dumps', 'json.dumps', (["({'seed': seed} if seed else {})"], {}), "({'seed': seed} if seed else {})\n", (6674, 6706), False, 'import json\n'), ((11225, 11240), 'json.dumps', 'json.dumps', (['cfg'], {}), '(cfg)\n', (11235, 11240), False, 'import json\n'), ((12831, 12859), 'json.dumps', 'json.dumps', (['loc_did_metadata'], {}), '(loc_did_metadata)\n', (12841, 12859), False, 'import json\n'), ((15331, 15370), 'indy.did.list_my_dids_with_meta', 'did.list_my_dids_with_meta', (['self.handle'], {}), '(self.handle)\n', (15357, 15370), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((16651, 16688), 'json.loads', 'json.loads', (["did_with_meta['metadata']"], {}), "(did_with_meta['metadata'])\n", (16661, 16688), False, 'import json\n'), ((26477, 26500), 'json.dumps', 'json.dumps', (['self.config'], {}), '(self.config)\n', (26487, 
26500), False, 'import json\n'), ((26518, 26547), 'json.dumps', 'json.dumps', (['self.access_creds'], {}), '(self.access_creds)\n', (26528, 26547), False, 'import json\n'), ((29119, 29173), 'json.dumps', 'json.dumps', (["{'did': their_did, 'verkey': their_verkey}"], {}), "({'did': their_did, 'verkey': their_verkey})\n", (29129, 29173), False, 'import json\n'), ((32048, 32073), 'json.loads', 'json.loads', (['pairwise_filt'], {}), '(pairwise_filt)\n', (32058, 32073), False, 'import json\n'), ((33741, 33834), 'indy.non_secrets.update_wallet_record_value', 'non_secrets.update_wallet_record_value', (['self.handle', 'storec.type', 'storec.id', 'storec.value'], {}), '(self.handle, storec.type, storec.id,\n storec.value)\n', (33779, 33834), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((35084, 35169), 'json.dumps', 'json.dumps', (["{'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}"], {}), "({'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}\n )\n", (35094, 35169), False, 'import json\n'), ((38873, 39010), 'json.dumps', 'json.dumps', (["{'retrieveRecords': True, 'retrieveTotalCount': True, 'retrieveType': False,\n 'retrieveValue': True, 'retrieveTags': True}"], {}), "({'retrieveRecords': True, 'retrieveTotalCount': True,\n 'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True})\n", (38883, 39010), False, 'import json\n'), ((40089, 40130), 'indy.non_secrets.close_wallet_search', 'non_secrets.close_wallet_search', (['s_handle'], {}), '(s_handle)\n', (40120, 40130), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((48515, 48561), 'indy.crypto.unpack_message', 'crypto.unpack_message', (['self.handle', 'ciphertext'], {}), '(self.handle, ciphertext)\n', (48536, 48561), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((14528, 14570), 'indy.did.get_my_did_with_meta', 'did.get_my_did_with_meta', (['self.handle', 'loc'], {}), '(self.handle, loc)\n', 
(14552, 14570), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((14709, 14746), 'json.loads', 'json.loads', (["did_with_meta['metadata']"], {}), "(did_with_meta['metadata'])\n", (14719, 14746), False, 'import json\n'), ((21232, 21255), 'json.dumps', 'json.dumps', (['self.config'], {}), '(self.config)\n', (21242, 21255), False, 'import json\n'), ((21277, 21306), 'json.dumps', 'json.dumps', (['self.access_creds'], {}), '(self.access_creds)\n', (21287, 21306), False, 'import json\n'), ((23459, 23482), 'json.dumps', 'json.dumps', (['self.config'], {}), '(self.config)\n', (23469, 23482), False, 'import json\n'), ((23512, 23541), 'json.dumps', 'json.dumps', (['self.access_creds'], {}), '(self.access_creds)\n', (23522, 23541), False, 'import json\n'), ((32021, 32042), 'von_anchor.util.ok_did', 'ok_did', (['pairwise_filt'], {}), '(pairwise_filt)\n', (32027, 32042), False, 'from von_anchor.util import ok_did\n'), ((33510, 33595), 'json.dumps', 'json.dumps', (["{'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}"], {}), "({'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}\n )\n", (33520, 33595), False, 'import json\n'), ((34862, 34878), 'json.dumps', 'json.dumps', (['tags'], {}), '(tags)\n', (34872, 34878), False, 'import json\n'), ((15633, 15670), 'json.loads', 'json.loads', (["did_with_meta['metadata']"], {}), "(did_with_meta['metadata'])\n", (15643, 15670), False, 'import json\n'), ((30073, 30101), 'von_anchor.wallet.pairwise_info2tags', 'pairwise_info2tags', (['pairwise'], {}), '(pairwise)\n', (30091, 30101), False, 'from von_anchor.wallet import DIDInfo, KeyInfo, storage_record2pairwise_info, PairwiseInfo, pairwise_info2tags\n'), ((39216, 39286), 'indy.non_secrets.fetch_wallet_search_next_records', 'non_secrets.fetch_wallet_search_next_records', (['self.handle', 's_handle', '(0)'], {}), '(self.handle, s_handle, 0)\n', (39260, 39286), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), 
((34218, 34241), 'json.dumps', 'json.dumps', (['storec.tags'], {}), '(storec.tags)\n', (34228, 34241), False, 'import json\n'), ((38063, 38148), 'json.dumps', 'json.dumps', (["{'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}"], {}), "({'retrieveType': False, 'retrieveValue': True, 'retrieveTags': True}\n )\n", (38073, 38148), False, 'import json\n'), ((39598, 39672), 'indy.non_secrets.fetch_wallet_search_next_records', 'non_secrets.fetch_wallet_search_next_records', (['self.handle', 's_handle', 'chunk'], {}), '(self.handle, s_handle, chunk)\n', (39642, 39672), False, 'from indy import anoncreds, crypto, did, non_secrets, wallet\n'), ((19232, 19238), 'time.time', 'time', ([], {}), '()\n', (19236, 19238), False, 'from time import time\n')]
|
import unittest
import uuid
import threading
import pytest
from time import sleep
from azure.cosmos.http_constants import ResourceType
import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.documents as documents
from azure.cosmos.request_object import _RequestObject
from azure.cosmos.location_cache import LocationCache
from azure.cosmos.global_endpoint_manager import _GlobalEndpointManager
import azure.cosmos.errors as errors
from azure.cosmos.http_constants import StatusCodes, SubStatusCodes, HttpHeaders
import azure.cosmos.retry_utility as retry_utility
import six
class RefreshThread(threading.Thread):
    """Worker thread that forces an endpoint refresh on a global endpoint manager.

    The manager instance must be supplied as ``kwargs['endpoint_manager']``;
    several of these threads are started concurrently by the tests to verify
    that refreshes are not duplicated.
    """

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, verbose=None):
        if six.PY2:
            # Python 2's Thread.__init__ still accepts the ``verbose`` flag.
            super(RefreshThread, self).__init__(group=group, target=target,
                                                name=name, verbose=verbose)
        else:
            super().__init__()
        # Fail fast with a clear message instead of an opaque TypeError/KeyError
        # when the required endpoint_manager kwarg is missing.
        if not kwargs or 'endpoint_manager' not in kwargs:
            raise ValueError("RefreshThread requires kwargs['endpoint_manager']")
        self.endpoint_manager = kwargs['endpoint_manager']

    def run(self):
        # Passing None forces a refresh regardless of the current database account.
        self.endpoint_manager.force_refresh(None)
@pytest.mark.usefixtures("teardown")
class LocationCacheTest(unittest.TestCase):
    """Tests for LocationCache / _GlobalEndpointManager endpoint selection.

    Covers retry behaviour on READ_SESSION_NOTAVAILABLE failures, ordering of
    read/write endpoints, endpoint-refresh decisions, and thread-safety of the
    global endpoint refresh.
    """

    DEFAULT_ENDPOINT = "https://default.documents.azure.com"
    LOCATION_1_ENDPOINT = "https://location1.documents.azure.com"
    LOCATION_2_ENDPOINT = "https://location2.documents.azure.com"
    LOCATION_3_ENDPOINT = "https://location3.documents.azure.com"
    LOCATION_4_ENDPOINT = "https://location4.documents.azure.com"
    REFRESH_TIME_INTERVAL_IN_MS = 1000
    endpoint_by_location = {"location1": LOCATION_1_ENDPOINT,
                            "location2": LOCATION_2_ENDPOINT,
                            "location3": LOCATION_3_ENDPOINT,
                            "location4": LOCATION_4_ENDPOINT}

    def mock_create_db_with_flag_enabled(self, url_connection=None):
        """GetDatabaseAccount stub with multiple write locations enabled."""
        self.database_account = self.create_database_account(True)
        return self.database_account

    def mock_create_db_with_flag_disabled(self, url_connection=None):
        """GetDatabaseAccount stub with multiple write locations disabled."""
        self.database_account = self.create_database_account(False)
        return self.database_account

    def create_spy_client(self, use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty):
        """Build a CosmosClient configured for the requested test scenario."""
        self.preferred_locations = ["location1", "location2", "location3", "location4"]
        connectionPolicy = documents.ConnectionPolicy()
        connectionPolicy.DisableSSLVerification = True
        connectionPolicy.PreferredLocations = [] if is_preferred_locations_list_empty else self.preferred_locations
        connectionPolicy.EnableEndpointDiscovery = enable_endpoint_discovery
        connectionPolicy.UseMultipleWriteLocations = use_multiple_write_locations
        client = cosmos_client.CosmosClient(self.DEFAULT_ENDPOINT, {'masterKey': "SomeKeyValue"}, connectionPolicy)
        return client

    def test_validate_retry_on_session_not_availabe_with_disable_multiple_write_locations_and_endpoint_discovery_disabled(self):
        # Exhaustively cover all 2**3 combinations of
        # (is_preferred_locations_list_empty, use_multiple_write_locations, is_read_request).
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(False, False, False)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(False, False, True)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(False, True, False)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(False, True, True)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(True, False, False)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(True, False, True)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(True, True, False)
        self.validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(True, True, True)

    def validate_retry_on_session_not_availabe_with_endpoint_discovery_disabled(self, is_preferred_locations_list_empty, use_multiple_write_locations, is_read_request):
        """With endpoint discovery disabled, a session-read failure must not be retried.

        NOTE(review): "availabe" is a historical typo kept because the method
        name is referenced by the test methods above.
        """
        self.counter = 0
        # Patch the execute function and GetDatabaseAccount for the duration
        # of this scenario; both are restored at the end.
        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
        retry_utility._ExecuteFunction = self._MockExecuteFunctionSessionReadFailureOnce
        self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
        cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_create_db_with_flag_enabled if use_multiple_write_locations else self.mock_create_db_with_flag_disabled
        enable_endpoint_discovery = False
        client = self.create_spy_client(use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty)
        try:
            if is_read_request:
                client.ReadItem("dbs/mydb/colls/mycoll/docs/1")
            else:
                client.CreateItem("dbs/mydb/colls/mycoll/", {'id': '1'})
            self.fail()
        except errors.HTTPFailure as e:
            # not retried: the mock must have been called exactly once
            self.assertEqual(self.counter, 1)
            self.counter = 0
            self.assertEqual(e.status_code, StatusCodes.NOT_FOUND)
            self.assertEqual(e.sub_status, SubStatusCodes.READ_SESSION_NOTAVAILABLE)
        cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account
        retry_utility._ExecuteFunction = self.OriginalExecuteFunction

    def _MockExecuteFunctionSessionReadFailureOnce(self, function, *args, **kwargs):
        """Patched _ExecuteFunction that always raises READ_SESSION_NOTAVAILABLE."""
        self.counter += 1
        raise errors.HTTPFailure(StatusCodes.NOT_FOUND, "Read Session not available", {HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE})

    def test_validate_retry_on_session_not_availabe_with_endpoint_discovery_enabled(self):
        # sequence of chosen endpoints:
        # 1. Single region, No Preferred Location:
        #    location1 (default) -> location1 (no preferred location, hence default)
        # 2. Single Region, Preferred Locations present:
        #    location1 (1st preferred location) -> location1 (1st location in DBA's WriteLocation)
        # 3. MultiRegion, Preferred Regions present:
        #    location1 (1st preferred location Read Location) -> location1 (1st location in DBA's WriteLocation) ->
        #    location2 (2nd preferred location Read Location)-> location4 (3rd preferred location Read Location)
        #self.validate_retry_on_session_not_availabe(True, False)
        #self.validate_retry_on_session_not_availabe(False, False)
        self.validate_retry_on_session_not_availabe(False, True)

    def validate_retry_on_session_not_availabe(self, is_preferred_locations_list_empty, use_multiple_write_locations):
        """With endpoint discovery enabled, session-read failures retry across endpoints."""
        self.counter = 0
        self.OriginalExecuteFunction = retry_utility._ExecuteFunction
        retry_utility._ExecuteFunction = self._MockExecuteFunctionSessionReadFailureTwice
        self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
        cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_create_db_with_flag_enabled if use_multiple_write_locations else self.mock_create_db_with_flag_disabled
        enable_endpoint_discovery = True
        self.is_preferred_locations_list_empty = is_preferred_locations_list_empty
        self.use_multiple_write_locations = use_multiple_write_locations
        client = self.create_spy_client(use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty)
        try:
            client.ReadItem("dbs/mydb/colls/mycoll/docs/1")
        except errors.HTTPFailure as e:
            # 4 attempts with multiple write locations, otherwise 2
            self.assertEqual(self.counter, 4 if use_multiple_write_locations else 2)
            self.counter = 0
            self.assertEqual(e.status_code, StatusCodes.NOT_FOUND)
            self.assertEqual(e.sub_status, SubStatusCodes.READ_SESSION_NOTAVAILABLE)
        cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account
        retry_utility._ExecuteFunction = self.OriginalExecuteFunction

    def _MockExecuteFunctionSessionReadFailureTwice(self, function, *args, **kwargs):
        """Patched _ExecuteFunction that fails every call, asserting the endpoint
        chosen on each retry attempt (see comments in the enabled test above)."""
        request = args[1]
        if self.counter == 0:
            if not self.use_multiple_write_locations:
                expected_endpoint = self.database_account.WritableLocations[0]['databaseAccountEndpoint'] if self.is_preferred_locations_list_empty else self.preferred_locations[0]
            else:
                expected_endpoint = self.endpoint_by_location[self.preferred_locations[0]]
            self.assertFalse(request.should_clear_session_token_on_session_read_failure)
        elif self.counter == 1:
            expected_endpoint = self.database_account.WritableLocations[0]['databaseAccountEndpoint']
            if not self.use_multiple_write_locations:
                # last retry in single-write mode clears the session token
                self.assertTrue(request.should_clear_session_token_on_session_read_failure)
            else:
                self.assertFalse(request.should_clear_session_token_on_session_read_failure)
        elif self.counter == 2:
            expected_endpoint = self.endpoint_by_location[self.preferred_locations[1]]
            self.assertFalse(request.should_clear_session_token_on_session_read_failure)
        elif self.counter == 3:
            expected_endpoint = self.database_account.ReadableLocations[2]['databaseAccountEndpoint']
            self.assertTrue(request.should_clear_session_token_on_session_read_failure)
        self.assertEqual(expected_endpoint, request.location_endpoint_to_route)
        self.counter += 1
        raise errors.HTTPFailure(StatusCodes.NOT_FOUND, "Read Session not available", {HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE})

    def test_validate_location_cache(self):
        self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
        cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_get_database_account
        self.get_database_account_hit_counter = 0
        for i in range(0, 8):
            # Decode the three scenario flags from the loop counter's bits.
            use_multiple_write_locations = (i & 1) > 0
            endpoint_discovery_enabled = (i & 2) > 0
            is_preferred_list_empty = (i & 4) > 0
            self.validate_location_cache(use_multiple_write_locations, endpoint_discovery_enabled, is_preferred_list_empty)
        cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account

    def test_validate_write_endpoint_order_with_client_side_disable_multiple_write_location(self):
        self.original_get_database_account = cosmos_client.CosmosClient.GetDatabaseAccount
        cosmos_client.CosmosClient.GetDatabaseAccount = self.mock_get_database_account
        self.get_database_account_hit_counter = 0
        self.initialize(False, True, False)
        # write endpoints must preserve the database account's ordering
        self.assertEqual(self.location_cache.get_write_endpoints()[0], self.LOCATION_1_ENDPOINT)
        self.assertEqual(self.location_cache.get_write_endpoints()[1], self.LOCATION_2_ENDPOINT)
        self.assertEqual(self.location_cache.get_write_endpoints()[2], self.LOCATION_3_ENDPOINT)
        cosmos_client.CosmosClient.GetDatabaseAccount = self.original_get_database_account

    def mock_get_database_account(self, url_connection=None):
        """GetDatabaseAccount stub that also counts how often it is hit."""
        self.get_database_account_hit_counter += 1
        return self.create_database_account(True)

    def create_database_account(self, use_multiple_write_locations):
        """Build a DatabaseAccount with 3 writable and 3 readable locations."""
        database_account = documents.DatabaseAccount()
        database_account._EnableMultipleWritableLocations = use_multiple_write_locations
        database_account._WritableLocations = [
            {'name': 'location1', 'databaseAccountEndpoint': self.LOCATION_1_ENDPOINT},
            {'name': 'location2', 'databaseAccountEndpoint': self.LOCATION_2_ENDPOINT},
            {'name': 'location3', 'databaseAccountEndpoint': self.LOCATION_3_ENDPOINT}
        ]
        database_account._ReadableLocations = [
            {'name': 'location1', 'databaseAccountEndpoint': self.LOCATION_1_ENDPOINT},
            {'name': 'location2', 'databaseAccountEndpoint': self.LOCATION_2_ENDPOINT},
            {'name': 'location4', 'databaseAccountEndpoint': self.LOCATION_4_ENDPOINT}
        ]
        return database_account

    def initialize(self, use_multiple_write_locations, enable_endpoint_discovery, is_preferred_locations_list_empty):
        """Create the LocationCache and global endpoint manager under test."""
        self.database_account = self.create_database_account(use_multiple_write_locations)
        preferred_locations = ["location1", "location2", "location3"]
        self.preferred_locations = [] if is_preferred_locations_list_empty else preferred_locations
        self.location_cache = LocationCache(
            self.preferred_locations,
            self.DEFAULT_ENDPOINT,
            enable_endpoint_discovery,
            use_multiple_write_locations,
            self.REFRESH_TIME_INTERVAL_IN_MS)
        self.location_cache.perform_on_database_account_read(self.database_account)
        connectionPolicy = documents.ConnectionPolicy()
        connectionPolicy.PreferredLocations = self.preferred_locations
        client = cosmos_client.CosmosClient("", {}, connectionPolicy)
        self.global_endpoint_manager = client._global_endpoint_manager

    def validate_location_cache(self, use_multiple_write_locations, endpoint_discovery_enabled, is_preferred_list_empty):
        """Mark combinations of endpoints unavailable and verify cache behaviour."""
        for write_location_index in range(0, 3):
            for read_location_index in range(0, 2):
                self.initialize(use_multiple_write_locations, endpoint_discovery_enabled, is_preferred_list_empty)
                current_write_endpoints = self.location_cache.get_write_endpoints()
                current_read_endpoints = self.location_cache.get_read_endpoints()
                for i in range(0, read_location_index):
                    self.location_cache.mark_endpoint_unavailable_for_read(self.database_account.ReadableLocations[i]['databaseAccountEndpoint'])
                    self.global_endpoint_manager.mark_endpoint_unavailable_for_read(self.database_account.ReadableLocations[i]['databaseAccountEndpoint'])
                for i in range(0, write_location_index):
                    self.location_cache.mark_endpoint_unavailable_for_write(self.database_account.WritableLocations[i]['databaseAccountEndpoint'])
                    self.global_endpoint_manager.mark_endpoint_unavailable_for_write(self.database_account.WritableLocations[i]['databaseAccountEndpoint'])
                write_endpoint_by_location = {}
                for dba_location in self.database_account._WritableLocations:
                    write_endpoint_by_location[dba_location['name']] = dba_location['databaseAccountEndpoint']
                read_endpoint_by_location = {}
                for dba_location in self.database_account._ReadableLocations:
                    read_endpoint_by_location[dba_location['name']] = dba_location['databaseAccountEndpoint']
                available_write_endpoints = []
                for i in range(write_location_index, len(self.preferred_locations)):
                    location = self.preferred_locations[i]
                    endpoint = write_endpoint_by_location[location] if location in write_endpoint_by_location else None
                    if endpoint:
                        available_write_endpoints.append(endpoint)
                available_read_endpoints = []
                for i in range(read_location_index, len(self.preferred_locations)):
                    location = self.preferred_locations[i]
                    endpoint = read_endpoint_by_location[location] if location in read_endpoint_by_location else None
                    if endpoint:
                        available_read_endpoints.append(endpoint)
                self.validate_endpoint_refresh(use_multiple_write_locations, endpoint_discovery_enabled, available_write_endpoints, available_read_endpoints, write_location_index > 0)
                self.validate_global_endpoint_location_cache_refresh()
                self.validate_request_endpoint_resolution(use_multiple_write_locations, endpoint_discovery_enabled, available_write_endpoints, available_read_endpoints)
                # wait for TTL on unavailablity info
                sleep(1.5)
                self.assertEqual(current_write_endpoints, self.location_cache.get_write_endpoints())
                self.assertEqual(current_read_endpoints, self.location_cache.get_read_endpoints())

    def validate_global_endpoint_location_cache_refresh(self):
        """Concurrent force_refresh calls must hit GetDatabaseAccount at most once."""
        self.get_database_account_hit_counter = 0
        refresh_threads = []
        for i in range(10):
            refresh_thread = RefreshThread(kwargs={'endpoint_manager': self.global_endpoint_manager})
            refresh_thread.start()
            refresh_threads.append(refresh_thread)
        for i in range(10):
            refresh_threads[i].join()
        self.assertTrue(self.get_database_account_hit_counter <= 1)
        # sequential refreshes must also be deduplicated
        for i in range(10):
            refresh_thread = RefreshThread(kwargs={'endpoint_manager': self.global_endpoint_manager})
            refresh_thread.start()
            refresh_thread.join()
        self.assertTrue(self.get_database_account_hit_counter <= 1)

    def validate_endpoint_refresh(self, use_multiple_write_locations, endpoint_discovery_enabled, preferred_available_write_endpoints,
                                  preferred_available_read_endpoints, is_first_write_endpoint_unavailable):
        """Check should_refresh_endpoints against the expected availability state."""
        should_refresh_endpoints = self.location_cache.should_refresh_endpoints()
        is_most_preferred_location_unavailable_for_read = False
        is_most_preferred_location_unavailable_for_write = False if use_multiple_write_locations else is_first_write_endpoint_unavailable
        if (len(self.preferred_locations) > 0):
            # find the most preferred location that the account can serve reads from
            most_preferred_read_location_name = None
            for preferred_location in self.preferred_locations:
                for read_location in self.database_account._ReadableLocations:
                    if read_location['name'] == preferred_location:
                        most_preferred_read_location_name = preferred_location
                        break
                if most_preferred_read_location_name:
                    break
            most_preferred_read_endpoint = self.endpoint_by_location[most_preferred_read_location_name]
            is_most_preferred_location_unavailable_for_read = True if len(preferred_available_read_endpoints) == 0 else preferred_available_read_endpoints[0] != most_preferred_read_endpoint
            # same search for writes
            most_preferred_write_location_name = None
            for preferred_location in self.preferred_locations:
                for write_location in self.database_account._WritableLocations:
                    if write_location['name'] == preferred_location:
                        most_preferred_write_location_name = preferred_location
                        break
                if most_preferred_write_location_name:
                    break
            most_preferred_write_endpoint = self.endpoint_by_location[most_preferred_write_location_name]
            if use_multiple_write_locations:
                is_most_preferred_location_unavailable_for_write = True if len(preferred_available_write_endpoints) == 0 else preferred_available_write_endpoints[0] != most_preferred_write_endpoint
        if not endpoint_discovery_enabled:
            self.assertFalse(should_refresh_endpoints)
        else:
            self.assertEqual(is_most_preferred_location_unavailable_for_read or is_most_preferred_location_unavailable_for_write, should_refresh_endpoints)

    def validate_request_endpoint_resolution(self, use_multiple_write_locations, endpoint_discovery_enabled,
                                             available_write_endpoints, available_read_endpoints):
        """Verify that requests resolve to the expected read/write endpoints."""
        write_locations = self.database_account._WritableLocations
        if not endpoint_discovery_enabled:
            first_available_write_endpoint = self.DEFAULT_ENDPOINT
            second_available_write_endpoint = self.DEFAULT_ENDPOINT
        elif not use_multiple_write_locations:
            first_available_write_endpoint = write_locations[0]['databaseAccountEndpoint']
            second_available_write_endpoint = write_locations[1]['databaseAccountEndpoint']
        elif len(available_write_endpoints) > 1:
            first_available_write_endpoint = available_write_endpoints[0]
            second_available_write_endpoint = available_write_endpoints[1]
        elif len(available_write_endpoints) > 0:
            first_available_write_endpoint = available_write_endpoints[0]
            write_endpoint = write_locations[0]['databaseAccountEndpoint']
            second_available_write_endpoint = write_endpoint if write_endpoint != first_available_write_endpoint else available_write_endpoints[1]
        else:
            first_available_write_endpoint = self.DEFAULT_ENDPOINT
            second_available_write_endpoint = self.DEFAULT_ENDPOINT
        if not endpoint_discovery_enabled:
            first_available_read_endpoint = self.DEFAULT_ENDPOINT
        elif len(self.preferred_locations) == 0:
            first_available_read_endpoint = first_available_write_endpoint
        elif len(available_read_endpoints) > 0:
            first_available_read_endpoint = available_read_endpoints[0]
        else:
            first_available_read_endpoint = self.endpoint_by_location[self.preferred_locations[0]]
        first_write_endpoint = self.DEFAULT_ENDPOINT if not endpoint_discovery_enabled else self.database_account.WritableLocations[0]['databaseAccountEndpoint']
        second_write_endpoint = self.DEFAULT_ENDPOINT if not endpoint_discovery_enabled else self.database_account.WritableLocations[1]['databaseAccountEndpoint']
        # If current write endpoint is unavailable, write endpoints order doesn't change
        # All write requests flip-flop between current write and alternate write endpoint
        write_endpoints = self.location_cache.get_write_endpoints()
        self.assertTrue(first_available_write_endpoint == write_endpoints[0])
        self.assertTrue(second_available_write_endpoint == self.resolve_endpoint_for_write_request(ResourceType.Document, True))
        self.assertTrue(first_available_write_endpoint == self.resolve_endpoint_for_write_request(ResourceType.Document, False))
        # Writes to other resource types should be directed to first/second write endpoint
        self.assertTrue(first_write_endpoint == self.resolve_endpoint_for_write_request(ResourceType.Database, False))
        self.assertTrue(second_write_endpoint == self.resolve_endpoint_for_write_request(ResourceType.Database, True))
        # Reads should be directed to available read endpoints regardless of resource type
        self.assertTrue(first_available_read_endpoint == self.resolve_endpoint_for_read_request(True))
        self.assertTrue(first_available_read_endpoint == self.resolve_endpoint_for_read_request(False))

    def resolve_endpoint_for_read_request(self, master_resource_type):
        """Resolve the service endpoint for a Read operation."""
        operation_type = documents._OperationType.Read
        resource_type = ResourceType.Database if master_resource_type else ResourceType.Document
        request = _RequestObject(resource_type, operation_type)
        return self.location_cache.resolve_service_endpoint(request)

    def resolve_endpoint_for_write_request(self, resource_type, use_alternate_write_endpoint):
        """Resolve the service endpoint for a Create operation."""
        operation_type = documents._OperationType.Create
        request = _RequestObject(resource_type, operation_type)
        request.route_to_location_with_preferred_location_flag(1 if use_alternate_write_endpoint else 0, ResourceType.IsCollectionChild(resource_type))
        return self.location_cache.resolve_service_endpoint(request)
|
[
"azure.cosmos.errors.HTTPFailure",
"azure.cosmos.http_constants.ResourceType.IsCollectionChild",
"azure.cosmos.cosmos_client.CosmosClient",
"azure.cosmos.request_object._RequestObject",
"azure.cosmos.documents.ConnectionPolicy",
"time.sleep",
"azure.cosmos.location_cache.LocationCache",
"pytest.mark.usefixtures",
"azure.cosmos.documents.DatabaseAccount"
] |
[((1081, 1116), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""teardown"""'], {}), "('teardown')\n", (1104, 1116), False, 'import pytest\n'), ((2369, 2397), 'azure.cosmos.documents.ConnectionPolicy', 'documents.ConnectionPolicy', ([], {}), '()\n', (2395, 2397), True, 'import azure.cosmos.documents as documents\n'), ((2746, 2848), 'azure.cosmos.cosmos_client.CosmosClient', 'cosmos_client.CosmosClient', (['self.DEFAULT_ENDPOINT', "{'masterKey': 'SomeKeyValue'}", 'connectionPolicy'], {}), "(self.DEFAULT_ENDPOINT, {'masterKey':\n 'SomeKeyValue'}, connectionPolicy)\n", (2772, 2848), True, 'import azure.cosmos.cosmos_client as cosmos_client\n'), ((5435, 5578), 'azure.cosmos.errors.HTTPFailure', 'errors.HTTPFailure', (['StatusCodes.NOT_FOUND', '"""Read Session not available"""', '{HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE}'], {}), "(StatusCodes.NOT_FOUND, 'Read Session not available', {\n HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE})\n", (5453, 5578), True, 'import azure.cosmos.errors as errors\n'), ((9489, 9632), 'azure.cosmos.errors.HTTPFailure', 'errors.HTTPFailure', (['StatusCodes.NOT_FOUND', '"""Read Session not available"""', '{HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE}'], {}), "(StatusCodes.NOT_FOUND, 'Read Session not available', {\n HttpHeaders.SubStatus: SubStatusCodes.READ_SESSION_NOTAVAILABLE})\n", (9507, 9632), True, 'import azure.cosmos.errors as errors\n'), ((11321, 11348), 'azure.cosmos.documents.DatabaseAccount', 'documents.DatabaseAccount', ([], {}), '()\n', (11346, 11348), True, 'import azure.cosmos.documents as documents\n'), ((12594, 12756), 'azure.cosmos.location_cache.LocationCache', 'LocationCache', (['self.preferred_locations', 'self.DEFAULT_ENDPOINT', 'enable_endpoint_discovery', 'use_multiple_write_locations', 'self.REFRESH_TIME_INTERVAL_IN_MS'], {}), '(self.preferred_locations, self.DEFAULT_ENDPOINT,\n enable_endpoint_discovery, use_multiple_write_locations, self.\n 
REFRESH_TIME_INTERVAL_IN_MS)\n', (12607, 12756), False, 'from azure.cosmos.location_cache import LocationCache\n'), ((12940, 12968), 'azure.cosmos.documents.ConnectionPolicy', 'documents.ConnectionPolicy', ([], {}), '()\n', (12966, 12968), True, 'import azure.cosmos.documents as documents\n'), ((13057, 13109), 'azure.cosmos.cosmos_client.CosmosClient', 'cosmos_client.CosmosClient', (['""""""', '{}', 'connectionPolicy'], {}), "('', {}, connectionPolicy)\n", (13083, 13109), True, 'import azure.cosmos.cosmos_client as cosmos_client\n'), ((23203, 23248), 'azure.cosmos.request_object._RequestObject', '_RequestObject', (['resource_type', 'operation_type'], {}), '(resource_type, operation_type)\n', (23217, 23248), False, 'from azure.cosmos.request_object import _RequestObject\n'), ((23489, 23534), 'azure.cosmos.request_object._RequestObject', '_RequestObject', (['resource_type', 'operation_type'], {}), '(resource_type, operation_type)\n', (23503, 23534), False, 'from azure.cosmos.request_object import _RequestObject\n'), ((23640, 23685), 'azure.cosmos.http_constants.ResourceType.IsCollectionChild', 'ResourceType.IsCollectionChild', (['resource_type'], {}), '(resource_type)\n', (23670, 23685), False, 'from azure.cosmos.http_constants import ResourceType\n'), ((16192, 16202), 'time.sleep', 'sleep', (['(1.5)'], {}), '(1.5)\n', (16197, 16202), False, 'from time import sleep\n')]
|
from uuid import uuid4
from config import db
class Event(db.Model):
    """Calendar event persisted for a user.

    The primary key is a 32-character hex UUID generated on construction.
    """

    __tablename__ = 'event'

    id = db.Column(db.String(32), primary_key=True)
    username = db.Column(db.String(20))
    description = db.Column(db.String(250))
    start = db.Column(db.DateTime)
    end = db.Column(db.DateTime)

    def __init__(self, username, description, start, end):
        """Store the given fields and assign a freshly generated id."""
        self.id = uuid4().hex
        self.start = start
        self.end = end
        self.username = username
        self.description = description
|
[
"config.db.String",
"uuid.uuid4",
"config.db.Column"
] |
[((247, 269), 'config.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (256, 269), False, 'from config import db\n'), ((280, 302), 'config.db.Column', 'db.Column', (['db.DateTime'], {}), '(db.DateTime)\n', (289, 302), False, 'from config import db\n'), ((118, 131), 'config.db.String', 'db.String', (['(32)'], {}), '(32)\n', (127, 131), False, 'from config import db\n'), ((176, 189), 'config.db.String', 'db.String', (['(20)'], {}), '(20)\n', (185, 189), False, 'from config import db\n'), ((219, 233), 'config.db.String', 'db.String', (['(250)'], {}), '(250)\n', (228, 233), False, 'from config import db\n'), ((381, 388), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (386, 388), False, 'from uuid import uuid4\n')]
|
################################################################################
# Module: idf.py
# Description: Various functions for processing of EnergyPlus models and
# retrieving results in different forms
# License: MIT, see full license in LICENSE.txt
# Web: https://github.com/samuelduchesne/archetypal
################################################################################
import itertools
import logging as lg
import os
import re
import sqlite3
import subprocess
import time
import warnings
from collections import defaultdict
from io import StringIO
from itertools import chain
from math import isclose
from tempfile import TemporaryDirectory
import eppy
import pandas as pd
from eppy.bunch_subclass import BadEPFieldError
from eppy.easyopen import getiddfile
from eppy.modeleditor import IDDNotSetError, namebunch, newrawobject
from geomeppy import IDF as geomIDF
from geomeppy.patches import EpBunch, idfreader1, obj2bunch
from pandas.errors import ParserError
from path import Path
from tqdm import tqdm
from archetypal import ReportData, log, settings
from archetypal.energypandas import EnergySeries
from archetypal.eplus_interface.basement import BasementThread
from archetypal.eplus_interface.energy_plus import EnergyPlusThread
from archetypal.eplus_interface.exceptions import (
EnergyPlusProcessError,
EnergyPlusVersionError,
EnergyPlusWeatherError,
)
from archetypal.eplus_interface.expand_objects import ExpandObjectsThread
from archetypal.eplus_interface.slab import SlabThread
from archetypal.eplus_interface.transition import TransitionThread
from archetypal.eplus_interface.version import (
EnergyPlusVersion,
get_eplus_dirs,
)
from archetypal.idfclass.meters import Meters
from archetypal.idfclass.outputs import Outputs
from archetypal.idfclass.util import get_idf_version, hash_model
from archetypal.idfclass.reports import get_report
from archetypal.idfclass.variables import Variables
from archetypal.schedule import Schedule
class IDF(geomIDF):
"""Class for loading and parsing idf models and running simulations and
retrieving results.
Wrapper over the geomeppy.IDF class and subsequently the
eppy.modeleditor.IDF class
"""
# dependencies: dict of <dependant value: independent value>
_dependencies = {
"iddname": ["idfname", "as_version"],
"file_version": ["idfname"],
"idd_info": ["iddname", "idfname"],
"idd_index": ["iddname", "idfname"],
"idd_version": ["iddname", "idfname"],
"idfobjects": ["iddname", "idfname"],
"block": ["iddname", "idfname"],
"model": ["iddname", "idfname"],
"sql": [
"as_version",
"annual",
"design_day",
"epw",
"idfname",
"tmp_dir",
],
"htm": [
"as_version",
"annual",
"design_day",
"epw",
"idfname",
"tmp_dir",
],
"meters": [
"idfobjects",
"epw",
"annual",
"design_day",
"readvars",
"as_version",
],
"variables": [
"idfobjects",
"epw",
"annual",
"design_day",
"readvars",
"as_version",
],
"sim_id": [
"idfobjects",
"epw",
"annual",
"design_day",
"readvars",
"as_version",
],
"schedules_dict": ["idfobjects"],
"partition_ratio": ["idfobjects"],
"net_conditioned_building_area": ["idfobjects"],
"energyplus_its": ["annual", "design_day"],
"tmp_dir": ["idfobjects"],
}
_independant_vars = set(chain(*list(_dependencies.values())))
_dependant_vars = set(_dependencies.keys())
_initial_postition = itertools.count(start=1)
def _reset_dependant_vars(self, name):
_reverse_dependencies = {}
for k, v in self._dependencies.items():
for x in v:
_reverse_dependencies.setdefault(x, []).append(k)
for var in _reverse_dependencies[name]:
super().__setattr__(f"_{var}", None)
def __setattr__(self, key, value):
propobj = getattr(self.__class__, key, None)
if isinstance(propobj, property):
if propobj.fset is None:
raise AttributeError("Cannot set attribute")
# self.__set_on_dependencies(key.strip("_"), value)
else:
propobj.fset(self, value)
self.__set_on_dependencies(key, value)
else:
self.__set_on_dependencies(key, value)
def __set_on_dependencies(self, key, value):
if key in self._dependant_vars:
raise AttributeError("Cannot set this value.")
if key in self._independant_vars:
self._reset_dependant_vars(key)
key = f"_{key}"
super(IDF, self).__setattr__(key, value)
def __init__(
self,
idfname=None,
epw=None,
as_version=settings.ep_version,
annual=False,
design_day=False,
expandobjects=False,
convert=False,
verbose=settings.log_console,
readvars=True,
prep_outputs=True,
include=None,
custom_processes=None,
output_suffix="L",
epmacro=False,
keep_data=True,
keep_data_err=False,
position=0,
**kwargs,
):
"""
Args:
idfname (str _TemporaryFileWrapper): The idf model filename.
epw (str or Path): The weather-file
EnergyPlus args:
tmp_dir=None,
as_version=None,
prep_outputs=True,
include=None,
keep_original=True,
"""
# Set independents to there original values
if include is None:
include = []
self.idfname = idfname
self.epw = epw
self.as_version = as_version if as_version else settings.ep_version
self._custom_processes = custom_processes
self._include = include
self.keep_data_err = keep_data_err
self._keep_data = keep_data
self.output_suffix = output_suffix
self.verbose = verbose
self.readvars = readvars
self.expandobjects = expandobjects
self.convert = convert
self.epmacro = epmacro
self.design_day = design_day
self.annual = annual
self.prep_outputs = prep_outputs
self._position = position
self.output_prefix = None
# Set dependants to None
self._output_directory = None
self._file_version = None
self._iddname = None
self._idd_info = None
self._idd_index = None
self._idd_version = None
self._idfobjects = None
self._block = None
self._model = None
self._sql = None
self._sql_file = None
self._htm = None
self._original_ep_version = None
self._schedules_dict = None
self._outputs = None
self._partition_ratio = None
self._area_conditioned = None
self._area_unconditioned = None
self._area_total = None
self._schedules = None
self._meters = None
self._variables = None
self._energyplus_its = 0
self._sim_id = None
self.load_kwargs = dict(epw=epw, **kwargs)
self.outputtype = "standard"
self.original_idfname = self.idfname # Save original
self._original_cache = hash_model(self)
# Move to tmp_dir, if we want to keep the original file intact.
if settings.use_cache:
previous_file = self.output_directory / (self.name or str(self.idfname))
if previous_file.exists():
# We have a transitioned or cached file here; Load this one.
cache_file_version = EnergyPlusVersion(get_idf_version(previous_file))
if cache_file_version <= self.as_version:
self.idfname = previous_file
else:
if not isinstance(self.idfname, StringIO):
self.output_directory.makedirs_p()
self.idfname = self.savecopy(self.output_directory / self.name)
try:
# load the idf object by asserting self.idd_info
assert self.idd_info
except Exception as e:
raise e
else:
if self.file_version < self.as_version:
self.upgrade(to_version=self.as_version)
finally:
# Set model outputs
self._outputs = Outputs(idf=self)
if self.prep_outputs:
(
self._outputs.add_basics()
.add_umi_template_outputs()
.add_custom(outputs=self.prep_outputs)
.add_profile_gas_elect_ouputs()
.apply()
)
def __str__(self):
"""Returns name of IDF model."""
return self.name
    def setiddname(self, iddname, testing=False):
        """Set the path to the EnergyPlus IDD for the version of EnergyPlus
        which is to be used by eppy.

        Args:
            iddname (str): Path to the IDD file.
            testing (bool): Kept for API compatibility with eppy; not used here.
        """
        # NOTE(review): ``iddname``, ``idd_info`` and ``block`` are defined as
        # read-only properties on this class (no setters are visible), so these
        # assignments look like they would raise AttributeError at runtime --
        # confirm whether this method is actually exercised on this subclass.
        self.iddname = iddname
        self.idd_info = None
        self.block = None
    def read(self):
        """Read the IDF file and the IDD file.

        If the IDD file had already been read, it will not be read again.

        Read populates the following data structures:
        - idfobjects : list
        - model : list
        - idd_info : list
        - idd_index : dict

        Raises:
            IDDNotSetError: If no IDD name has been set beforehand.
        """
        if self.getiddname() is None:
            errortxt = (
                "IDD file needed to read the idf file. "
                "Set it using IDF.setiddname(iddfile)"
            )
            raise IDDNotSetError(errortxt)
        # Parse the idf file against the IDD; reuses the previously parsed
        # idd_info/block when they are available.
        readout = idfreader1(
            self.idfname, self.iddname, self, commdct=self.idd_info, block=self.block
        )
        # NOTE(review): ``idfobjects`` and ``model`` are read-only properties
        # on this class; assigning to them here looks like it would raise
        # AttributeError -- confirm this method is still in use.
        (self.idfobjects, block, self.model, idd_info, idd_index, idd_version) = readout
        self.setidd(idd_info, idd_index, block, idd_version)
def getiddname(self):
"""Get the name of the current IDD used by eppy."""
return self.iddname
    def setidd(self, iddinfo, iddindex, block, idd_version):
        """Set the IDD to be used by eppy.

        Args:
            iddinfo (list): Comments and metadata about fields in the IDD.
            iddindex (dict): Fast-lookup index for the IDD.
            block (list): Field names in the IDD.
            idd_version (tuple): Version of the IDD.
        """
        # NOTE(review): all four targets below are read-only properties on this
        # class (no setters visible); confirm these assignments do not raise
        # AttributeError at runtime.
        self.idd_info = iddinfo
        self.block = block
        self.idd_index = iddindex
        self.idd_version = idd_version
@property
def block(self):
"""EnergyPlus field ID names of the IDF from the IDD."""
if self._block is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._block
@property
def idd_info(self):
"""Descriptions of IDF fields from the IDD."""
if self._idd_info is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._idd_info
@property
def idd_index(self):
"""A pair of dicts used for fast lookups of names of groups of objects."""
if self._idd_index is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._idd_index
@property
def idfobjects(self):
"""Dict of lists of idf_MSequence objects in the IDF."""
if self._idfobjects is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._idfobjects
@property
def model(self):
"""Eplusdata object containing representions of IDF objects."""
if self._model is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._model
@property
def idd_version(self):
"""tuple: The version of the iddname."""
if self._idd_version is None:
bunchdt, block, data, commdct, idd_index, versiontuple = idfreader1(
self.idfname, self.iddname, self, commdct=None, block=None
)
self._block = block
self._idd_info = commdct
self._idd_index = idd_index
self._idfobjects = bunchdt
self._model = data
self._idd_version = versiontuple
return self._idd_version
    @property
    def iddname(self):
        """Path: The iddname used to parse the idf model."""
        if self._iddname is None:
            if self.file_version > self.as_version:
                raise EnergyPlusVersionError(
                    f"{self.as_version} cannot be lower then "
                    f"the version number set in the file: {self.file_version}"
                )
            # Resolve the IDD shipped with the matching EnergyPlus install.
            idd_filename = Path(getiddfile(str(self.file_version))).expand()
            if not idd_filename.exists():
                # Try finding the one in IDFVersionsUpdater
                idd_filename = (
                    self.idfversionupdater_dir / f"V"
                    f"{self.file_version.dash}-Energy+.idd"
                ).expand()
            self._iddname = idd_filename
        return self._iddname
@property
def file_version(self):
"""The :class:`EnergyPlusVersion` of the idf text file."""
if self._file_version is None:
return EnergyPlusVersion(get_idf_version(self.idfname))
    @property
    def custom_processes(self):
        """list: List of callables. Called on the output files."""
        return self._custom_processes
    @property
    def include(self):
        """list: List of external files."""
        return self._include
    @property
    def keep_data_err(self):
        """bool: If True, error files are copied back into self.output_folder"""
        return self._keep_data_err
    @keep_data_err.setter
    def keep_data_err(self, value):
        # Strict bool check: truthy values such as 1 are rejected on purpose.
        if not isinstance(value, bool):
            raise TypeError("'keep_data_err' needs to be a bool")
        self._keep_data_err = value
    @property
    def keep_data(self):
        """Whether EnergyPlus-created files are kept; set at construction time."""
        return self._keep_data
# region User-Defined Properties (have setter)
    @property
    def output_suffix(self):
        """Suffix style for output file names (default: L)

        - L: Legacy (e.g., eplustbl.csv)
        - C: Capital (e.g., eplusTable.csv)
        - D: Dash (e.g., eplus-table.csv)
        """
        return self._output_suffix
    @output_suffix.setter
    def output_suffix(self, value):
        # Only the three suffix styles listed above are accepted.
        choices = ["L", "C", "D"]
        if value not in choices:
            raise ValueError(f"Choices of 'output_suffix' are {choices}")
        self._output_suffix = value
    @property
    def idfname(self):
        """The path of the active (parsed) idf model.

        If `settings.use_cache == True`, then this path will point to
        `settings.cache_folder`. See :meth:`~archetypal.utils.config`
        """
        if self._idfname is None:
            # No file yet: expose an in-memory model that contains only the
            # VERSION object for the requested EnergyPlus version.
            idfname = StringIO(f"VERSION, {self.as_version};")
            idfname.seek(0)
            self._idfname = idfname
        else:
            if isinstance(self._idfname, StringIO):
                # Rewind so repeated reads of the in-memory model start fresh.
                self._idfname.seek(0)
            else:
                self._idfname = Path(self._idfname).expand()
        return self._idfname
    @idfname.setter
    def idfname(self, value):
        # Falsy values (None, "") clear the model path entirely.
        if value:
            self._idfname = Path(value).expand()
        else:
            self._idfname = None
    @property
    def epw(self):
        """The weather file path."""
        # Implicitly returns None when no weather file has been set.
        if self._epw is not None:
            return Path(self._epw).expand()
    @epw.setter
    def epw(self, value):
        # Falsy values (None, "") clear the weather file.
        if value:
            self._epw = Path(value).expand()
        else:
            self._epw = None
    @property
    def verbose(self):
        """bool: If True, print outputs to logging module.

        See Also:
            :ref:`archetypal.utils.config`
        """
        return self._verbose
    @verbose.setter
    def verbose(self, value):
        # Strict bool check: truthy values such as 1 are rejected on purpose.
        if not isinstance(value, bool):
            raise TypeError("'verbose' needs to be a bool")
        self._verbose = value
    # Simple run-option flags; each setter enforces a strict bool.
    @property
    def expandobjects(self):
        """bool: If True, run ExpandObjects prior to simulation."""
        return self._expandobjects
    @expandobjects.setter
    def expandobjects(self, value):
        if not isinstance(value, bool):
            raise TypeError("'expandobjects' needs to be a bool")
        self._expandobjects = value
    @property
    def readvars(self):
        """bool: If True, run ReadVarsESO after simulation."""
        return self._readvars
    @readvars.setter
    def readvars(self, value):
        if not isinstance(value, bool):
            raise TypeError("'readvars' needs to be a bool")
        self._readvars = value
    @property
    def epmacro(self):
        """bool: If True, run EPMacro prior to simulation."""
        return self._epmacro
    @epmacro.setter
    def epmacro(self, value):
        if not isinstance(value, bool):
            raise TypeError("'epmacro' needs to be a bool")
        self._epmacro = value
    @property
    def design_day(self):
        """bool: If True, force design-day-only simulation."""
        return self._design_day
    @design_day.setter
    def design_day(self, value):
        if not isinstance(value, bool):
            raise TypeError("'design_day' needs to be a bool")
        self._design_day = value
    @property
    def annual(self):
        """bool: If True, force annual simulation."""
        return self._annual
    @annual.setter
    def annual(self, value):
        if not isinstance(value, bool):
            raise TypeError("'annual' needs to be a bool")
        self._annual = value
    @property
    def convert(self):
        """bool: If True, only convert IDF->epJSON or epJSON->IDF.

        Dependent on input file type. No simulation.
        """
        return self._convert
    @convert.setter
    def convert(self, value):
        if not isinstance(value, bool):
            raise TypeError("'convert' needs to be a bool")
        self._convert = value
    @property
    def prep_outputs(self):
        """Bool or set list of custom outputs"""
        return self._prep_outputs
    @prep_outputs.setter
    def prep_outputs(self, value):
        # No validation here: accepts a bool or a list of output definitions.
        self._prep_outputs = value
    @property
    def as_version(self):
        """Specify the desired :class:`EnergyPlusVersion` for the IDF model."""
        if self._as_version is None:
            # Default to the latest EnergyPlus version available.
            self._as_version = EnergyPlusVersion.current()
        # Always return a normalized EnergyPlusVersion instance.
        return EnergyPlusVersion(self._as_version)
    @as_version.setter
    def as_version(self, value):
        # EnergyPlusVersion parses the value and raises if it is not valid.
        self._as_version = EnergyPlusVersion(value)
    @property
    def output_directory(self):
        """The output directory based on the hashing of the original file.

        Notes:
            The hashing is performed before transitions or modifications.
        """
        if self._output_directory is None:
            cache_filename = self._original_cache
            output_directory = settings.cache_folder / cache_filename
            # Create the folder on first access so callers can rely on it.
            output_directory.makedirs_p()
            self._output_directory = output_directory.expand()
        return Path(self._output_directory)
    @output_directory.setter
    def output_directory(self, value):
        # Unlike the lazy getter, the setter does not create the directory;
        # the caller must create it beforehand.
        if value and not Path(value).exists():
            raise ValueError(
                f"The tmp_dir '{value}' must be created before being assigned"
            )
        elif value:
            value = Path(value)
        self._output_directory = value
    @property
    def output_prefix(self):
        """Prefix for output file names (default: eplus)."""
        if self._output_prefix is None:
            self._output_prefix = "eplus"
        return self._output_prefix
    @output_prefix.setter
    def output_prefix(self, value):
        # Falsy values (None, "") are allowed and reset the lazy default.
        if value and not isinstance(value, str):
            raise TypeError("'output_prefix' needs to be a string")
        self._output_prefix = value
    @property
    def sim_id(self):
        """The unique Id of the simulation.

        Based on a subset of hashed variables:
        - The idf model itself.
        - epw
        - annual
        - design_day
        - readvars
        - as_version
        """
        if self._sim_id is None:
            # Hash everything that changes the simulation outcome so distinct
            # run configurations never collide in the cache.
            self._sim_id = hash_model(
                self,
                epw=self.epw,
                annual=self.annual,
                design_day=self.design_day,
                readvars=self.readvars,
                ep_version=self.as_version,
                include=self.include,
            )
        return self._sim_id
@sim_id.setter
def sim_id(self, value):
if value and not isinstance(value, str):
raise TypeError("'output_prefix' needs to be a string")
self._sim_id = value
# endregion
    @property
    def position(self):
        """The position value passed at construction time."""
        return self._position
    @property
    def idfversionupdater_dir(self):
        """Path to the IDFVersionUpdater folder of the configured EnergyPlus."""
        return (
            get_eplus_dirs(settings.ep_version) / "PreProcess" / "IDFVersionUpdater"
        ).expand()
    @property
    def idf_version(self):
        """Alias of :attr:`file_version`."""
        return self.file_version
    @property
    def name(self):
        """The model's file name, or None for an in-memory (StringIO) model."""
        if isinstance(self.idfname, StringIO):
            return None
        return self.idfname.basename()
    def sql(self):
        """Get the sql table report (cached).

        If no report exists yet, an ``Output:SQLite`` object is added to the
        model when missing and a simulation is triggered to produce one.
        """
        if self._sql is None:
            try:
                sql_dict = get_report(
                    self.idfname,
                    self.simulation_dir,
                    output_report="sql",
                    output_prefix=self.output_prefix,
                )
            except FileNotFoundError:
                # check if htm output is in file
                sql_object = self.anidfobject(
                    key="Output:SQLite".upper(), Option_Type="SimpleAndTabular"
                )
                if sql_object not in self.idfobjects["Output:SQLite".upper()]:
                    self.addidfobject(sql_object)
                # Simulate (with the SQLite output ensured above), then retry.
                return self.simulate().sql()
            except Exception as e:
                raise e
            else:
                self._sql = sql_dict
        return self._sql
    def htm(self):
        """Get the htm table report (cached); simulates first when missing."""
        if self._htm is None:
            try:
                htm_dict = get_report(
                    self.idfname,
                    self.simulation_dir,
                    output_report="htm",
                    output_prefix=self.output_prefix,
                )
            except FileNotFoundError:
                # No report yet: run the simulation, then retry.
                return self.simulate().htm()
            else:
                self._htm = htm_dict
        return self._htm
@property
def energyplus_its(self):
"""Number of iterations needed to complete simulation"""
if self._energyplus_its is None:
self._energyplus_its = 0
return self._energyplus_its
    def open_htm(self):
        """Open the first .htm result file in the default web browser."""
        import webbrowser
        html, *_ = self.simulation_dir.files("*.htm")
        webbrowser.open(html.abspath())
    def open_idf(self):
        """Save the model, then open the .idf file with the OS default app."""
        self.save()
        filepath = self.idfname
        import os
        import platform
        import subprocess
        if platform.system() == "Darwin":  # macOS
            subprocess.call(("open", filepath))
        elif platform.system() == "Windows":  # Windows
            os.startfile(filepath)
        else:  # linux variants
            subprocess.call(("xdg-open", filepath))
    def open_last_simulation(self):
        """Open the idf file of the last simulation with the OS default app."""
        filepath, *_ = self.simulation_dir.files("*.idf")
        import os
        import platform
        import subprocess
        if platform.system() == "Darwin":  # macOS
            subprocess.call(("open", filepath))
        elif platform.system() == "Windows":  # Windows
            os.startfile(filepath)
        else:  # linux variants
            subprocess.call(("xdg-open", filepath))
    def open_mdd(self):
        """Open the .mdd file in a browser. This file lists all report meters
        along with their "availability" for the current input file."""
        import webbrowser
        mdd, *_ = self.simulation_dir.files("*.mdd")
        webbrowser.open(mdd.abspath())
    def open_mtd(self):
        """Open the .mtd file in a browser. This file contains the "meter
        details" for the run: which report variables are on which meters and,
        vice versa, which meters contain what report variables."""
        import webbrowser
        mtd, *_ = self.simulation_dir.files("*.mtd")
        webbrowser.open(mtd.abspath())
    @property
    def sql_file(self):
        """Get the sql file path; simulates first when no results exist."""
        try:
            file, *_ = self.simulation_dir.files("*out.sql")
        except (FileNotFoundError, ValueError):
            # Missing directory or no matching file: run, then retry.
            return self.simulate().sql_file
        return file.expand()
    @property
    def net_conditioned_building_area(self):
        """Returns the total conditioned area of a building (taking into account
        zone multipliers)
        """
        if self._area_conditioned is None:
            if self.simulation_dir.exists():
                # Prefer the value EnergyPlus itself reported in the sql output.
                with sqlite3.connect(self.sql_file) as conn:
                    sql_query = f"""
                            SELECT t.Value
                            FROM TabularDataWithStrings t
                            WHERE TableName == 'Building Area' and ColumnName == 'Area' and RowName == 'Net Conditioned Building Area';"""
                    (res,) = conn.execute(sql_query).fetchone()
                    self._area_conditioned = float(res)
            else:
                # Fall back to summing surface areas from the model geometry.
                area = 0
                zones = self.idfobjects["ZONE"]
                zone: EpBunch
                for zone in zones:
                    for surface in zone.zonesurfaces:
                        if hasattr(surface, "tilt"):
                            # tilt == 180 selects downward-facing surfaces
                            # (presumably floors) -- confirm convention.
                            if surface.tilt == 180.0:
                                part_of = int(
                                    zone.Part_of_Total_Floor_Area.upper() != "NO"
                                )
                                multiplier = float(
                                    zone.Multiplier if zone.Multiplier != "" else 1
                                )
                                area += surface.area * multiplier * part_of
                self._area_conditioned = area
        return self._area_conditioned
    @property
    def unconditioned_building_area(self):
        """Returns the Unconditioned Building Area"""
        if self._area_unconditioned is None:
            if self.simulation_dir.exists():
                # Prefer the value EnergyPlus itself reported in the sql output.
                with sqlite3.connect(self.sql_file) as conn:
                    sql_query = f"""
                            SELECT t.Value
                            FROM TabularDataWithStrings t
                            WHERE TableName == 'Building Area' and
                            ColumnName == 'Area' and RowName == 'Unconditioned Building Area';"""
                    (res,) = conn.execute(sql_query).fetchone()
                    self._area_unconditioned = float(res)
            else:
                # Fall back to the model geometry; note the inverted
                # Part_of_Total_Floor_Area test versus the conditioned-area
                # property (== "NO" here, != "NO" there).
                area = 0
                zones = self.idfobjects["ZONE"]
                zone: EpBunch
                for zone in zones:
                    for surface in zone.zonesurfaces:
                        if hasattr(surface, "tilt"):
                            if surface.tilt == 180.0:
                                part_of = int(
                                    zone.Part_of_Total_Floor_Area.upper() == "NO"
                                )
                                multiplier = float(
                                    zone.Multiplier if zone.Multiplier != "" else 1
                                )
                                area += surface.area * multiplier * part_of
                self._area_unconditioned = area
        return self._area_unconditioned
    @property
    def total_building_area(self):
        """Total building area from the sql report, or summed from geometry."""
        if self._area_total is None:
            if self.simulation_dir.exists():
                # Prefer the value EnergyPlus itself reported in the sql output.
                with sqlite3.connect(self.sql_file) as conn:
                    sql_query = f"""
                            SELECT t.Value
                            FROM TabularDataWithStrings t
                            WHERE TableName == 'Building Area' and
                            ColumnName == 'Area' and RowName == 'Total Building Area';"""
                    (res,) = conn.execute(sql_query).fetchone()
                    self._area_total = float(res)
            else:
                # Fall back to the model geometry; unlike the conditioned /
                # unconditioned properties, no Part_of_Total_Floor_Area filter
                # is applied here.
                area = 0
                zones = self.idfobjects["ZONE"]
                zone: EpBunch
                for zone in zones:
                    for surface in zone.zonesurfaces:
                        if hasattr(surface, "tilt"):
                            if surface.tilt == 180.0:
                                multiplier = float(
                                    zone.Multiplier if zone.Multiplier != "" else 1
                                )
                                area += surface.area * multiplier
                self._area_total = area
        return self._area_total
    @property
    def partition_ratio(self):
        """The number of lineal meters of partitions (Floor to ceiling) present
        in average in the building floor plan by m2.
        """
        if self._partition_ratio is None:
            partition_lineal = 0
            zones = self.idfobjects["ZONE"]
            zone: EpBunch
            for zone in zones:
                for surface in [
                    surf
                    for surf in zone.zonesurfaces
                    if surf.key.upper() not in ["INTERNALMASS", "WINDOWSHADINGCONTROL"]
                ]:
                    if hasattr(surface, "tilt"):
                        # Vertical (tilt 90) surfaces not exposed to the
                        # outdoors are counted as interior partitions.
                        if (
                            surface.tilt == 90.0
                            and surface.Outside_Boundary_Condition != "Outdoors"
                        ):
                            multiplier = float(
                                zone.Multiplier if zone.Multiplier != "" else 1
                            )
                            partition_lineal += surface.width * multiplier
            # Normalize per m2 of conditioned area (may trigger a simulation
            # through net_conditioned_building_area).
            self._partition_ratio = (
                partition_lineal / self.net_conditioned_building_area
            )
        return self._partition_ratio
    @property
    def simulation_files(self):
        """All files in the simulation directory, or [] when none exist yet."""
        try:
            return self.simulation_dir.files()
        except FileNotFoundError:
            return []
    @property
    def simulation_dir(self):
        """The path where simulation results are stored"""
        try:
            return (self.output_directory / self.sim_id).expand()
        except AttributeError:
            # Fall back to an empty Path when dependent attributes are unset.
            return Path()
@property
def schedules_dict(self):
if self._schedules_dict is None:
self._schedules_dict = self.get_all_schedules()
return self._schedules_dict
@property
def schedules(self):
if self._schedules is None:
schedules = {}
for schd in self.schedules_dict:
schedules[schd] = Schedule(Name=schd, idf=self)
self._umischedules = schedules
return self._umischedules
    @property
    def outputs(self):
        """The :class:`Outputs` object attached to this model in ``__init__``."""
        return self._outputs
@property
def day_of_week_for_start_day(self):
"""Get day of week for start day for the first found RUNPERIOD"""
import calendar
day = self.idfobjects["RUNPERIOD"][0]["Day_of_Week_for_Start_Day"]
if day.lower() == "sunday":
return calendar.SUNDAY
elif day.lower() == "monday":
return calendar.MONDAY
elif day.lower() == "tuesday":
return calendar.TUESDAY
elif day.lower() == "wednesday":
return calendar.WEDNESDAY
elif day.lower() == "thursday":
return calendar.THURSDAY
elif day.lower() == "friday":
return calendar.FRIDAY
elif day.lower() == "saturday":
return calendar.SATURDAY
else:
return 0
    @property
    def meters(self):
        """List of available meters for the :class:`IDF` model.

        The :class:`IDF` model must be simulated once (to retrieve the .mdd
        file). The listed meters may or may not be included in the idf file.
        If they are not, the output is added to the file and the model is
        simulated again. The output is appended to the :attr:`IDF.idfobjects`
        list, but will not overwrite the original idf file, unless
        :meth:`IDF.save` is called.

        Hint:
            Call `idf.meters.<output_group>.<meter_name>.values()` to retrieve
            a time-series based on the :class:`pandas.Series` class which can
            be plotted. See :class:`Meter` and :class:`EnergySeries` for more
            information.

        Example:
            The IDF.meters attribute is populated with meters categories
            (`Output:Meter` or `Output:Meter:Cumulative`) and each category is
            populated with all the available meters.

            .. code-block::

                >>> IDF.meters.OutputMeter.WaterSystems__MainsWater
                >>> IDF.meters.OutputMeterCumulative.WaterSystems__MainsWater
        """
        if self._meters is None:
            try:
                # The .mdd file only exists after at least one simulation.
                self.simulation_dir.files("*.mdd")
            except FileNotFoundError:
                raise Exception(
                    "call IDF.simulate() at least once to get a list of "
                    "possible meters"
                )
            else:
                self._meters = Meters(self)
        return self._meters
    @property
    def variables(self):
        """List of available variables for the :class:`IDF` model.

        The :class:`IDF` model must be simulated once (to retrieve the .rdd
        file). The listed variables may or may not be included in the idf
        file. If they are not, the output is added to the file and the model
        is simulated again. The output is appended to the
        :attr:`IDF.idfobjects` list, but will not overwrite the original idf
        file, unless :meth:`IDF.save` is called.

        Hint:
            Call `idf.variables.<output_group>.<variable_name>.values()` to
            retrieve a time-series based on the :class:`pandas.Series` class
            which can be plotted. See :class:`EnergySeries` for more
            information.

        Example:
            .. code-block::

                >>> IDF.variables.OutputVariable
        """
        if self._variables is None:
            try:
                self.simulation_dir.files("*.rdd")
            except FileNotFoundError:
                # NOTE(review): unlike :attr:`meters`, which raises here, this
                # returns an instructional string -- callers must handle both
                # a Variables object and a str. Confirm this asymmetry is
                # intended.
                return "call IDF.simulate() to get a list of possible variables"
            else:
                self._variables = Variables(self)
        return self._variables
    def simulate(self, **kwargs):
        """Execute EnergyPlus and return self; raises on any runner error.

        Any keyword argument whose name matches an existing private attribute
        (``epw``, ``annual``, ``design_day``, ``readvars``, ``epmacro``,
        ``expandobjects``, ``keep_data_err``, ``output_prefix``,
        ``output_suffix``, ``verbose``, ``prep_outputs``, ``include``, ...) is
        assigned on self before the run.

        Keyword Args:
            eplus_file (str): path to the idf file.
            weather_file (str): path to the EPW weather file.
            output_directory (str, optional): path to the output folder.
            ep_version (str, optional): EnergyPlus executable version, eg: 9-2-0
            output_report: 'sql' or 'htm'.
            prep_outputs (bool or list, optional): if True, meters and variable
                outputs will be appended to the idf files. Can also specify
                custom outputs as list of ep-object outputs.
            keep_data (bool): If True, files created by EnergyPlus are saved to
                the tmp_dir.
            annual (bool): If True then force annual simulation (default: False)
            design_day (bool): Force design-day-only simulation (default: False)
            epmacro (bool): Run EPMacro prior to simulation (default: False)
            expandobjects (bool): Run ExpandObjects prior to simulation
                (default: True)
            readvars (bool): Run ReadVarsESO after simulation (default: False)
            output_prefix (str, optional): Prefix for output file names.
            output_suffix (str, optional): Suffix style for output file names
                (default: L). Choices: L (legacy), C (capital), D (dash).
            version (bool, optional): Display version information
                (default: False)
            verbose (str): Verbosity of runtime messages (default: v); q: quiet
            keep_data_err (bool): If True, the errored directory where the
                simulation occurred is kept.
            include (str, optional): List of input files to copy to the
                simulation directory; a string is treated as a glob pattern
                (see :meth:`pathlib.Path.glob`).
            process_files (bool): If True, process the output files and load to
                a :class:`~pandas.DataFrame`. Custom processes can be passed
                using the :attr:`custom_processes` attribute.
            custom_processes (dict(Callback)): mapping of glob pattern to a
                callback ``callback(file, working_dir, simulname)`` applied to
                each matching output file (csv files are additionally parsed
                by pandas.read_csv unless "*.csv" is overridden).
            return_idf (bool): If True, returns the :class:`IDF` object part of
                the return tuple.
            return_files (bool): If True, all files paths created by the
                EnergyPlus run are returned.

        Raises:
            EnergyPlusVersionError: If the model version does not match the IDD.
            EnergyPlusWeatherError: If no weather file has been set.
            EnergyPlusProcessError: If an issue occurs with the execution of
                the energyplus command.

        See Also:
            :meth:`simulation_files`, :meth:`processed_results` for simulation
            outputs.
        """
        # First, update keys with new values
        for key, value in kwargs.items():
            if f"_{key}" in self.__dict__.keys():
                setattr(self, key, value)
        # The model must match the IDD version it was parsed with.
        if self.as_version != EnergyPlusVersion(self.idd_version):
            raise EnergyPlusVersionError(
                None, self.idfname, EnergyPlusVersion(self.idd_version), self.as_version
            )
        start_time = time.time()  # NOTE(review): assigned but unused below.
        include = self.include
        if isinstance(include, str):
            # A string is treated as a glob relative to the current directory.
            include = Path().abspath().glob(include)
        elif include is not None:
            include = [Path(file) for file in include]
        # NOTE(review): `include` is normalized above but not referenced again
        # in this method -- presumably the copy happens inside the worker
        # threads; confirm.
        # check if a weather file is defined
        if not getattr(self, "epw", None):
            raise EnergyPlusWeatherError(
                f"No weather file specified with {self}. Set 'epw' in IDF("
                f"filename, epw='weather.epw').simulate() or in IDF.simulate("
                f"epw='weather.epw')"
            )
        # Todo: Add EpMacro Thread -> if exist in.imf "%program_path%EPMacro"
        # Each preprocessor below runs in its own temporary directory and is
        # joined before the next starts; a stored exception aborts the chain.
        # Run the expandobjects program if necessary
        with TemporaryDirectory(
            prefix="expandobjects_run_",
            suffix=None,
            dir=self.output_directory,
        ) as tmp:
            # Run the ExpandObjects preprocessor program
            expandobjects_thread = ExpandObjectsThread(self, tmp)
            expandobjects_thread.start()
            expandobjects_thread.join()
            e = expandobjects_thread.exception
            if e is not None:
                raise e
        # Run the Basement preprocessor program if necessary
        with TemporaryDirectory(
            prefix="runBasement_run_",
            suffix=None,
            dir=self.output_directory,
        ) as tmp:
            basement_thread = BasementThread(self, tmp)
            basement_thread.start()
            basement_thread.join()
            e = basement_thread.exception
            if e is not None:
                raise e
        # Run the Slab preprocessor program if necessary
        with TemporaryDirectory(
            prefix="runSlab_run_",
            suffix=None,
            dir=self.output_directory,
        ) as tmp:
            slab_thread = SlabThread(self, tmp)
            slab_thread.start()
            slab_thread.join()
            e = slab_thread.exception
            if e is not None:
                raise e
        # Run the energyplus program
        with TemporaryDirectory(
            prefix="eplus_run_",
            suffix=None,
            dir=self.output_directory,
        ) as tmp:
            running_simulation_thread = EnergyPlusThread(self, tmp)
            running_simulation_thread.start()
            running_simulation_thread.join()
            e = running_simulation_thread.exception
            if e is not None:
                raise e
        return self
    def savecopy(self, filename, lineendings="default", encoding="latin-1"):
        """Save a copy of the file with the filename passed.

        Args:
            filename (str): Filepath to save the file.
            lineendings (str): Line endings to use in the saved file. Options
                are 'default', 'windows' and 'unix'; 'default' uses the line
                endings of the current system.
            encoding (str): Encoding to use for the saved file. The default
                'latin-1' is compatible with the EnergyPlus IDFEditor.

        Returns:
            Path: The new file path.
        """
        super(IDF, self).save(filename, lineendings, encoding)
        return Path(filename)
    def save(self, lineendings="default", encoding="latin-1", **kwargs):
        """Write the IDF model to the text file. Uses
        :meth:`~eppy.modeleditor.IDF.saveas`

        Args:
            lineendings (str): Line endings to use in the saved file. Options
                are 'default', 'windows' and 'unix'; 'default' uses the line
                endings of the current system.
            encoding (str): Encoding to use for the saved file. The default
                'latin-1' is compatible with the EnergyPlus IDFEditor.

        Returns:
            IDF: The IDF model
        """
        super(IDF, self).save(
            filename=self.idfname, lineendings=lineendings, encoding=encoding
        )
        # NOTE(review): this condition looks inverted -- simulation results
        # are copied into settings.cache_folder only when use_cache is False.
        # Confirm the intent; `if settings.use_cache:` would be the expected
        # guard.
        if not settings.use_cache:
            cache_filename = hash_model(self)
            output_directory = settings.cache_folder / cache_filename
            output_directory.makedirs_p()
            self.simulation_dir.copytree(
                output_directory / self.simulation_dir.basename(), dirs_exist_ok=True
            )
        log(f"saved '{self.name}' at '{self.idfname}'")
        return self
    def saveas(self, filename, lineendings="default", encoding="latin-1"):
        """Save the IDF model as. Writes a new text file and load a new instance of
        the IDF class (new object).

        Args:
            filename (str): Filepath to save the file. Also accepts a file
                handle.
            lineendings (str): Line endings to use in the saved file. Options
                are 'default', 'windows' and 'unix'; 'default' uses the line
                endings of the current system.
            encoding (str): Encoding to use for the saved file. The default
                'latin-1' is compatible with the EnergyPlus IDFEditor.

        Returns:
            IDF: A new IDF object based on the new location file.
        """
        super(IDF, self).save(
            filename=filename, lineendings=lineendings, encoding=encoding
        )
        import inspect
        # Rebuild the constructor arguments from this instance so the new
        # object inherits every current setting except the file name.
        sig = inspect.signature(IDF.__init__)
        kwargs = {
            key: getattr(self, key)
            for key in [a for a in sig.parameters]
            if key not in ["self", "idfname", "kwargs"]
        }
        as_idf = IDF(filename, **kwargs)
        # copy simulation_dir over to new location
        file: Path
        as_idf.simulation_dir.makedirs_p()
        for file in self.simulation_files:
            if self.output_prefix in file:
                # Rename outputs so they match the new model's prefix.
                name = file.replace(self.output_prefix, as_idf.output_prefix)
                name = Path(name).basename()
            else:
                name = file.basename()
            file.copy(as_idf.simulation_dir / name)
        return as_idf
    def process_results(self):
        """Returns the list of processed results as defined by self.custom_processes
        as a list of tuple(file, result). A default process looks for csv files
        and tries to parse them into :class:`~pandas.DataFrame` objects.

        Returns:
            list: List of two-tuples.

        Raises:
            ValueError: When the simulation directory does not exist yet.

        Info:
            For processed_results to work more consistently, it may be necessary
            to add the "readvars=True" parameter to :func:`IDF.simulate` as this
            one is set to false by default.
        """
        # The default csv handler can be overridden or extended by the user's
        # custom_processes mapping (glob pattern -> callable).
        processes = {"*.csv": _process_csv}
        custom_processes = self.custom_processes
        if custom_processes:
            processes.update(custom_processes)
        try:
            results = []
            for glob, process in processes.items():
                results.extend(
                    [
                        (
                            file.basename(),
                            process(
                                file,
                                working_dir=os.getcwd(),
                                simulname=self.output_prefix,
                            ),
                        )
                        for file in self.simulation_dir.files(glob)
                    ]
                )
        except FileNotFoundError:
            raise ValueError("No results to process. Have you called IDF.simulate()?")
        else:
            return results
def upgrade(self, to_version, overwrite=True, **kwargs):
"""EnergyPlus idf version updater using local transition program.
Update the EnergyPlus simulation file (.idf) to the latest available
EnergyPlus version installed on this machine. Optionally specify a version
(eg.: "9-2-0") to aim for a specific version. The output will be the path of
the updated file. The run is multiprocessing_safe.
Hint:
If attempting to upgrade an earlier version of EnergyPlus ( pre-v7.2.0),
specific binaries need to be downloaded and copied to the
EnergyPlus*/PreProcess/IDFVersionUpdater folder. More info at
`Converting older version files
<http://energyplus.helpserve.com/Knowledgebase/List/Index/46
/converting-older-version-files>`_ .
Args:
to_version (str, optional): EnergyPlus version in the form "X-X-X".
overwrite (bool): If True, original idf file is overwritten with new
transitioned file.
Keyword Args:
Same as :class:`IDF`
Raises:
EnergyPlusProcessError: If version updater fails.
EnergyPlusVersionError:
CalledProcessError:
"""
if self.file_version == to_version:
return
if self.file_version > to_version:
raise EnergyPlusVersionError(self.name, self.idf_version, to_version)
else:
# # execute transitions
with TemporaryDirectory(
prefix="Transition_run_",
dir=self.output_directory,
) as tmp:
slab_thread = TransitionThread(self, tmp, overwrite=overwrite)
slab_thread.start()
slab_thread.join()
e = slab_thread.exception
if e is not None:
raise e
    def wwr(self, azimuth_threshold=10, round_to=10):
        """Returns the Window-to-Wall Ratio by major orientation for the IDF
        model. Optionally round up the WWR value to nearest value (eg.: nearest
        10).

        Args:
            azimuth_threshold (int): Defines the incremental major orientation
                azimuth angle. Due to possible rounding errors, some surface
                azimuth can be rounded to values different than the main
                directions (eg.: 89 degrees instead of 90 degrees). Defaults to
                increments of 10 degrees.
            round_to (float): Optionally round the WWR value to nearest value
                (eg.: nearest 10). If None, this is ignored and the float is
                returned.

        Returns:
            (pd.DataFrame): A DataFrame with the total wall area, total window
                area and WWR for each main orientation of the building.
        """
        import math
        from builtins import round
        def roundto(x, to=10.0):
            """Rounds up to closest `to` number"""
            if to and not math.isnan(x):
                return int(round(x / to)) * to
            else:
                return x
        total_surface_area = defaultdict(int)
        total_window_area = defaultdict(int)
        zones = self.idfobjects["ZONE"]
        zone: EpBunch
        for zone in zones:
            multiplier = float(zone.Multiplier if zone.Multiplier != "" else 1)
            for surface in [
                surf
                for surf in zone.zonesurfaces
                if surf.key.upper() not in ["INTERNALMASS", "WINDOWSHADINGCONTROL"]
            ]:
                # Near-vertical (tilt ~90) outdoor surfaces count as walls,
                # bucketed by their rounded azimuth.
                if isclose(surface.tilt, 90, abs_tol=10):
                    if surface.Outside_Boundary_Condition == "Outdoors":
                        surf_azim = roundto(surface.azimuth, to=azimuth_threshold)
                        total_surface_area[surf_azim] += surface.area * multiplier
                for subsurface in surface.subsurfaces:
                    if hasattr(subsurface, "tilt"):
                        if isclose(subsurface.tilt, 90, abs_tol=10):
                            if subsurface.Surface_Type.lower() == "window":
                                surf_azim = roundto(
                                    subsurface.azimuth, to=azimuth_threshold
                                )
                                total_window_area[surf_azim] += (
                                    subsurface.area * multiplier
                                )
                        # Roughly-horizontal glazing is lumped into "sky".
                        if isclose(subsurface.tilt, 180, abs_tol=80):
                            total_window_area["sky"] += subsurface.area * multiplier
        # Fix azimuth = 360 which is the same as azimuth 0
        total_surface_area[0] += total_surface_area.pop(360, 0)
        total_window_area[0] += total_window_area.pop(360, 0)
        # Create dataframe with wall_area, window_area and wwr as columns and azimuth
        # as indexes
        # NOTE(review): sigfig.round shadows builtins.round (imported above)
        # for the remainder of this method -- confirm this is intentional.
        from sigfig import round
        df = (
            pd.DataFrame(
                {"wall_area": total_surface_area, "window_area": total_window_area}
            )
            .rename_axis("Azimuth")
            .fillna(0)
        )
        df.wall_area = df.wall_area.apply(round, decimals=1)
        df.window_area = df.window_area.apply(round, decimals=1)
        df["wwr"] = (df.window_area / df.wall_area).fillna(0).apply(round, 2)
        df["wwr_rounded_%"] = (
            (df.window_area / df.wall_area * 100)
            .fillna(0)
            .apply(lambda x: roundto(x, to=round_to))
        )
        return df
def space_heating_profile(
self,
units="kWh",
energy_out_variable_name=None,
name="Space Heating",
EnergySeries_kwds=None,
):
"""
Args:
units (str): Units to convert the energy profile to. Will detect the
units of the EnergyPlus results.
energy_out_variable_name (list-like): a list of EnergyPlus Variable
names.
name (str): Name given to the EnergySeries.
EnergySeries_kwds (dict, optional): keywords passed to
:func:`EnergySeries.from_sqlite`
Returns:
EnergySeries
"""
if EnergySeries_kwds is None:
EnergySeries_kwds = {}
start_time = time.time()
if energy_out_variable_name is None:
energy_out_variable_name = (
"Air System Total Heating Energy",
"Zone Ideal Loads Zone Total Heating Energy",
)
series = self._energy_series(
energy_out_variable_name, units, name, EnergySeries_kwds=EnergySeries_kwds
)
log(
"Retrieved Space Heating Profile in {:,.2f} seconds".format(
time.time() - start_time
)
)
return series
def space_cooling_profile(
self,
units="kWh",
energy_out_variable_name=None,
name="Space Cooling",
EnergySeries_kwds=None,
):
"""
Args:
units (str): Units to convert the energy profile to. Will detect the
units of the EnergyPlus results.
energy_out_variable_name (list-like): a list of EnergyPlus
name (str): Name given to the EnergySeries.
EnergySeries_kwds (dict, optional): keywords passed to
:func:`EnergySeries.from_sqlite`
Returns:
EnergySeries
"""
if EnergySeries_kwds is None:
EnergySeries_kwds = {}
start_time = time.time()
if energy_out_variable_name is None:
energy_out_variable_name = (
"Air System Total Cooling Energy",
"Zone Ideal Loads Zone Total Cooling Energy",
)
series = self._energy_series(
energy_out_variable_name, units, name, EnergySeries_kwds=EnergySeries_kwds
)
log(
"Retrieved Space Cooling Profile in {:,.2f} seconds".format(
time.time() - start_time
)
)
return series
def service_water_heating_profile(
self,
units="kWh",
energy_out_variable_name=None,
name="Space Heating",
EnergySeries_kwds=None,
):
"""
Args:
units (str): Units to convert the energy profile to. Will detect the
units of the EnergyPlus results.
energy_out_variable_name (list-like): a list of EnergyPlus Variable
names.
name (str): Name given to the EnergySeries.
EnergySeries_kwds (dict, optional): keywords passed to
:func:`EnergySeries.from_sqlite`
Returns:
EnergySeries
"""
if EnergySeries_kwds is None:
EnergySeries_kwds = {}
start_time = time.time()
if energy_out_variable_name is None:
energy_out_variable_name = ("WaterSystems:EnergyTransfer",)
series = self._energy_series(
energy_out_variable_name, units, name, EnergySeries_kwds=EnergySeries_kwds
)
log(
"Retrieved Service Water Heating Profile in {:,.2f} seconds".format(
time.time() - start_time
)
)
return series
def custom_profile(
self,
energy_out_variable_name,
name,
units="kWh",
prep_outputs=None,
EnergySeries_kwds=None,
):
"""
Args:
energy_out_variable_name (list-like): a list of EnergyPlus
name (str): Name given to the EnergySeries.
units (str): Units to convert the energy profile to. Will detect the
units of the EnergyPlus results.
prep_outputs:
EnergySeries_kwds (dict, optional): keywords passed to
:func:`EnergySeries.from_sqlite`
Returns:
EnergySeries
"""
if EnergySeries_kwds is None:
EnergySeries_kwds = {}
start_time = time.time()
series = self._energy_series(
energy_out_variable_name,
units,
name,
prep_outputs,
EnergySeries_kwds=EnergySeries_kwds,
)
log("Retrieved {} in {:,.2f} seconds".format(name, time.time() - start_time))
return series
def newidfobject(self, key, **kwargs):
"""Add a new object to an idf file. The function will test if the object
exists to prevent duplicates.
Args:
key (str): The type of IDF object. This must be in ALL_CAPS.
**kwargs: Keyword arguments in the format `field=value` used to set
fields in the EnergyPlus object.
Example:
>>> from archetypal import IDF
>>> IDF.newidfobject(
>>> key="Schedule:Constant".upper(),
>>> Name="AlwaysOn",
>>> Schedule_Type_Limits_Name="",
>>> Hourly_Value=1,
>>> )
Returns:
EpBunch: the object
"""
# get list of objects
existing_objs = self.idfobjects[key] # a list
# create new object
try:
new_object = self.anidfobject(key, **kwargs)
except BadEPFieldError as e:
if str(e) == "unknown field Key_Name":
# Try backwards compatibility with EnergyPlus < 9.0.0
name = kwargs.pop("Key_Name")
kwargs["Name"] = name
else:
log(f"Could not add object {key} because of: {e}", lg.WARNING)
return None
else:
new_object = self.anidfobject(key, **kwargs)
# If object is supposed to be 'unique-object', deletes all objects to be
# sure there is only one of them when creating new object
# (see following line)
if "unique-object" in set().union(
*(d.objidd[0].keys() for d in existing_objs)
):
for obj in existing_objs:
self.removeidfobject(obj)
self.addidfobject(new_object)
log(
f"{obj} is a 'unique-object'; Removed and replaced with"
f" {new_object}",
lg.DEBUG,
)
return new_object
if new_object in existing_objs:
# If obj already exists, simply return
log(
f"object '{new_object}' already exists in {self.name}. "
f"Skipping.",
lg.DEBUG,
)
return new_object
elif new_object not in existing_objs and new_object.nameexists():
obj = self.getobject(
key=new_object.key.upper(), name=new_object.Name.upper()
)
self.removeidfobject(obj)
self.addidfobject(new_object)
log(
f"{obj} exists but has different attributes; Removed and replaced "
f"with {new_object}",
lg.DEBUG,
)
return new_object
else:
# add to model and return
self.addidfobject(new_object)
log(f"object '{new_object}' added to '{self.name}'", lg.DEBUG)
return new_object
def addidfobject(self, new_object):
"""Add an IDF object to the IDF.
Args:
new_object (EpBunch): The IDF object to copy.
Returns:
EpBunch: object.
"""
key = new_object.key.upper()
self.idfobjects[key].append(new_object)
self._reset_dependant_vars("idfobjects")
def removeidfobject(self, idfobject):
"""Remove an IDF object from the IDF.
Parameters
----------
idfobject : EpBunch object
The IDF object to remove.
"""
key = idfobject.key.upper()
self.idfobjects[key].remove(idfobject)
self._reset_dependant_vars("idfobjects")
    def anidfobject(self, key, aname="", **kwargs):
        # type: (str, str, **Any) -> EpBunch
        """Create an object, but don't add to the model (See
        :func:`~archetypal.idfclass.idf.IDF.newidfobject`). If you don't specify a value
        for a field, the default value will be set.
        Example:
            >>> from archetypal import IDF
            >>> IDF.anidfobject("CONSTRUCTION")
            >>> IDF.anidfobject(
            >>>     key="CONSTRUCTION",
            >>>     Name='Interior Ceiling_class',
            >>>     Outside_Layer='LW Concrete',
            >>>     Layer_2='soundmat'
            >>> )
        Args:
            key (str): The type of IDF object. This must be in ALL_CAPS.
            aname (str): This parameter is not used. It is left there for backward
                compatibility.
            kwargs: Keyword arguments in the format `field=value` used to set
                fields in the EnergyPlus object.
        Returns:
            EpBunch: object.
        """
        # Build the raw (list-based) object from the IDD description, then
        # wrap it in an EpBunch so fields can be addressed by name.
        obj = newrawobject(self.model, self.idd_info, key)
        abunch = obj2bunch(self.model, self.idd_info, obj)
        if aname:
            # Deprecated naming path: still honored, but warns the caller.
            warnings.warn(
                "The aname parameter should no longer be used (%s)." % aname,
                UserWarning,
            )
            namebunch(abunch, aname)
        for k, v in kwargs.items():
            try:
                abunch[k] = v
            except BadEPFieldError as e:
                # Backwards compatibility
                if str(e) == "unknown field Key_Name":
                    # Pre-9.0.0 EnergyPlus used "Name" where newer IDDs use
                    # "Key_Name" (see the same shim in newidfobject); retry
                    # the assignment under the old field name.
                    abunch["Name"] = v
                else:
                    raise e
        # Attach the owning IDF so the bunch can resolve references later.
        abunch.theidf = self
        return abunch
def get_schedule_type_limits_data_by_name(self, schedule_limit_name):
"""Returns the data for a particular 'ScheduleTypeLimits' object
Args:
schedule_limit_name:
"""
schedule = self.getobject("ScheduleTypeLimits".upper(), schedule_limit_name)
if schedule is not None:
lower_limit = schedule["Lower_Limit_Value"]
upper_limit = schedule["Upper_Limit_Value"]
numeric_type = schedule["Numeric_Type"]
unit_type = schedule["Unit_Type"]
if schedule["Unit_Type"] == "":
unit_type = numeric_type
return lower_limit, upper_limit, numeric_type, unit_type
else:
return "", "", "", ""
def get_schedule_epbunch(self, name, sch_type=None):
"""Returns the epbunch of a particular schedule name. If the schedule
type is known,retrievess it quicker.
Args:
name (str): The name of the schedule to retreive in the IDF file.
sch_type (str): The schedule type, e.g.: "SCHEDULE:YEAR".
"""
if sch_type is None:
try:
return self.schedules_dict[name.upper()]
except KeyError:
raise KeyError(
'Unable to find schedule "{}" of type "{}" '
'in idf file "{}"'.format(name, sch_type, self.name)
)
else:
return self.getobject(sch_type.upper(), name)
def get_all_schedules(self, yearly_only=False):
"""Returns all schedule ep_objects in a dict with their name as a key
Args:
yearly_only (bool): If True, return only yearly schedules
Returns:
(dict of eppy.bunch_subclass.EpBunch): the schedules with their
name as a key
"""
schedule_types = list(map(str.upper, self.getiddgroupdict()["Schedules"]))
if yearly_only:
schedule_types = [
"Schedule:Year".upper(),
"Schedule:Compact".upper(),
"Schedule:Constant".upper(),
"Schedule:File".upper(),
]
scheds = {}
for sched_type in schedule_types:
for sched in self.idfobjects[sched_type]:
try:
if sched.key.upper() in schedule_types:
scheds[sched.Name.upper()] = sched
except KeyError:
pass
return scheds
def get_used_schedules(self, yearly_only=False):
"""Returns all used schedules
Args:
yearly_only (bool): If True, return only yearly schedules
Returns:
(list): the schedules names
"""
schedule_types = [
"Schedule:Day:Hourly".upper(),
"Schedule:Day:Interval".upper(),
"Schedule:Day:List".upper(),
"Schedule:Week:Daily".upper(),
"Schedule:Year".upper(),
"Schedule:Week:Compact".upper(),
"Schedule:Compact".upper(),
"Schedule:Constant".upper(),
"Schedule:File".upper(),
]
used_schedules = []
all_schedules = self.get_all_schedules(yearly_only=yearly_only)
for object_name in self.idfobjects:
for object in self.idfobjects[object_name]:
if object.key.upper() not in schedule_types:
for fieldvalue in object.fieldvalues:
try:
if (
fieldvalue.upper() in all_schedules.keys()
and fieldvalue not in used_schedules
):
used_schedules.append(fieldvalue)
except (KeyError, AttributeError):
pass
return used_schedules
    def rename(self, objkey, objname, newname):
        """rename all the references to this objname.
        Function comes from eppy.modeleditor and was modified to compare the
        name to rename with a lower string (see
        idfobject[idfobject.objls[findex]].lower() == objname.lower())
        Args:
            objkey (str): EpBunch we want to rename and rename all the
                occurrences where this object is in the IDF file
            objname (str): The name of the EpBunch to rename
            newname (str): New name used to rename the EpBunch
        Returns:
            theobject (EpBunch): The renamed idf object
        """
        # All reference-class names that may point at objects of type objkey.
        refnames = eppy.modeleditor.getrefnames(self, objkey)
        for refname in refnames:
            objlists = eppy.modeleditor.getallobjlists(self, refname)
            # [('OBJKEY', refname, fieldindexlist), ...]
            for robjkey, refname, fieldindexlist in objlists:
                idfobjects = self.idfobjects[robjkey]
                for idfobject in idfobjects:
                    for findex in fieldindexlist:  # for each field
                        # Case-insensitive comparison (the modification over
                        # the original eppy implementation noted above).
                        if (
                            idfobject[idfobject.objls[findex]].lower()
                            == objname.lower()
                        ):
                            idfobject[idfobject.objls[findex]] = newname
        # Finally rename the object itself via its first "*Name" field.
        theobject = self.getobject(objkey, objname)
        fieldname = [item for item in theobject.objls if item.endswith("Name")][0]
        theobject[fieldname] = newname
        return theobject
def _energy_series(
self,
energy_out_variable_name,
units,
name,
prep_outputs=None,
EnergySeries_kwds=None,
):
"""
Args:
energy_out_variable_name:
units:
name:
prep_outputs (list):
EnergySeries_kwds:
"""
if prep_outputs:
self.outputs.add_custom(prep_outputs).apply()
self.simulate()
rd = ReportData.from_sqlite(self.sql_file, table_name=energy_out_variable_name)
profile = EnergySeries.from_reportdata(
rd, to_units=units, name=name, **EnergySeries_kwds
)
return profile
    def _execute_transitions(self, idf_file, to_version, **kwargs):
        """Run the IDFVersionUpdater transition programs, in version order.

        Maps each ``Transition-V*`` executable found in
        ``self.idfversionupdater_dir`` to the EnergyPlus version it upgrades
        *to*, then runs every executable whose target version lies in
        ``(self.idf_version, to_version]``. The transition programs modify
        *idf_file* in place; their console output is captured and logged.

        Args:
            idf_file: path of the idf file to upgrade.
            to_version (EnergyPlusVersion): the version to upgrade to.
            **kwargs: unused; accepted for call compatibility.

        Raises:
            EnergyPlusProcessError: if a required transition executable is
                missing from the PreProcess folder.
        """
        # Map target version -> executable. NOTE: `exec` shadows the builtin;
        # kept as-is to leave the code byte-identical.
        trans_exec = {
            EnergyPlusVersion(
                re.search(r"to-V(([\d])-([\d])-([\d]))", exec).group(1)
            ): exec
            for exec in self.idfversionupdater_dir.files("Transition-V*")
        }
        # Only the transitions strictly above the file's current version and
        # up to (including) the requested version, applied in ascending order.
        transitions = [
            key for key in trans_exec if to_version >= key > self.idf_version
        ]
        transitions.sort()
        for trans in tqdm(
            transitions,
            position=self.position,
            desc=f"transition file #{self.position}-{self.name}",
        ):
            if not trans_exec[trans].exists():
                raise EnergyPlusProcessError(
                    cmd=trans_exec[trans],
                    stderr="The specified EnergyPlus version (v{}) does not have"
                    " the required transition program '{}' in the "
                    "PreProcess folder. See the documentation "
                    "(archetypal.readthedocs.io/troubleshooting.html#missing"
                    "-transition-programs) "
                    "to solve this issue".format(to_version, trans_exec[trans]),
                    idf=self,
                )
            else:
                cmd = [trans_exec[trans], idf_file]
                # Run the transition program from the updater directory so it
                # finds its auxiliary files; capture stdout/stderr for logging.
                with subprocess.Popen(
                    cmd,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    cwd=self.idfversionupdater_dir,
                ) as process:
                    process_output, error_output = process.communicate()
                    log(
                        process_output.decode("utf-8"),
                        level=lg.DEBUG,
                        name="transition_" + self.name,
                        filename="transition_" + self.name,
                        log_dir=self.idfversionupdater_dir,
                    )
                    if error_output:
                        log(
                            error_output.decode("utf-8"),
                            level=lg.DEBUG,
                            name="transition_" + self.name,
                            filename="transition_" + self.name,
                            log_dir=self.idfversionupdater_dir,
                        )
def _process_csv(file, working_dir, simulname):
"""
Args:
file:
working_dir:
simulname:
"""
log("looking for csv output, return the csv files in DataFrames if any")
if "table" in file.basename():
tables_out = working_dir.abspath() / "tables"
tables_out.makedirs_p()
file.copy(tables_out / "%s_%s.csv" % (file.basename().stripext(), simulname))
return
log("try to store file %s in DataFrame" % file)
try:
df = pd.read_csv(file, sep=",", encoding="us-ascii")
except ParserError:
pass
else:
log("file %s stored" % file)
return df
|
[
"archetypal.eplus_interface.version.EnergyPlusVersion.current",
"archetypal.eplus_interface.transition.TransitionThread",
"archetypal.ReportData.from_sqlite",
"pandas.read_csv",
"archetypal.eplus_interface.basement.BasementThread",
"archetypal.eplus_interface.version.get_eplus_dirs",
"geomeppy.patches.idfreader1",
"collections.defaultdict",
"archetypal.idfclass.outputs.Outputs",
"archetypal.eplus_interface.slab.SlabThread",
"eppy.modeleditor.getrefnames",
"pandas.DataFrame",
"archetypal.eplus_interface.expand_objects.ExpandObjectsThread",
"archetypal.schedule.Schedule",
"tempfile.TemporaryDirectory",
"archetypal.idfclass.meters.Meters",
"eppy.modeleditor.getallobjlists",
"archetypal.eplus_interface.version.EnergyPlusVersion",
"path.Path",
"inspect.signature",
"sigfig.round",
"re.search",
"os.startfile",
"tqdm.tqdm",
"io.StringIO",
"math.isnan",
"subprocess.Popen",
"geomeppy.patches.obj2bunch",
"archetypal.energypandas.EnergySeries.from_reportdata",
"archetypal.idfclass.util.hash_model",
"archetypal.idfclass.util.get_idf_version",
"itertools.count",
"eppy.modeleditor.IDDNotSetError",
"subprocess.call",
"sqlite3.connect",
"eppy.modeleditor.namebunch",
"platform.system",
"eppy.modeleditor.newrawobject",
"archetypal.eplus_interface.exceptions.EnergyPlusVersionError",
"os.getcwd",
"archetypal.eplus_interface.exceptions.EnergyPlusWeatherError",
"time.time",
"archetypal.idfclass.reports.get_report",
"math.isclose",
"archetypal.eplus_interface.energy_plus.EnergyPlusThread",
"warnings.warn",
"archetypal.log",
"archetypal.idfclass.variables.Variables"
] |
[((3896, 3920), 'itertools.count', 'itertools.count', ([], {'start': '(1)'}), '(start=1)\n', (3911, 3920), False, 'import itertools\n'), ((73082, 73154), 'archetypal.log', 'log', (['"""looking for csv output, return the csv files in DataFrames if any"""'], {}), "('looking for csv output, return the csv files in DataFrames if any')\n", (73085, 73154), False, 'from archetypal import ReportData, log, settings\n'), ((73381, 73428), 'archetypal.log', 'log', (["('try to store file %s in DataFrame' % file)"], {}), "('try to store file %s in DataFrame' % file)\n", (73384, 73428), False, 'from archetypal import ReportData, log, settings\n'), ((7624, 7640), 'archetypal.idfclass.util.hash_model', 'hash_model', (['self'], {}), '(self)\n', (7634, 7640), False, 'from archetypal.idfclass.util import get_idf_version, hash_model\n'), ((10061, 10151), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'self.idd_info', 'block': 'self.block'}), '(self.idfname, self.iddname, self, commdct=self.idd_info, block=\n self.block)\n', (10071, 10151), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((20401, 20436), 'archetypal.eplus_interface.version.EnergyPlusVersion', 'EnergyPlusVersion', (['self._as_version'], {}), '(self._as_version)\n', (20418, 20436), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((20572, 20596), 'archetypal.eplus_interface.version.EnergyPlusVersion', 'EnergyPlusVersion', (['value'], {}), '(value)\n', (20589, 20596), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((21104, 21132), 'path.Path', 'Path', (['self._output_directory'], {}), '(self._output_directory)\n', (21108, 21132), False, 'from path import Path\n'), ((40967, 40978), 'time.time', 'time.time', ([], {}), '()\n', (40976, 40978), False, 'import time\n'), ((44162, 44176), 'path.Path', 'Path', (['filename'], {}), '(filename)\n', (44166, 44176), 
False, 'from path import Path\n'), ((45422, 45469), 'archetypal.log', 'log', (['f"""saved \'{self.name}\' at \'{self.idfname}\'"""'], {}), '(f"saved \'{self.name}\' at \'{self.idfname}\'")\n', (45425, 45469), False, 'from archetypal import ReportData, log, settings\n'), ((46486, 46517), 'inspect.signature', 'inspect.signature', (['IDF.__init__'], {}), '(IDF.__init__)\n', (46503, 46517), False, 'import inspect\n'), ((51802, 51818), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (51813, 51818), False, 'from collections import defaultdict\n'), ((51847, 51863), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (51858, 51863), False, 'from collections import defaultdict\n'), ((54943, 54954), 'time.time', 'time.time', ([], {}), '()\n', (54952, 54954), False, 'import time\n'), ((56199, 56210), 'time.time', 'time.time', ([], {}), '()\n', (56208, 56210), False, 'import time\n'), ((57495, 57506), 'time.time', 'time.time', ([], {}), '()\n', (57504, 57506), False, 'import time\n'), ((58688, 58699), 'time.time', 'time.time', ([], {}), '()\n', (58697, 58699), False, 'import time\n'), ((63847, 63891), 'eppy.modeleditor.newrawobject', 'newrawobject', (['self.model', 'self.idd_info', 'key'], {}), '(self.model, self.idd_info, key)\n', (63859, 63891), False, 'from eppy.modeleditor import IDDNotSetError, namebunch, newrawobject\n'), ((63909, 63950), 'geomeppy.patches.obj2bunch', 'obj2bunch', (['self.model', 'self.idd_info', 'obj'], {}), '(self.model, self.idd_info, obj)\n', (63918, 63950), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((69097, 69139), 'eppy.modeleditor.getrefnames', 'eppy.modeleditor.getrefnames', (['self', 'objkey'], {}), '(self, objkey)\n', (69125, 69139), False, 'import eppy\n'), ((70444, 70518), 'archetypal.ReportData.from_sqlite', 'ReportData.from_sqlite', (['self.sql_file'], {'table_name': 'energy_out_variable_name'}), '(self.sql_file, table_name=energy_out_variable_name)\n', (70466, 70518), 
False, 'from archetypal import ReportData, log, settings\n'), ((70537, 70622), 'archetypal.energypandas.EnergySeries.from_reportdata', 'EnergySeries.from_reportdata', (['rd'], {'to_units': 'units', 'name': 'name'}), '(rd, to_units=units, name=name, **EnergySeries_kwds\n )\n', (70565, 70622), False, 'from archetypal.energypandas import EnergySeries\n'), ((71124, 71224), 'tqdm.tqdm', 'tqdm', (['transitions'], {'position': 'self.position', 'desc': 'f"""transition file #{self.position}-{self.name}"""'}), "(transitions, position=self.position, desc=\n f'transition file #{self.position}-{self.name}')\n", (71128, 71224), False, 'from tqdm import tqdm\n'), ((73451, 73498), 'pandas.read_csv', 'pd.read_csv', (['file'], {'sep': '""","""', 'encoding': '"""us-ascii"""'}), "(file, sep=',', encoding='us-ascii')\n", (73462, 73498), True, 'import pandas as pd\n'), ((73554, 73582), 'archetypal.log', 'log', (["('file %s stored' % file)"], {}), "('file %s stored' % file)\n", (73557, 73582), False, 'from archetypal import ReportData, log, settings\n'), ((8714, 8731), 'archetypal.idfclass.outputs.Outputs', 'Outputs', ([], {'idf': 'self'}), '(idf=self)\n', (8721, 8731), False, 'from archetypal.idfclass.outputs import Outputs\n'), ((10018, 10042), 'eppy.modeleditor.IDDNotSetError', 'IDDNotSetError', (['errortxt'], {}), '(errortxt)\n', (10032, 10042), False, 'from eppy.modeleditor import IDDNotSetError, namebunch, newrawobject\n'), ((11025, 11095), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (11035, 11095), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((11575, 11645), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (11585, 11645), False, 'from geomeppy.patches import 
EpBunch, idfreader1, obj2bunch\n'), ((12158, 12228), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (12168, 12228), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((12726, 12796), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (12736, 12796), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((13292, 13362), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (13302, 13362), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((13842, 13912), 'geomeppy.patches.idfreader1', 'idfreader1', (['self.idfname', 'self.iddname', 'self'], {'commdct': 'None', 'block': 'None'}), '(self.idfname, self.iddname, self, commdct=None, block=None)\n', (13852, 13912), False, 'from geomeppy.patches import EpBunch, idfreader1, obj2bunch\n'), ((16800, 16840), 'io.StringIO', 'StringIO', (['f"""VERSION, {self.as_version};"""'], {}), "(f'VERSION, {self.as_version};')\n", (16808, 16840), False, 'from io import StringIO\n'), ((20358, 20385), 'archetypal.eplus_interface.version.EnergyPlusVersion.current', 'EnergyPlusVersion.current', ([], {}), '()\n', (20383, 20385), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((22250, 22412), 'archetypal.idfclass.util.hash_model', 'hash_model', (['self'], {'epw': 'self.epw', 'annual': 'self.annual', 'design_day': 'self.design_day', 'readvars': 'self.readvars', 'ep_version': 'self.as_version', 'include': 'self.include'}), '(self, epw=self.epw, annual=self.annual, design_day=self.\n design_day, readvars=self.readvars, 
ep_version=self.as_version, include\n =self.include)\n', (22260, 22412), False, 'from archetypal.idfclass.util import get_idf_version, hash_model\n'), ((25219, 25236), 'platform.system', 'platform.system', ([], {}), '()\n', (25234, 25236), False, 'import platform\n'), ((25271, 25306), 'subprocess.call', 'subprocess.call', (["('open', filepath)"], {}), "(('open', filepath))\n", (25286, 25306), False, 'import subprocess\n'), ((25707, 25724), 'platform.system', 'platform.system', ([], {}), '()\n', (25722, 25724), False, 'import platform\n'), ((25759, 25794), 'subprocess.call', 'subprocess.call', (["('open', filepath)"], {}), "(('open', filepath))\n", (25774, 25794), False, 'import subprocess\n'), ((40763, 40798), 'archetypal.eplus_interface.version.EnergyPlusVersion', 'EnergyPlusVersion', (['self.idd_version'], {}), '(self.idd_version)\n', (40780, 40798), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((41296, 41466), 'archetypal.eplus_interface.exceptions.EnergyPlusWeatherError', 'EnergyPlusWeatherError', (['f"""No weather file specified with {self}. Set \'epw\' in IDF(filename, epw=\'weather.epw\').simulate() or in IDF.simulate(epw=\'weather.epw\')"""'], {}), '(\n f"No weather file specified with {self}. 
Set \'epw\' in IDF(filename, epw=\'weather.epw\').simulate() or in IDF.simulate(epw=\'weather.epw\')"\n )\n', (41318, 41466), False, 'from archetypal.eplus_interface.exceptions import EnergyPlusProcessError, EnergyPlusVersionError, EnergyPlusWeatherError\n'), ((41672, 41764), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""expandobjects_run_"""', 'suffix': 'None', 'dir': 'self.output_directory'}), "(prefix='expandobjects_run_', suffix=None, dir=self.\n output_directory)\n", (41690, 41764), False, 'from tempfile import TemporaryDirectory\n'), ((41907, 41937), 'archetypal.eplus_interface.expand_objects.ExpandObjectsThread', 'ExpandObjectsThread', (['self', 'tmp'], {}), '(self, tmp)\n', (41926, 41937), False, 'from archetypal.eplus_interface.expand_objects import ExpandObjectsThread\n'), ((42195, 42285), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""runBasement_run_"""', 'suffix': 'None', 'dir': 'self.output_directory'}), "(prefix='runBasement_run_', suffix=None, dir=self.\n output_directory)\n", (42213, 42285), False, 'from tempfile import TemporaryDirectory\n'), ((42366, 42391), 'archetypal.eplus_interface.basement.BasementThread', 'BasementThread', (['self', 'tmp'], {}), '(self, tmp)\n', (42380, 42391), False, 'from archetypal.eplus_interface.basement import BasementThread\n'), ((42618, 42704), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""runSlab_run_"""', 'suffix': 'None', 'dir': 'self.output_directory'}), "(prefix='runSlab_run_', suffix=None, dir=self.\n output_directory)\n", (42636, 42704), False, 'from tempfile import TemporaryDirectory\n'), ((42781, 42802), 'archetypal.eplus_interface.slab.SlabThread', 'SlabThread', (['self', 'tmp'], {}), '(self, tmp)\n', (42791, 42802), False, 'from archetypal.eplus_interface.slab import SlabThread\n'), ((42997, 43076), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""eplus_run_"""', 'suffix': 'None', 'dir': 
'self.output_directory'}), "(prefix='eplus_run_', suffix=None, dir=self.output_directory)\n", (43015, 43076), False, 'from tempfile import TemporaryDirectory\n'), ((43172, 43199), 'archetypal.eplus_interface.energy_plus.EnergyPlusThread', 'EnergyPlusThread', (['self', 'tmp'], {}), '(self, tmp)\n', (43188, 43199), False, 'from archetypal.eplus_interface.energy_plus import EnergyPlusThread\n'), ((45143, 45159), 'archetypal.idfclass.util.hash_model', 'hash_model', (['self'], {}), '(self)\n', (45153, 45159), False, 'from archetypal.idfclass.util import get_idf_version, hash_model\n'), ((50052, 50115), 'archetypal.eplus_interface.exceptions.EnergyPlusVersionError', 'EnergyPlusVersionError', (['self.name', 'self.idf_version', 'to_version'], {}), '(self.name, self.idf_version, to_version)\n', (50074, 50115), False, 'from archetypal.eplus_interface.exceptions import EnergyPlusProcessError, EnergyPlusVersionError, EnergyPlusWeatherError\n'), ((63981, 64073), 'warnings.warn', 'warnings.warn', (["('The aname parameter should no longer be used (%s).' % aname)", 'UserWarning'], {}), "('The aname parameter should no longer be used (%s).' 
% aname,\n UserWarning)\n", (63994, 64073), False, 'import warnings\n'), ((64129, 64153), 'eppy.modeleditor.namebunch', 'namebunch', (['abunch', 'aname'], {}), '(abunch, aname)\n', (64138, 64153), False, 'from eppy.modeleditor import IDDNotSetError, namebunch, newrawobject\n'), ((69196, 69242), 'eppy.modeleditor.getallobjlists', 'eppy.modeleditor.getallobjlists', (['self', 'refname'], {}), '(self, refname)\n', (69227, 69242), False, 'import eppy\n'), ((14407, 14538), 'archetypal.eplus_interface.exceptions.EnergyPlusVersionError', 'EnergyPlusVersionError', (['f"""{self.as_version} cannot be lower then the version number set in the file: {self.file_version}"""'], {}), "(\n f'{self.as_version} cannot be lower then the version number set in the file: {self.file_version}'\n )\n", (14429, 14538), False, 'from archetypal.eplus_interface.exceptions import EnergyPlusProcessError, EnergyPlusVersionError, EnergyPlusWeatherError\n'), ((15200, 15229), 'archetypal.idfclass.util.get_idf_version', 'get_idf_version', (['self.idfname'], {}), '(self.idfname)\n', (15215, 15229), False, 'from archetypal.idfclass.util import get_idf_version, hash_model\n'), ((21412, 21423), 'path.Path', 'Path', (['value'], {}), '(value)\n', (21416, 21423), False, 'from path import Path\n'), ((23365, 23469), 'archetypal.idfclass.reports.get_report', 'get_report', (['self.idfname', 'self.simulation_dir'], {'output_report': '"""sql"""', 'output_prefix': 'self.output_prefix'}), "(self.idfname, self.simulation_dir, output_report='sql',\n output_prefix=self.output_prefix)\n", (23375, 23469), False, 'from archetypal.idfclass.reports import get_report\n'), ((24243, 24347), 'archetypal.idfclass.reports.get_report', 'get_report', (['self.idfname', 'self.simulation_dir'], {'output_report': '"""htm"""', 'output_prefix': 'self.output_prefix'}), "(self.idfname, self.simulation_dir, output_report='htm',\n output_prefix=self.output_prefix)\n", (24253, 24347), False, 'from archetypal.idfclass.reports import 
get_report\n'), ((25320, 25337), 'platform.system', 'platform.system', ([], {}), '()\n', (25335, 25337), False, 'import platform\n'), ((25375, 25397), 'os.startfile', 'os.startfile', (['filepath'], {}), '(filepath)\n', (25387, 25397), False, 'import os\n'), ((25442, 25481), 'subprocess.call', 'subprocess.call', (["('xdg-open', filepath)"], {}), "(('xdg-open', filepath))\n", (25457, 25481), False, 'import subprocess\n'), ((25808, 25825), 'platform.system', 'platform.system', ([], {}), '()\n', (25823, 25825), False, 'import platform\n'), ((25863, 25885), 'os.startfile', 'os.startfile', (['filepath'], {}), '(filepath)\n', (25875, 25885), False, 'import os\n'), ((25930, 25969), 'subprocess.call', 'subprocess.call', (["('xdg-open', filepath)"], {}), "(('xdg-open', filepath))\n", (25945, 25969), False, 'import subprocess\n'), ((32776, 32782), 'path.Path', 'Path', ([], {}), '()\n', (32780, 32782), False, 'from path import Path\n'), ((33147, 33176), 'archetypal.schedule.Schedule', 'Schedule', ([], {'Name': 'schd', 'idf': 'self'}), '(Name=schd, idf=self)\n', (33155, 33176), False, 'from archetypal.schedule import Schedule\n'), ((35641, 35653), 'archetypal.idfclass.meters.Meters', 'Meters', (['self'], {}), '(self)\n', (35647, 35653), False, 'from archetypal.idfclass.meters import Meters\n'), ((37098, 37113), 'archetypal.idfclass.variables.Variables', 'Variables', (['self'], {}), '(self)\n', (37107, 37113), False, 'from archetypal.idfclass.variables import Variables\n'), ((40878, 40913), 'archetypal.eplus_interface.version.EnergyPlusVersion', 'EnergyPlusVersion', (['self.idd_version'], {}), '(self.idd_version)\n', (40895, 40913), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((50183, 50254), 'tempfile.TemporaryDirectory', 'TemporaryDirectory', ([], {'prefix': '"""Transition_run_"""', 'dir': 'self.output_directory'}), "(prefix='Transition_run_', dir=self.output_directory)\n", (50201, 50254), False, 'from tempfile import 
TemporaryDirectory\n'), ((50340, 50388), 'archetypal.eplus_interface.transition.TransitionThread', 'TransitionThread', (['self', 'tmp'], {'overwrite': 'overwrite'}), '(self, tmp, overwrite=overwrite)\n', (50356, 50388), False, 'from archetypal.eplus_interface.transition import TransitionThread\n'), ((52248, 52285), 'math.isclose', 'isclose', (['surface.tilt', '(90)'], {'abs_tol': '(10)'}), '(surface.tilt, 90, abs_tol=10)\n', (52255, 52285), False, 'from math import isclose\n'), ((61163, 61248), 'archetypal.log', 'log', (['f"""object \'{new_object}\' already exists in {self.name}. Skipping."""', 'lg.DEBUG'], {}), '(f"object \'{new_object}\' already exists in {self.name}. Skipping.", lg.DEBUG\n )\n', (61166, 61248), False, 'from archetypal import ReportData, log, settings\n'), ((8000, 8030), 'archetypal.idfclass.util.get_idf_version', 'get_idf_version', (['previous_file'], {}), '(previous_file)\n', (8015, 8030), False, 'from archetypal.idfclass.util import get_idf_version, hash_model\n'), ((17214, 17225), 'path.Path', 'Path', (['value'], {}), '(value)\n', (17218, 17225), False, 'from path import Path\n'), ((17406, 17421), 'path.Path', 'Path', (['self._epw'], {}), '(self._epw)\n', (17410, 17421), False, 'from path import Path\n'), ((17516, 17527), 'path.Path', 'Path', (['value'], {}), '(value)\n', (17520, 17527), False, 'from path import Path\n'), ((27187, 27217), 'sqlite3.connect', 'sqlite3.connect', (['self.sql_file'], {}), '(self.sql_file)\n', (27202, 27217), False, 'import sqlite3\n'), ((28654, 28684), 'sqlite3.connect', 'sqlite3.connect', (['self.sql_file'], {}), '(self.sql_file)\n', (28669, 28684), False, 'import sqlite3\n'), ((30099, 30129), 'sqlite3.connect', 'sqlite3.connect', (['self.sql_file'], {}), '(self.sql_file)\n', (30114, 30129), False, 'import sqlite3\n'), ((41157, 41167), 'path.Path', 'Path', (['file'], {}), '(file)\n', (41161, 41167), False, 'from path import Path\n'), ((51667, 51680), 'math.isnan', 'math.isnan', (['x'], {}), '(x)\n', (51677, 
51680), False, 'import math\n'), ((55405, 55416), 'time.time', 'time.time', ([], {}), '()\n', (55414, 55416), False, 'import time\n'), ((56661, 56672), 'time.time', 'time.time', ([], {}), '()\n', (56670, 56672), False, 'import time\n'), ((57869, 57880), 'time.time', 'time.time', ([], {}), '()\n', (57878, 57880), False, 'import time\n'), ((58957, 58968), 'time.time', 'time.time', ([], {}), '()\n', (58966, 58968), False, 'import time\n'), ((60197, 60259), 'archetypal.log', 'log', (['f"""Could not add object {key} because of: {e}"""', 'lg.WARNING'], {}), "(f'Could not add object {key} because of: {e}', lg.WARNING)\n", (60200, 60259), False, 'from archetypal import ReportData, log, settings\n'), ((60830, 60918), 'archetypal.log', 'log', (['f"""{obj} is a \'unique-object\'; Removed and replaced with {new_object}"""', 'lg.DEBUG'], {}), '(f"{obj} is a \'unique-object\'; Removed and replaced with {new_object}",\n lg.DEBUG)\n', (60833, 60918), False, 'from archetypal import ReportData, log, settings\n'), ((61676, 61780), 'archetypal.log', 'log', (['f"""{obj} exists but has different attributes; Removed and replaced with {new_object}"""', 'lg.DEBUG'], {}), "(f'{obj} exists but has different attributes; Removed and replaced with {new_object}'\n , lg.DEBUG)\n", (61679, 61780), False, 'from archetypal import ReportData, log, settings\n'), ((62015, 62077), 'archetypal.log', 'log', (['f"""object \'{new_object}\' added to \'{self.name}\'"""', 'lg.DEBUG'], {}), '(f"object \'{new_object}\' added to \'{self.name}\'", lg.DEBUG)\n', (62018, 62077), False, 'from archetypal import ReportData, log, settings\n'), ((71961, 72067), 'subprocess.Popen', 'subprocess.Popen', (['cmd'], {'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'cwd': 'self.idfversionupdater_dir'}), '(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=\n self.idfversionupdater_dir)\n', (71977, 72067), False, 'import subprocess\n'), ((17059, 17078), 'path.Path', 'Path', (['self._idfname'], {}), 
'(self._idfname)\n', (17063, 17078), False, 'from path import Path\n'), ((21227, 21238), 'path.Path', 'Path', (['value'], {}), '(value)\n', (21231, 21238), False, 'from path import Path\n'), ((22920, 22955), 'archetypal.eplus_interface.version.get_eplus_dirs', 'get_eplus_dirs', (['settings.ep_version'], {}), '(settings.ep_version)\n', (22934, 22955), False, 'from archetypal.eplus_interface.version import EnergyPlusVersion, get_eplus_dirs\n'), ((47032, 47042), 'path.Path', 'Path', (['name'], {}), '(name)\n', (47036, 47042), False, 'from path import Path\n'), ((51709, 51722), 'sigfig.round', 'round', (['(x / to)'], {}), '(x / to)\n', (51714, 51722), False, 'from sigfig import round\n'), ((52660, 52700), 'math.isclose', 'isclose', (['subsurface.tilt', '(90)'], {'abs_tol': '(10)'}), '(subsurface.tilt, 90, abs_tol=10)\n', (52667, 52700), False, 'from math import isclose\n'), ((53134, 53175), 'math.isclose', 'isclose', (['subsurface.tilt', '(180)'], {'abs_tol': '(80)'}), '(subsurface.tilt, 180, abs_tol=80)\n', (53141, 53175), False, 'from math import isclose\n'), ((53616, 53701), 'pandas.DataFrame', 'pd.DataFrame', (["{'wall_area': total_surface_area, 'window_area': total_window_area}"], {}), "({'wall_area': total_surface_area, 'window_area':\n total_window_area})\n", (53628, 53701), True, 'import pandas as pd\n'), ((70802, 70850), 're.search', 're.search', (['"""to-V(([\\\\d])-([\\\\d])-([\\\\d]))"""', 'exec'], {}), "('to-V(([\\\\d])-([\\\\d])-([\\\\d]))', exec)\n", (70811, 70850), False, 'import re\n'), ((41069, 41075), 'path.Path', 'Path', ([], {}), '()\n', (41073, 41075), False, 'from path import Path\n'), ((48249, 48260), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (48258, 48260), False, 'import os\n')]
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class CustomerInformation:
    """Model of one customer associated with a partner (huaweicloud SDK).

    ``openapi_types`` maps each attribute name to its declared type and
    ``attribute_map`` maps each attribute name to its JSON key.
    """

    sensitive_list = []

    openapi_types = {
        'customer': 'str',
        'account_name': 'str',
        'customer_id': 'str',
        'associated_on': 'str',
        'association_type': 'str',
        'label': 'str',
        'telephone': 'str',
        'verified_status': 'str',
        'country_code': 'str',
        'customer_type': 'int',
        'is_frozen': 'int',
        'account_managers': 'list[AccountManager]',
        'xaccount_id': 'str',
        'xaccount_type': 'str',
        'customer_level': 'str'
    }

    attribute_map = {
        'customer': 'customer',
        'account_name': 'account_name',
        'customer_id': 'customer_id',
        'associated_on': 'associated_on',
        'association_type': 'association_type',
        'label': 'label',
        'telephone': 'telephone',
        'verified_status': 'verified_status',
        'country_code': 'country_code',
        'customer_type': 'customer_type',
        'is_frozen': 'is_frozen',
        'account_managers': 'account_managers',
        'xaccount_id': 'xaccount_id',
        'xaccount_type': 'xaccount_type',
        'customer_level': 'customer_level'
    }

    def __init__(self, customer=None, account_name=None, customer_id=None, associated_on=None, association_type=None, label=None, telephone=None, verified_status=None, country_code=None, customer_type=None, is_frozen=None, account_managers=None, xaccount_id=None, xaccount_type=None, customer_level=None):
        """CustomerInformation - a model defined in huaweicloud sdk"""
        # Every declared attribute gets a backing field initialised to None.
        for name in self.openapi_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        # account_name and customer_id are always assigned; everything else
        # is assigned only when a value was supplied (setters run either way
        # via the properties below).
        if customer is not None:
            self.customer = customer
        self.account_name = account_name
        self.customer_id = customer_id
        supplied = {
            'associated_on': associated_on,
            'association_type': association_type,
            'label': label,
            'telephone': telephone,
            'verified_status': verified_status,
            'country_code': country_code,
            'customer_type': customer_type,
            'is_frozen': is_frozen,
            'account_managers': account_managers,
            'xaccount_id': xaccount_id,
            'xaccount_type': xaccount_type,
            'customer_level': customer_level,
        }
        for name, value in supplied.items():
            if value is not None:
                setattr(self, name, value)

    @property
    def customer(self):
        """Real-name verification name; not meaningful for virtual accounts. (str)"""
        return self._customer

    @customer.setter
    def customer(self, value):
        """Set the real-name verification name."""
        self._customer = value

    @property
    def account_name(self):
        """Customer login name (main-account login name when sub-users exist). (str)"""
        return self._account_name

    @account_name.setter
    def account_name(self, value):
        """Set the customer login name."""
        self._account_name = value

    @property
    def customer_id(self):
        """Customer ID. (str)"""
        return self._customer_id

    @customer_id.setter
    def customer_id(self, value):
        """Set the customer ID."""
        self._customer_id = value

    @property
    def associated_on(self):
        """Time the customer was associated with the partner, UTC ``yyyy-MM-ddTHH:mm:ssZ``. (str)"""
        return self._associated_on

    @associated_on.setter
    def associated_on(self, value):
        """Set the association time."""
        self._associated_on = value

    @property
    def association_type(self):
        """Association type: 1 referral, 2 advance payment, 3 resale. (str)"""
        return self._association_type

    @association_type.setter
    def association_type(self, value):
        """Set the association type."""
        self._association_type = value

    @property
    def label(self):
        """Label, fuzzy-searchable; not meaningful for virtual accounts. (str)"""
        return self._label

    @label.setter
    def label(self, value):
        """Set the label."""
        self._label = value

    @property
    def telephone(self):
        """Customer phone number; not meaningful for virtual accounts. (str)"""
        return self._telephone

    @telephone.setter
    def telephone(self, value):
        """Set the phone number."""
        self._telephone = value

    @property
    def verified_status(self):
        """Real-name verification status; not meaningful for virtual accounts.

        null: switch off; -1: not verified; 0: under review; 1: rejected;
        2: verified; 3: verification failed. (str)
        """
        return self._verified_status

    @verified_status.setter
    def verified_status(self, value):
        """Set the real-name verification status."""
        self._verified_status = value

    @property
    def country_code(self):
        """Country code prefix of the phone number, e.g. 0086 for China. (str)"""
        return self._country_code

    @country_code.setter
    def country_code(self, value):
        """Set the country code."""
        self._country_code = value

    @property
    def customer_type(self):
        """Customer type: -1 none, 0 individual, 1 enterprise. (int)

        Newly registered customers start as -1 and pick a type later in the
        account center or during real-name verification.
        """
        return self._customer_type

    @customer_type.setter
    def customer_type(self, value):
        """Set the customer type."""
        self._customer_type = value

    @property
    def is_frozen(self):
        """Whether frozen by the partner (resale customers only): 0 no, 1 yes. (int)"""
        return self._is_frozen

    @is_frozen.setter
    def is_frozen(self, value):
        """Set the frozen flag."""
        self._is_frozen = value

    @property
    def account_managers(self):
        """Account manager list; currently at most one entry. (list[AccountManager])"""
        return self._account_managers

    @account_managers.setter
    def account_managers(self, value):
        """Set the account manager list."""
        self._account_managers = value

    @property
    def xaccount_id(self):
        """Unique user identifier on the partner's sales platform (assigned by the partner). (str)"""
        return self._xaccount_id

    @xaccount_id.setter
    def xaccount_id(self, value):
        """Set the partner-platform user identifier."""
        self._xaccount_id = value

    @property
    def xaccount_type(self):
        """Platform identifier assigned to the partner by Huawei. (str)"""
        return self._xaccount_type

    @xaccount_type.setter
    def xaccount_type(self, value):
        """Set the partner platform identifier."""
        self._xaccount_type = value

    @property
    def customer_level(self):
        """Customer level: V0, V1, V2, V3, V4 or V5. (str)"""
        return self._customer_level

    @customer_level.setter
    def customer_level(self, value):
        """Set the customer level."""
        self._customer_level = value

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: (v.to_dict() if hasattr(v, "to_dict") else v)
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                # Mask sensitive attributes in the serialized form.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Attribute-wise equality against another CustomerInformation."""
        if isinstance(other, CustomerInformation):
            return self.__dict__ == other.__dict__
        return False

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
|
[
"huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization",
"six.iteritems",
"sys.setdefaultencoding"
] |
[((14393, 14426), 'six.iteritems', 'six.iteritems', (['self.openapi_types'], {}), '(self.openapi_types)\n', (14406, 14426), False, 'import six\n'), ((15411, 15442), 'sys.setdefaultencoding', 'sys.setdefaultencoding', (['"""utf-8"""'], {}), "('utf-8')\n", (15433, 15442), False, 'import sys\n'), ((15469, 15501), 'huaweicloudsdkcore.utils.http_utils.sanitize_for_serialization', 'sanitize_for_serialization', (['self'], {}), '(self)\n', (15495, 15501), False, 'from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n')]
|
# -*- coding: utf-8 -*-
"""The main app module of the project."""
import sys
from PyQt5.QtWidgets import QApplication
from app.design.design import Window
def run() -> None:
    """Launch the GUI (already wired to the backend) and block until the
    Qt event loop terminates, propagating its exit code via sys.exit."""
    application = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(application.exec_())
|
[
"PyQt5.QtWidgets.QApplication",
"app.design.design.Window"
] |
[((246, 268), 'PyQt5.QtWidgets.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (258, 268), False, 'from PyQt5.QtWidgets import QApplication\n'), ((279, 287), 'app.design.design.Window', 'Window', ([], {}), '()\n', (285, 287), False, 'from app.design.design import Window\n')]
|
#!/usr/bin/env python
import json
import os
import ssl

import kopf
import kubernetes.client as k8s
import requests
VERSION = "v1alpha1"
GROUP = "est.mitre.org"
WELLKNOWN = "/.well-known/est"
ESTORDER_TEMPLATE = """---
apiVersion: est.mitre.org/v1alpha1
kind: EstOrder
metadata:
name: "{ordername}"
spec:
issuerRef:
name: "{issuername}"
kind: "{issuerkind}"
group: "est.mitre.org"
request: "{request}"
renewal: {renewal}
"""
class SSLContextAdapter(requests.adapters.HTTPAdapter):
    """HTTPS transport adapter enforcing TLSv1.2+ with optional trust anchors.

    Bug fix: ``session_factory`` constructs this adapter with
    ``SSLContextAdapter(cadata=...)``, but ``HTTPAdapter.__init__`` does not
    accept arbitrary keyword arguments, so that call raised ``TypeError``.
    The ``cadata`` keyword is now accepted here and, when given, loaded into
    the SSL context as additional trusted CA certificate data.
    """

    def __init__(self, *args, cadata=None, **kwargs):
        # PEM string or DER bytes of extra CA certificates, or None.
        self.cadata = cadata
        super().__init__(*args, **kwargs)

    def init_poolmanager(self, *args, **kwargs):
        """init pool manager with custom SSLContext"""
        context = requests.packages.urllib3.util.ssl_.create_urllib3_context(
            ssl.PROTOCOL_TLSv1_2
        )
        context.verify_mode = ssl.CERT_REQUIRED
        # Forbid downgrades to TLSv1.0/1.1.
        context.options = context.options | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1
        context.check_hostname = True
        if self.cadata is not None:
            # Trust the caller-supplied anchor(s) in addition to defaults.
            context.load_verify_locations(cadata=self.cadata)
        return super().init_poolmanager(*args, ssl_context=context, **kwargs)
def session_factory(base_url, cadata=None):
    """Build a requests.Session whose connections to *base_url* go through
    the hardened SSLContextAdapter, optionally trusting *cadata*."""
    sess = requests.Session()
    adapter = SSLContextAdapter(cadata=cadata)
    sess.mount(base_url, adapter)
    return sess
def get_issuer_from_resource(resource):
    """Return the EstIssuer/EstClusterIssuer referenced by *resource*.

    Raises kopf.TemporaryError (so kopf retries the handler later) when the
    issuer cannot be fetched or does not report Ready in its
    ``estissuer_create`` status.
    """
    issuer_ref = resource["spec"]["issuerRef"]
    kwargs = dict(
        group=issuer_ref["group"],
        version=VERSION,
        plural=issuer_ref["kind"].lower() + "s",
        name=issuer_ref["name"],
    )
    if kwargs["plural"] == "estissuers":
        # Namespaced issuers are looked up in the resource's own namespace.
        kwargs["namespace"] = resource["metadata"]["namespace"]
    try:
        api = k8s.CustomObjectsApi()
        if kwargs.get("namespace"):
            issuer = api.get_namespaced_custom_object(**kwargs)
        else:
            issuer = api.get_cluster_custom_object(**kwargs)
    except k8s.exceptions.OpenApiException as err:
        # err.body is a JSON document from the API server; parse it with
        # json.loads instead of eval() on externally supplied text.
        try:
            message = json.loads(err.body)["message"]
        except (TypeError, ValueError, KeyError):
            message = str(err)
        raise kopf.TemporaryError(message)
    # Use .get() throughout so a missing "Ready" key means "not ready"
    # rather than a KeyError.
    status = issuer.get("status") or {}
    create_status = status.get("estissuer_create") or {}
    if create_status.get("Ready") != "True":
        raise kopf.TemporaryError(f"{issuer_ref['name']} not ready")
    kopf.info(
        resource,
        reason="Debugging",
        message=f"get_issuer_from_resource: {issuer['metadata']['name']}",
    )
    return issuer
def get_secret_from_resource(resource):
    """Return the Secret named by ``spec.secretName`` if it exists and is of
    type tls or basic-auth, else None."""
    secretName = resource["spec"]["secretName"]
    namespace = resource["metadata"].get("namespace")
    if not namespace and resource["kind"] == "EstClusterIssuer":
        # Cluster-scoped issuers keep their secret in the operator namespace.
        namespace = os.getenv("CLUSTER_SCOPE_NAMESPACE", "est-operator")
    try:
        secret = k8s.CoreV1Api().read_namespaced_secret(secretName, namespace)
    except k8s.exceptions.OpenApiException:
        secret = None
    accepted = ["kubernetes.io/basic-auth", "kubernetes.io/tls"]
    if secret and secret.type not in accepted:
        # Wrong secret type counts as "no usable secret".
        secret = None
    kopf.info(
        resource,
        reason="Debugging",
        message=f"get_secret_from_resource: {namespace}:{secretName} {secret is not None}",
    )
    return secret
def get_owner_by_kind(resource, kind_list):
    """Return the first owner of *resource* whose kind is in *kind_list*.

    Returns None when no matching ownerReference exists; raises
    kopf.TemporaryError when the owner object cannot be fetched.
    """
    ownerReferences = resource["metadata"].get("ownerReferences", [])
    matches = [owner for owner in ownerReferences if owner["kind"] in kind_list]
    if not matches:
        # Bug fix: the original `(owner,) = [...]` raised ValueError on an
        # empty match list, so this "not found" path was unreachable (and
        # multiple matching owners also crashed). Take the first match.
        kopf.info(
            resource,
            reason="Debugging",
            message=f"get_owner_by_kind: {kind_list} not found",
        )
        return None
    owner = matches[0]
    group, version = owner["apiVersion"].split("/")
    namespace = resource["metadata"]["namespace"]
    kwargs = dict(
        group=group,
        version=version,
        plural=owner["kind"].lower() + "s",
        name=owner["name"],
    )
    try:
        api = k8s.CustomObjectsApi()
        # ownerRefs don't have namespace attributes, so we have to try both.
        # Most resources are namespaced, so do that first. Namespaced owners
        # have to be in the same namespace.
        try:
            owner = api.get_namespaced_custom_object(namespace=namespace, **kwargs)
        except k8s.exceptions.OpenApiException:
            owner = api.get_cluster_custom_object(**kwargs)
    except k8s.exceptions.OpenApiException as err:
        # err.body is a JSON document; parse it rather than eval() on
        # externally supplied text.
        try:
            message = json.loads(err.body)["message"]
        except (TypeError, ValueError, KeyError):
            message = str(err)
        raise kopf.TemporaryError(message)
    kopf.info(resource, reason="Debugging", message=f"get_owner_by_kind: {kwargs}")
    return owner
|
[
"kopf.TemporaryError",
"requests.Session",
"kopf.info",
"kubernetes.client.CoreV1Api",
"kubernetes.client.CustomObjectsApi",
"requests.packages.urllib3.util.ssl_.create_urllib3_context",
"os.getenv"
] |
[((1214, 1232), 'requests.Session', 'requests.Session', ([], {}), '()\n', (1230, 1232), False, 'import requests\n'), ((2330, 2441), 'kopf.info', 'kopf.info', (['resource'], {'reason': '"""Debugging"""', 'message': 'f"""get_issuer_from_resource: {issuer[\'metadata\'][\'name\']}"""'}), '(resource, reason=\'Debugging\', message=\n f"get_issuer_from_resource: {issuer[\'metadata\'][\'name\']}")\n', (2339, 2441), False, 'import kopf\n'), ((3128, 3256), 'kopf.info', 'kopf.info', (['resource'], {'reason': '"""Debugging"""', 'message': 'f"""get_secret_from_resource: {namespace}:{secretName} {secret is not None}"""'}), "(resource, reason='Debugging', message=\n f'get_secret_from_resource: {namespace}:{secretName} {secret is not None}')\n", (3137, 3256), False, 'import kopf\n'), ((4576, 4655), 'kopf.info', 'kopf.info', (['resource'], {'reason': '"""Debugging"""', 'message': 'f"""get_owner_by_kind: {kwargs}"""'}), "(resource, reason='Debugging', message=f'get_owner_by_kind: {kwargs}')\n", (4585, 4655), False, 'import kopf\n'), ((676, 761), 'requests.packages.urllib3.util.ssl_.create_urllib3_context', 'requests.packages.urllib3.util.ssl_.create_urllib3_context', (['ssl.PROTOCOL_TLSv1_2'], {}), '(ssl.PROTOCOL_TLSv1_2\n )\n', (734, 761), False, 'import requests\n'), ((1761, 1783), 'kubernetes.client.CustomObjectsApi', 'k8s.CustomObjectsApi', ([], {}), '()\n', (1781, 1783), True, 'import kubernetes.client as k8s\n'), ((2271, 2325), 'kopf.TemporaryError', 'kopf.TemporaryError', (['f"""{issuer_ref[\'name\']} not ready"""'], {}), '(f"{issuer_ref[\'name\']} not ready")\n', (2290, 2325), False, 'import kopf\n'), ((2789, 2841), 'os.getenv', 'os.getenv', (['"""CLUSTER_SCOPE_NAMESPACE"""', '"""est-operator"""'], {}), "('CLUSTER_SCOPE_NAMESPACE', 'est-operator')\n", (2798, 2841), False, 'import os\n'), ((2865, 2880), 'kubernetes.client.CoreV1Api', 'k8s.CoreV1Api', ([], {}), '()\n', (2878, 2880), True, 'import kubernetes.client as k8s\n'), ((3606, 3703), 'kopf.info', 'kopf.info', 
(['resource'], {'reason': '"""Debugging"""', 'message': 'f"""get_owner_by_kind: {kind_list} not found"""'}), "(resource, reason='Debugging', message=\n f'get_owner_by_kind: {kind_list} not found')\n", (3615, 3703), False, 'import kopf\n'), ((4034, 4056), 'kubernetes.client.CustomObjectsApi', 'k8s.CustomObjectsApi', ([], {}), '()\n', (4054, 4056), True, 'import kubernetes.client as k8s\n')]
|
# Copyright (C) 2013-2014 <NAME>
# This file is part of fortrace - http://fortrace.fbi.h-da.de
# See the file 'docs/LICENSE' for copying permission.
"""
This file contain the
"""
from __future__ import absolute_import
try:
import logging
from fortrace.inputDevice.keyboard import KeyboardManagement
from fortrace.inputDevice.mouse import MouseManagement
from fortrace.utility.logger_helper import create_logger
except ImportError as ie:
raise Exception("application " + str(ie))
exit(1)
class InputDeviceManagement(object):
    """Dispatcher that hands each input-device command one level down to the
    mouse or keyboard manager, which performs the final execution. Each
    execute layer strips one keyword from the command string.
    """

    def __init__(self, agent_object, logger=None):
        self.keyboardManager = KeyboardManagement()
        self.mouseManager = MouseManagement()
        self.agent_object = agent_object
        # Fall back to a default logger when none is supplied.
        self.logger = logger if logger is not None else create_logger('interactionManager', logging.INFO)

    def execute(self, command):
        """Route *command* to the matching device manager.

        Supported sub commands: mouse, keyboard.
        """
        try:
            self.logger.info("command: " + command)
            parts = command.split(" ")
            if len(parts) < 2:
                return
            keyword, remainder = parts[0], " ".join(parts[1:])
            if "mouse" in keyword:
                # Hand off to the mouse manager's execute function.
                self.mouseManager.execute(remainder)
            elif "keyboard" in keyword:
                # Hand off to the keyboard manager's execute function.
                self.keyboardManager.execute(remainder)
            else:
                raise Exception("inputDevice " + keyword + " not found!")
        except Exception as e:
            raise Exception("inputDeviceManagement->execute " + str(e))
|
[
"fortrace.utility.logger_helper.create_logger",
"fortrace.inputDevice.keyboard.KeyboardManagement",
"fortrace.inputDevice.mouse.MouseManagement"
] |
[((824, 844), 'fortrace.inputDevice.keyboard.KeyboardManagement', 'KeyboardManagement', ([], {}), '()\n', (842, 844), False, 'from fortrace.inputDevice.keyboard import KeyboardManagement\n'), ((873, 890), 'fortrace.inputDevice.mouse.MouseManagement', 'MouseManagement', ([], {}), '()\n', (888, 890), False, 'from fortrace.inputDevice.mouse import MouseManagement\n'), ((1020, 1069), 'fortrace.utility.logger_helper.create_logger', 'create_logger', (['"""interactionManager"""', 'logging.INFO'], {}), "('interactionManager', logging.INFO)\n", (1033, 1069), False, 'from fortrace.utility.logger_helper import create_logger\n')]
|
import hashlib
import inspect
from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError
from konstel import formats
from konstel import encodings
# Hash algorithm names available on this platform, per hashlib.
ALGORITHMS = hashlib.algorithms_available
# Names of the alphabets declared in konstel.formats.
ALPHABETS = formats.alphabets.keys()
# Base encodings natively understood by the scheme spec.
BASE_ENCODINGS = {'base32', 'cbase32'}
# Public function names discovered by introspection of the encodings and
# formats modules; these are the valid values in scheme definitions.
ENCODINGS = {o[0] for o in inspect.getmembers(encodings, inspect.isfunction)}
FORMATS = {o[0] for o in inspect.getmembers(formats, inspect.isfunction)}
# Supported output serialisations.
OUTPUT_TYPES = {'json', 'tsv', 'table'}
def load_scheme(yaml_text):
    '''
    Parse and validate a scheme definition from YAML text against the
    strictyaml schema below, returning the validated YAML document.
    Raises YAMLValidationError on schema violations.
    Some optional keys have enforced default values, otherwise use dict.get()
    '''
    # Top level: scheme name -> scheme specification.
    schema = MapPattern(
        Str(), Map({
            'description': Str(),
            Optional('alias'): Str(),
            'version': Str(),
            # directive name -> how to prepare/validate/hash one input kind
            'directives': MapPattern(
                Str(), Map({
                    Optional('description'): Str(),
                    'formats': Seq(Enum(FORMATS)),
                    Optional('prepare'): Map({
                        Optional('remove_whitespace', default=False): Bool(),
                        Optional('remove_characters', default=['']): Seq(Str()),
                        Optional('strip_characters', default=['']): Seq(Str()),
                    }),
                    Optional('validate'): Map({
                        Optional('alphabet'): Enum(ALPHABETS),
                        Optional('min_length'): Int(),
                        Optional('max_length'): Int(),
                    }),
                    Optional('target'): Str(),
                    Optional('helper', default=False): Bool()
                }),
            ),
            'algorithm': Enum(ALGORITHMS),
            # encoding name -> how to render the digest
            'encodings': MapPattern(
                Str(), Map({
                    'type': Enum(ENCODINGS),
                    Optional('length', default=0): Int(),
                    Optional('prefix', default=''): Str(),
                    Optional('separator'): Map({
                        'character': Str(),
                        'interval': Int()
                    })
                })
            )
        })
    )
    return load(yaml_text, schema)
|
[
"strictyaml.Optional",
"strictyaml.Enum",
"strictyaml.Str",
"strictyaml.Int",
"strictyaml.Bool",
"konstel.formats.alphabets.keys",
"strictyaml.load",
"inspect.getmembers"
] |
[((249, 273), 'konstel.formats.alphabets.keys', 'formats.alphabets.keys', ([], {}), '()\n', (271, 273), False, 'from konstel import formats\n'), ((2136, 2159), 'strictyaml.load', 'load', (['yaml_text', 'schema'], {}), '(yaml_text, schema)\n', (2140, 2159), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((340, 389), 'inspect.getmembers', 'inspect.getmembers', (['encodings', 'inspect.isfunction'], {}), '(encodings, inspect.isfunction)\n', (358, 389), False, 'import inspect\n'), ((416, 463), 'inspect.getmembers', 'inspect.getmembers', (['formats', 'inspect.isfunction'], {}), '(formats, inspect.isfunction)\n', (434, 463), False, 'import inspect\n'), ((662, 667), 'strictyaml.Str', 'Str', ([], {}), '()\n', (665, 667), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((721, 738), 'strictyaml.Optional', 'Optional', (['"""alias"""'], {}), "('alias')\n", (729, 738), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((702, 707), 'strictyaml.Str', 'Str', ([], {}), '()\n', (705, 707), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((740, 745), 'strictyaml.Str', 'Str', ([], {}), '()\n', (743, 745), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((770, 775), 'strictyaml.Str', 'Str', ([], {}), '()\n', (773, 775), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1671, 1687), 'strictyaml.Enum', 'Enum', (['ALGORITHMS'], {}), '(ALGORITHMS)\n', (1675, 1687), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((831, 836), 'strictyaml.Str', 'Str', ([], {}), '()\n', (834, 836), False, 'from strictyaml import load, Bool, 
Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1742, 1747), 'strictyaml.Str', 'Str', ([], {}), '()\n', (1745, 1747), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((864, 887), 'strictyaml.Optional', 'Optional', (['"""description"""'], {}), "('description')\n", (872, 887), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((967, 986), 'strictyaml.Optional', 'Optional', (['"""prepare"""'], {}), "('prepare')\n", (975, 986), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1277, 1297), 'strictyaml.Optional', 'Optional', (['"""validate"""'], {}), "('validate')\n", (1285, 1297), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1522, 1540), 'strictyaml.Optional', 'Optional', (['"""target"""'], {}), "('target')\n", (1530, 1540), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1569, 1602), 'strictyaml.Optional', 'Optional', (['"""helper"""'], {'default': '(False)'}), "('helper', default=False)\n", (1577, 1602), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((889, 894), 'strictyaml.Str', 'Str', ([], {}), '()\n', (892, 894), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1542, 1547), 'strictyaml.Str', 'Str', ([], {}), '()\n', (1545, 1547), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1604, 1610), 'strictyaml.Bool', 'Bool', ([], {}), '()\n', (1608, 1610), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1820, 1849), 
'strictyaml.Optional', 'Optional', (['"""length"""'], {'default': '(0)'}), "('length', default=0)\n", (1828, 1849), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1878, 1908), 'strictyaml.Optional', 'Optional', (['"""prefix"""'], {'default': '""""""'}), "('prefix', default='')\n", (1886, 1908), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1937, 1958), 'strictyaml.Optional', 'Optional', (['"""separator"""'], {}), "('separator')\n", (1945, 1958), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1783, 1798), 'strictyaml.Enum', 'Enum', (['ENCODINGS'], {}), '(ENCODINGS)\n', (1787, 1798), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1851, 1856), 'strictyaml.Int', 'Int', ([], {}), '()\n', (1854, 1856), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1910, 1915), 'strictyaml.Str', 'Str', ([], {}), '()\n', (1913, 1915), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((931, 944), 'strictyaml.Enum', 'Enum', (['FORMATS'], {}), '(FORMATS)\n', (935, 944), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1018, 1062), 'strictyaml.Optional', 'Optional', (['"""remove_whitespace"""'], {'default': '(False)'}), "('remove_whitespace', default=False)\n", (1026, 1062), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1096, 1139), 'strictyaml.Optional', 'Optional', (['"""remove_characters"""'], {'default': "['']"}), "('remove_characters', default=[''])\n", (1104, 1139), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, 
MapPattern, Optional, YAMLValidationError\n'), ((1177, 1219), 'strictyaml.Optional', 'Optional', (['"""strip_characters"""'], {'default': "['']"}), "('strip_characters', default=[''])\n", (1185, 1219), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1064, 1070), 'strictyaml.Bool', 'Bool', ([], {}), '()\n', (1068, 1070), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1329, 1349), 'strictyaml.Optional', 'Optional', (['"""alphabet"""'], {}), "('alphabet')\n", (1337, 1349), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1392, 1414), 'strictyaml.Optional', 'Optional', (['"""min_length"""'], {}), "('min_length')\n", (1400, 1414), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1447, 1469), 'strictyaml.Optional', 'Optional', (['"""max_length"""'], {}), "('max_length')\n", (1455, 1469), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1351, 1366), 'strictyaml.Enum', 'Enum', (['ALPHABETS'], {}), '(ALPHABETS)\n', (1355, 1366), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1416, 1421), 'strictyaml.Int', 'Int', ([], {}), '()\n', (1419, 1421), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1471, 1476), 'strictyaml.Int', 'Int', ([], {}), '()\n', (1474, 1476), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((2003, 2008), 'strictyaml.Str', 'Str', ([], {}), '()\n', (2006, 2008), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((2046, 2051), 'strictyaml.Int', 
'Int', ([], {}), '()\n', (2049, 2051), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1145, 1150), 'strictyaml.Str', 'Str', ([], {}), '()\n', (1148, 1150), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n'), ((1225, 1230), 'strictyaml.Str', 'Str', ([], {}), '()\n', (1228, 1230), False, 'from strictyaml import load, Bool, Int, Str, Seq, Map, Enum, MapPattern, Optional, YAMLValidationError\n')]
|
# -*- coding: utf-8 -*-
# Copyright (C) 2004-2016 Mag. <NAME>. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# CAL.Date_Time
#
# Purpose
# Wrapper around `datetime.datetime`
#
# Revision Dates
# 15-Oct-2004 (CT) Creation
# 17-Oct-2004 (CT) Adapted to changes in `_DTW_` and `Delta`
# 28-Dec-2005 (MG) Static method `from_ical` added
# 30-Nov-2006 (CT) `__getattr__` for `CJD`, `MJD`, `TJD`, "CJS", "MJS",
# and "TJS" added
# 10-Dec-2006 (CT) `from_julian` added
# 11-Dec-2006 (CT) `from_julian` corrected
# 11-Dec-2006 (CT) `__getattr__` changed to `setattr` the modified value
# 11-Nov-2007 (CT) `sidereal_time` added
# 3-Jan-2008 (CT) `time_pattern` added and `_from_string_match_kw` redefined
# 7-Jan-2008 (CT) `as_utc` added
# 31-Mar-2008 (CT) `combine` added
# 16-Jun-2010 (CT) s/print/pyk.fprint/
# 29-Mar-2012 (CT) Add support for `tzinfo`; factor `as_local`; use
# `CAL.Time._from_string_match_kw`
# 15-Sep-2014 (CT) Add `_Date_Time_Arg_` to `CAO` as `[Arg|Opt].Date_Time`
# 19-Sep-2014 (CT) Add `from_string_x`
# 6-May-2015 (CT) Add tests for `jsonified`
# 29-Mar-2016 (CT) Derive `_Date_Time_Arg_` from `CAO.Opt.Date`, not `.Str`
# 21-Apr-2016 (CT) Add check for tail to `_from_string_match_kw`
# 21-Apr-2016 (CT) Redefine `from_string` to pass `check_tail=False`
# 26-Sep-2016 (CT) Add `as_date`, `as_time`
# 26-Sep-2016 (CT) Move `sidereal_time` to `CAL.Sky.Earth`
# ««revision-date»»···
#--
from _TFL import TFL
from _CAL import CAL
import _CAL.Date
import _CAL.Time
from _TFL._Meta.Once_Property import Once_Property
import _TFL.CAO
from _TFL.pyk import pyk
from _TFL.Regexp import *
import datetime
class Date_Time (CAL.Date, CAL.Time) :
    """Model a (gregorian) date plus time.
    >>> from _CAL.Delta import Date_Time_Delta as Delta
    >>> from _TFL.json_dump import to_string as jsonified
    >>> d = Date_Time (2004, 10, 15, 16, 3, 14)
    >>> print (d)
    2004-10-15 16:03:14
    >>> print (jsonified ([d, Delta (3)]))
    ["2004-10-15T16:03:14", "3 days, 0:00:00"]
    >>> d.year, d.month, d.day, d.datetime, d.week, d.weekday, d.ordinal
    (2004, 10, 15, datetime.datetime(2004, 10, 15, 16, 3, 14), 42, 4, 731869)
    >>> d.month_name
    'Oct'
    >>> d = d - Delta (3)
    >>> d.year, d.month, d.day, d.datetime, d.week, d.weekday, d.ordinal
    (2004, 10, 12, datetime.datetime(2004, 10, 12, 16, 3, 14), 42, 1, 731866)
    >>> d = d - 1
    >>> d.year, d.month, d.day, d.datetime, d.week, d.weekday, d.ordinal
    (2004, 10, 11, datetime.datetime(2004, 10, 11, 16, 3, 14), 42, 0, 731865)
    >>> d1 = d2 = Date_Time (2004, 10, 15, 16, 3, 14)
    >>> id (d1) == id (d2)
    True
    >>> d1 += 1
    >>> id (d1) == id (d2)
    False
    >>> print (d2 - d1)
    -1 day, 0:00:00
    >>> d = Date_Time (2006, 12, 10, 12, 26, 30)
    >>> print (jsonified ([d1, d, d - d1]))
    ["2004-10-16T16:03:14", "2006-12-10T12:26:30", "784 days, 20:23:16"]
    >>> d.TJD, d.TJS
    (14079.518402777778, 1216470390)
    >>> d
    Date_Time (2006, 12, 10, 12, 26, 30, 0)
    >>> Date_Time.from_julian (14079, kind = "TJD")
    Date_Time (2006, 12, 10, 0, 0, 0, 0)
    >>> Date_Time.from_julian (14079.518402777778, kind = "TJD")
    Date_Time (2006, 12, 10, 12, 26, 30, 0)
    >>> Date_Time.from_julian (1216470390, kind = "TJS")
    Date_Time (2006, 12, 10, 12, 26, 30, 0)
    >>> Date_Time (1988,6,19,12).JD
    2447332.0
    >>> Date_Time (1988,1,27).JD
    2447187.5
    >>> Date_Time (1999,1,1).JD
    2451179.5
    >>> Date_Time (1999,1,1,12).JD
    2451180.0
    >>> Date_Time (2000,1,1,12).JD
    2451545.0
    >>> dt = Date_Time (2008, 1, 7, 10, 16, 42, 0)
    >>> dt
    Date_Time (2008, 1, 7, 10, 16, 42, 0)
    >>> dt.as_utc ()
    Date_Time (2008, 1, 7, 9, 16, 42, 0)
    >>> dt = Date_Time (2008, 4, 7, 10, 16, 42, 0)
    >>> dt
    Date_Time (2008, 4, 7, 10, 16, 42, 0)
    >>> dt.as_utc ()
    Date_Time (2008, 4, 7, 8, 16, 42, 0)
    >>> dt = Date_Time.from_string ("2012-03-29 10:06:46 -0400")
    >>> dt
    Date_Time (2012, 3, 29, 10, 6, 46, 0)
    >>> dt.as_local ()
    Date_Time (2012, 3, 29, 16, 6, 46, 0)
    >>> dt.as_utc ()
    Date_Time (2012, 3, 29, 14, 6, 46, 0)
    >>> td = Date_Time (2014, 9, 19, 17, 23, 42)
    >>> tt = CAL.Time (17, 23, 42)
    >>> print (jsonified ((td, tt)))
    ["2014-09-19T17:23:42", "17:23:42"]
    >>> Date_Time.from_string_x ("2017/09/19 17:42:23")
    Date_Time (2017, 9, 19, 17, 42, 23, 0)
    >>> td
    Date_Time (2014, 9, 19, 17, 23, 42, 0)
    >>> tt
    Time (17, 23, 42, 0)
    >>> Date_Time.from_string_x ("+15m", date = td)
    Date_Time (2014, 9, 19, 17, 38, 42, 0)
    >>> Date_Time.from_string_x ("+3d", date = td)
    Date_Time (2014, 9, 22, 17, 23, 42, 0)
    >>> Date_Time.from_string_x ("15:40", date = td, time = tt)
    Date_Time (2014, 9, 19, 15, 40, 0, 0)
    >>> Date_Time.from_string_x ("15:40", date = td, time = tt, future = True)
    Date_Time (2014, 9, 20, 15, 40, 0, 0)
    >>> Date_Time.from_string_x ("18:40", date = td, time = tt)
    Date_Time (2014, 9, 19, 18, 40, 0, 0)
    >>> Date_Time.from_string_x ("18:40", date = td, time = tt, future = True)
    Date_Time (2014, 9, 19, 18, 40, 0, 0)
    """
    ## Wrapped `datetime` type and the keyword under which the framework
    ## (`CAL._DTW_`) stores/passes the wrapped instance.
    _Type = datetime.datetime
    ## Combined date + time constructor arguments inherited from both bases.
    _init_arg_names = \
        CAL.Date._init_arg_names + CAL.Time._init_arg_names
    _kind = "datetime"
    ## Map a `time.struct_time`-like tuple to constructor args
    ## (first six fields plus a zero microsecond component).
    _timetuple_slice = lambda s, tt : tt [:6] + (0, )
    ## Pattern for the time part (and optional `±HHMM` tzinfo offset) that
    ## may follow the date part in a string representation.
    time_pattern = Regexp \
        ( r"(?P<hour> \d{2,2})"
          r":"
          r"(?P<minute> \d{2,2})"
          r"(?: :"
          r"(?P<second> \d{2,2})"
          r"(?: \."
          r"(?P<microsecond> \d+)"
          r")?"
          r")?"
          r"(?: \s"
          r"(?P<tzinfo> [-+]\d{4,4})"
          r")?"
        , flags = re.VERBOSE | re.IGNORECASE
        )
    from _CAL.Delta import Date_Time_Delta as Delta
    def as_date (self) :
        """Return `self` converted to pure `Date`."""
        return CAL.Date (date = self._body.date ())
    # end def as_date
    def as_local (self) :
        """Return `self` converted to local time."""
        from dateutil.tz import tzlocal
        local = self
        if not local.tzinfo :
            ## Naive value: assume it already is local time
            local = self.replace (tzinfo = tzlocal ())
        return self.__class__ \
            (** {self._kind : local._body.astimezone (tzlocal ())})
    # end def as_local
    def as_time (self) :
        """Return `self` converted to pure `Time`."""
        return CAL.Time (time = self._body.time ())
    # end def as_time
    def as_utc (self) :
        """Return `self` converted to `UTC`."""
        local = self.as_local ()
        delta = self.Delta (seconds = local._body.utcoffset ().seconds)
        return local - delta
    # end def as_utc
    @classmethod
    def combine (cls, date, time) :
        """Create a `Date_Time` object from `date` and `time` objects."""
        ## Accept both CAL wrappers and plain `datetime` objects
        if isinstance (date, CAL._DTW_) :
            date = date._body
        if isinstance (time, CAL._DTW_) :
            time = time._body
        return cls (** {cls._kind : datetime.datetime.combine (date, time)})
    # end def combine
    @staticmethod
    def from_ical (ical) :
        ## Wrap the payload of an iCal property in the matching CAL class;
        ## returns None for unsupported payload types
        for p_cls, tgl_cls in \
            ( (datetime.datetime, CAL.Date_Time)
            , (datetime.date, CAL.Date)
            , (datetime.timedelta, CAL.Time_Delta)
            ) :
            if isinstance (ical.dt, p_cls) :
                return tgl_cls (** {tgl_cls._kind : ical.dt})
    # end def from_ical
    @classmethod
    def from_string (cls, s) :
        ## `check_tail = False` because the time part following the date is
        ## consumed by `_from_string_match_kw`, not by the date pattern
        return cls.__c_super.from_string (s, check_tail = False)
    # end def from_string
    @classmethod
    def from_string_x (cls, s, ** kw) :
        """Convert `s` to `Date_Time`.
        `s` can be a valid string representation of
        * a date and time value
        * a date and time delta value (relative to `now` at the time of call)
        * a time value (relative to today's date at the time of call)
        Possible keyword arguments are:
        * `future`: if `s` is a time value smaller than `now`, force
          `result` to tomorrow
        * `date`: apply delta or time value `s` to `date` instead of `now`
        * `time`: compare time value `s` to `time` instead of `now`
        """
        v = s.strip ()
        if v.startswith (("+", "-")) :
            return cls._from_string_delta (v, ** kw)
        else :
            try :
                return cls.from_string (v)
            except ValueError :
                ## not a full date/time --> try to parse as time-of-day
                return cls._from_string_time (v, ** kw)
    # end def from_string_x
    @classmethod
    def from_julian (cls, jd, kind = "CJD") :
        ## `kind` ending in "S" means the julian value is given in seconds
        k = kind
        if kind.endswith ("S") :
            jd /= 86400.0
            k = kind [:-1] + "D"
        days = int (jd)
        seconds = (jd - days) * 86400
        result = super (Date_Time, cls).from_julian (days, kind = k)
        return result + CAL.Time_Delta (seconds = seconds)
    # end def from_julian
    @classmethod
    def _from_string_delta (cls, s, ** kw) :
        ## Apply delta string `s` to `date` (default: now)
        now = kw.get ("date") or cls ()
        delta = cls.Delta.from_string (s)
        return now + delta
    # end def _from_string_delta
    @classmethod
    def _from_string_match_kw (cls, s, match) :
        assert match
        kw = super (Date_Time, cls)._from_string_match_kw (s, match)
        ## The tail after the date part is the time part, optionally
        ## separated by "T" (ISO 8601 style)
        t = s [match.end () :].lstrip ().lstrip ("T")
        if t :
            match = cls.time_pattern.match (t)
            if match and match.end () == len (t.rstrip ()) :
                kw.update (CAL.Time._from_string_match_kw (t, match))
            else :
                ## trailing garbage after the time part --> reject
                raise ValueError (s)
        return kw
    # end def _from_string_match_kw
    @classmethod
    def _from_string_time (cls, s, ** kw) :
        ## Combine time-of-day string `s` with `date` (default: today);
        ## `future` bumps the date by one day if `s` lies before `time`/now
        future = kw.get ("future")
        date = kw.get ("date") or CAL.Date ()
        now = kw.get ("time") or CAL.Time ()
        time = CAL.Time.from_string (s)
        if future and time < now :
            date += 1
        return cls.combine (date, time)
    # end def _from_string_time
    def __getattr__ (self, name) :
        ## Add the time-of-day fraction to julian-date attributes computed
        ## by `CAL.Date` (which only accounts for whole days); cache the
        ## corrected value via `setattr` so it is computed only once
        result = self.__super.__getattr__ (name)
        if name in self.JD_offset :
            if name.endswith ("S") :
                result += self.seconds
            else :
                result += (self.seconds / 86400.)
            setattr (self, name, result)
        return result
    # end def __getattr__
# end class Date_Time
class Date_Time_M (CAL._Mutable_DTW_) :
    """Mutable datetime object
    >>> from _TFL.json_dump import to_string as jsonified
    >>> d1 = d2 = Date_Time_M (2004, 10, 15, 16, 3, 14)
    >>> print (d1, d2)
    2004-10-15 16:03:14 2004-10-15 16:03:14
    >>> id (d1) == id (d2)
    True
    >>> d1 += 1
    >>> print (d1, d2)
    2004-10-16 16:03:14 2004-10-16 16:03:14
    >>> id (d1) == id (d2)
    True
    >>> print (jsonified ((d1, d2)))
    ["2004-10-16T16:03:14", "2004-10-16T16:03:14"]
    """
    ## Immutable class wrapped by `_Mutable_DTW_` (in-place ops rebind
    ## the wrapped instance instead of creating a new wrapper)
    Class = Date_Time
# end class Date_Time_M
class _Date_Time_Arg_ (TFL.CAO.Opt.Date) :
    """Argument or option with a (calendary) date/time value"""
    ## Public name under which TFL.CAO exposes this argument type
    _real_name = "Date_Time"
    ## CAL wrapper class used for the parsed value
    _CAL_Type = Date_Time
# end class _Date_Time_Arg_
if __name__ != "__main__" :
    ## Export all public names into the `CAL` package namespace on import
    CAL._Export ("*")
### __END__ CAL.Date_Time
|
[
"_CAL.CAL.Time.from_string",
"_CAL.CAL.Date",
"_CAL.CAL._Export",
"dateutil.tz.tzlocal",
"_CAL.CAL.Time._from_string_match_kw",
"_CAL.CAL.Time_Delta",
"_CAL.CAL.Time",
"datetime.datetime.combine"
] |
[((12002, 12018), '_CAL.CAL._Export', 'CAL._Export', (['"""*"""'], {}), "('*')\n", (12013, 12018), False, 'from _CAL import CAL\n'), ((10648, 10671), '_CAL.CAL.Time.from_string', 'CAL.Time.from_string', (['s'], {}), '(s)\n', (10668, 10671), False, 'from _CAL import CAL\n'), ((9651, 9682), '_CAL.CAL.Time_Delta', 'CAL.Time_Delta', ([], {'seconds': 'seconds'}), '(seconds=seconds)\n', (9665, 9682), False, 'from _CAL import CAL\n'), ((10569, 10579), '_CAL.CAL.Date', 'CAL.Date', ([], {}), '()\n', (10577, 10579), False, 'from _CAL import CAL\n'), ((10619, 10629), '_CAL.CAL.Time', 'CAL.Time', ([], {}), '()\n', (10627, 10629), False, 'from _CAL import CAL\n'), ((6936, 6945), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (6943, 6945), False, 'from dateutil.tz import tzlocal\n'), ((7764, 7801), 'datetime.datetime.combine', 'datetime.datetime.combine', (['date', 'time'], {}), '(date, time)\n', (7789, 7801), False, 'import datetime\n'), ((10281, 10321), '_CAL.CAL.Time._from_string_match_kw', 'CAL.Time._from_string_match_kw', (['t', 'match'], {}), '(t, match)\n', (10311, 10321), False, 'from _CAL import CAL\n'), ((7034, 7043), 'dateutil.tz.tzlocal', 'tzlocal', ([], {}), '()\n', (7041, 7043), False, 'from dateutil.tz import tzlocal\n')]
|
import json
from unittest.mock import ANY, MagicMock, Mock, call, patch
import pytest
import tornado
from jupyterlab_git.git import Git
from jupyterlab_git.handlers import NAMESPACE, setup_handlers, GitHandler
from .testutils import assert_http_error, maybe_future
def test_mapping_added():
    """setup_handlers must register the git handlers on the web app."""
    web_app = Mock()
    web_app.settings = {"base_url": "nb_base_url"}

    setup_handlers(web_app)

    web_app.add_handlers.assert_called_once_with(".*", ANY)
@pytest.mark.parametrize(
    "path, with_cm", (("url", False), ("url/to/path", False), ("url/to/path", True))
)
def test_GitHandler_url2localpath(path, with_cm, jp_web_app, jp_root_dir):
    """url2localpath maps a URL path onto the server root directory."""
    request = tornado.httputil.HTTPServerRequest()
    request.connection = MagicMock()
    handler = GitHandler(jp_web_app, request)

    result = handler.url2localpath(path, with_cm)

    expected = str(jp_root_dir / path)
    if with_cm:
        assert result == (expected, handler.contents_manager)
    else:
        assert result == expected
@patch("jupyterlab_git.handlers.GitAllHistoryHandler.git", spec=Git)
async def test_all_history_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
    """POST /all_history aggregates show_top_level, branch, log and status."""
    # Given
    show_top_level = {"code": 0, "path": "foo"}
    branch = "branch_foo"
    log = "log_foo"
    status = "status_foo"
    local_path = jp_root_dir / "test_path"
    mock_git.show_top_level.return_value = maybe_future(show_top_level)
    mock_git.branch.return_value = maybe_future(branch)
    mock_git.log.return_value = maybe_future(log)
    mock_git.status.return_value = maybe_future(status)
    # When
    body = {"history_count": 25}
    response = await jp_fetch(
        NAMESPACE, local_path.name, "all_history", body=json.dumps(body), method="POST"
    )
    # Then: every sub-command is invoked on the repository path
    mock_git.show_top_level.assert_called_with(str(local_path))
    mock_git.branch.assert_called_with(str(local_path))
    mock_git.log.assert_called_with(str(local_path), 25)
    mock_git.status.assert_called_with(str(local_path))
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {
        "code": show_top_level["code"],
        "data": {
            "show_top_level": show_top_level,
            "branch": branch,
            "log": log,
            "status": status,
        },
    }
@patch("jupyterlab_git.git.execute")
async def test_git_show_prefix(mock_execute, jp_fetch, jp_root_dir):
    """POST /show_prefix returns the path of the subfolder inside the repo."""
    # Given
    path = "path/to/repo"
    local_path = jp_root_dir / "test_path"
    mock_execute.return_value = maybe_future((0, str(path), ""))
    # When
    response = await jp_fetch(
        NAMESPACE,
        local_path.name + "/subfolder",
        "show_prefix",
        body="{}",
        method="POST",
    )
    # Then
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload["path"] == str(path)
    # `git rev-parse --show-prefix` must be run in the requested subfolder
    mock_execute.assert_has_calls(
        [
            call(
                ["git", "rev-parse", "--show-prefix"],
                cwd=str(local_path / "subfolder"),
            ),
        ]
    )
@patch("jupyterlab_git.git.execute")
async def test_git_show_prefix_not_a_git_repo(mock_execute, jp_fetch, jp_root_dir):
    """POST /show_prefix yields a null path when the folder is no git repo."""
    # Given: exit code 128 is git's "not a git repository" error
    local_path = jp_root_dir / "test_path"
    mock_execute.return_value = maybe_future(
        (128, "", "fatal: not a git repository (or any")
    )
    # When
    response = await jp_fetch(
        NAMESPACE,
        local_path.name + "/subfolder",
        "show_prefix",
        body="{}",
        method="POST",
    )
    # Then: the handler maps the git failure to a 200 response with path=None
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload["path"] is None
    mock_execute.assert_has_calls(
        [
            call(
                ["git", "rev-parse", "--show-prefix"],
                cwd=str(local_path / "subfolder"),
            ),
        ]
    )
@patch("jupyterlab_git.git.execute")
async def test_git_show_top_level(mock_execute, jp_fetch, jp_root_dir):
    """POST /show_top_level returns the repository root for a subfolder."""
    # Given
    path = "path/to/repo"
    local_path = jp_root_dir / "test_path"
    mock_execute.return_value = maybe_future((0, str(path), ""))
    # When
    response = await jp_fetch(
        NAMESPACE,
        local_path.name + "/subfolder",
        "show_top_level",
        body="{}",
        method="POST",
    )
    # Then
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload["path"] == str(path)
    # `git rev-parse --show-toplevel` must be run in the requested subfolder
    mock_execute.assert_has_calls(
        [
            call(
                ["git", "rev-parse", "--show-toplevel"],
                cwd=str(local_path / "subfolder"),
            ),
        ]
    )
@patch("jupyterlab_git.git.execute")
async def test_git_show_top_level_not_a_git_repo(mock_execute, jp_fetch, jp_root_dir):
    """POST /show_top_level yields a null path when the folder is no git repo."""
    # Given: exit code 128 is git's "not a git repository" error
    local_path = jp_root_dir / "test_path"
    mock_execute.return_value = maybe_future(
        (128, "", "fatal: not a git repository (or any")
    )
    # When
    response = await jp_fetch(
        NAMESPACE,
        local_path.name + "/subfolder",
        "show_top_level",
        body="{}",
        method="POST",
    )
    # Then: the handler maps the git failure to a 200 response with path=None
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload["path"] is None
    mock_execute.assert_has_calls(
        [
            call(
                ["git", "rev-parse", "--show-toplevel"],
                cwd=str(local_path / "subfolder"),
            ),
        ]
    )
@patch("jupyterlab_git.handlers.GitBranchHandler.git", spec=Git)
async def test_branch_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
    """POST /branch passes the branch listing from Git.branch through as-is."""
    # Given: a mix of local branches (with and without upstream) and remotes
    local_path = jp_root_dir / "test_path"
    branch = {
        "code": 0,
        "branches": [
            {
                "is_current_branch": True,
                "is_remote_branch": False,
                "name": "feature-foo",
                "upstream": "origin/feature-foo",
                "top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
                "tag": None,
            },
            {
                "is_current_branch": False,
                "is_remote_branch": False,
                "name": "master",
                "upstream": "origin/master",
                "top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
                "tag": None,
            },
            {
                "is_current_branch": False,
                "is_remote_branch": False,
                "name": "feature-bar",
                "upstream": None,
                "top_commit": "01234567899999abcdefghijklmnopqrstuvwxyz",
                "tag": None,
            },
            {
                "is_current_branch": False,
                "is_remote_branch": True,
                "name": "origin/feature-foo",
                "upstream": None,
                "top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
                "tag": None,
            },
            {
                "is_current_branch": False,
                "is_remote_branch": True,
                "name": "origin/master",
                "upstream": None,
                "top_commit": "abcdefghijklmnopqrstuvwxyz01234567890123",
                "tag": None,
            },
        ],
    }
    mock_git.branch.return_value = maybe_future(branch)
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "branch", body="{}", method="POST"
    )
    # Then
    mock_git.branch.assert_called_with(str(local_path))
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0, "branches": branch["branches"]}
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler(mock_git, jp_fetch, jp_root_dir):
    """POST /log forwards the requested history_count to Git.log."""
    # Given
    local_path = jp_root_dir / "test_path"
    log = {"code": 0, "commits": []}
    mock_git.log.return_value = maybe_future(log)
    # When
    body = {"history_count": 20}
    response = await jp_fetch(
        NAMESPACE, local_path.name, "log", body=json.dumps(body), method="POST"
    )
    # Then
    mock_git.log.assert_called_with(str(local_path), 20)
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == log
@patch("jupyterlab_git.handlers.GitLogHandler.git", spec=Git)
async def test_log_handler_no_history_count(mock_git, jp_fetch, jp_root_dir):
    """POST /log without history_count falls back to the default of 25."""
    # Given
    local_path = jp_root_dir / "test_path"
    log = {"code": 0, "commits": []}
    mock_git.log.return_value = maybe_future(log)
    # When: no history_count in the request body
    response = await jp_fetch(
        NAMESPACE, local_path.name, "log", body="{}", method="POST"
    )
    # Then: the handler supplies the default of 25
    mock_git.log.assert_called_with(str(local_path), 25)
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == log
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
    """POST /push pushes to a local upstream ('.') when one is configured."""
    # Given: the upstream is a local branch (remote short name ".")
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("localbranch")
    mock_git.get_upstream_branch.return_value = maybe_future(
        {"code": 0, "remote_short_name": ".", "remote_branch": "localbranch"}
    )
    mock_git.push.return_value = maybe_future({"code": 0})
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body="{}", method="POST"
    )
    # Then
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "localbranch")
    mock_git.push.assert_called_with(
        ".", "HEAD:localbranch", str(local_path), None, False
    )
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_remotebranch(mock_git, jp_fetch, jp_root_dir):
    """POST /push pushes HEAD to the configured remote upstream branch."""
    # Given: current branch has a remote upstream with a different name
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("foo/bar")
    upstream = {
        "code": 0,
        "remote_short_name": "origin/something",
        "remote_branch": "remote-branch-name",
    }
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.push.return_value = maybe_future({"code": 0})
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body="{}", method="POST"
    )
    # Then
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
    mock_git.push.assert_called_with(
        "origin/something", "HEAD:remote-branch-name", str(local_path), None, False
    )
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream(mock_git, jp_fetch, jp_root_dir):
    """POST /push fails with 500 when no upstream and no remote is known."""
    # Given: no upstream configured and no remotes available
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {
        "code": 128,
        "command": "",
        "message": "fatal: no upstream configured for branch 'foo'",
    }
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future({"options": dict()})
    mock_git.remote_show.return_value = maybe_future({})
    mock_git.push.return_value = maybe_future({"code": 0})
    # When: the handler answers 500, which jp_fetch raises as HTTPClientError
    with pytest.raises(tornado.httpclient.HTTPClientError) as e:
        await jp_fetch(NAMESPACE, local_path.name, "push", body="{}", method="POST")
    response = e.value.response
    # Then: push must never be attempted
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_called_with(str(local_path))
    mock_git.remote_show.assert_called_with(str(local_path))
    mock_git.push.assert_not_called()
    assert response.code == 500
    payload = json.loads(response.body)
    assert payload == {
        "code": 128,
        "message": "fatal: The current branch foo has no upstream branch.",
        "remotes": list(),
    }
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_multipleupstream(mock_git, jp_fetch, jp_root_dir):
    """POST /push fails with 500 and lists remotes when several are defined."""
    # Given: no upstream, but two candidate remotes exist
    local_path = jp_root_dir / "test_path"
    remotes = ["origin", "upstream"]
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {"code": -1, "message": "oups"}
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future({"options": dict()})
    mock_git.remote_show.return_value = maybe_future({"remotes": remotes})
    mock_git.push.return_value = maybe_future({"code": 0})
    # When: the handler answers 500, which jp_fetch raises as HTTPClientError
    with pytest.raises(tornado.httpclient.HTTPClientError) as e:
        await jp_fetch(NAMESPACE, local_path.name, "push", body="{}", method="POST")
    response = e.value.response
    # Then: the ambiguity is reported back with the remote candidates
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_called_with(str(local_path))
    mock_git.remote_show.assert_called_with(str(local_path))
    mock_git.push.assert_not_called()
    assert response.code == 500
    payload = json.loads(response.body)
    assert payload == {
        "code": 128,
        "message": "fatal: The current branch foo has no upstream branch.",
        "remotes": remotes,
    }
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_unique_remote(mock_git, jp_fetch, jp_root_dir):
    """POST /push without upstream auto-targets the single defined remote."""
    # Given: no upstream, exactly one remote
    local_path = jp_root_dir / "test_path"
    remote = "origin"
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {"code": -1, "message": "oups"}
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future({"options": dict()})
    mock_git.remote_show.return_value = maybe_future({"remotes": [remote]})
    mock_git.push.return_value = maybe_future({"code": 0})
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body="{}", method="POST"
    )
    # Then: push goes to the unique remote and sets it as upstream
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_called_with(str(local_path))
    mock_git.remote_show.assert_called_with(str(local_path))
    mock_git.push.assert_called_with(
        remote, "foo", str(local_path), None, set_upstream=True
    )
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pushdefault(mock_git, jp_fetch, jp_root_dir):
    """POST /push without upstream honors the remote.pushdefault setting."""
    # Given: two remotes, but git config names one as push default
    local_path = jp_root_dir / "test_path"
    remote = "rorigin"
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {"code": -1, "message": "oups"}
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future(
        {"options": {"remote.pushdefault": remote}}
    )
    mock_git.remote_show.return_value = maybe_future({"remotes": [remote, "upstream"]})
    mock_git.push.return_value = maybe_future({"code": 0})
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body="{}", method="POST"
    )
    # Then: push goes to the configured default remote and sets upstream
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_called_with(str(local_path))
    mock_git.remote_show.assert_called_with(str(local_path))
    mock_git.push.assert_called_with(
        remote, "foo", str(local_path), None, set_upstream=True
    )
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_nobranch(
    mock_git, jp_fetch, jp_root_dir
):
    """POST /push with an explicit remote (no branch) pushes HEAD there."""
    # Given: no upstream configured
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {"code": -1, "message": "oups"}
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future({"options": dict()})
    mock_git.remote_show.return_value = maybe_future({})
    mock_git.push.return_value = maybe_future({"code": 0})
    remote = "online"
    # When: the request body names the remote explicitly
    body = {"remote": remote}
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body=json.dumps(body), method="POST"
    )
    # Then: no remote discovery needed; the branch name defaults to HEAD's
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_not_called()
    mock_git.remote_show.assert_not_called()
    mock_git.push.assert_called_with(remote, "HEAD:foo", str(local_path), None, True)
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitPushHandler.git", spec=Git)
async def test_push_handler_noupstream_pass_remote_branch(
    mock_git, jp_fetch, jp_root_dir
):
    """POST /push with an explicit remote/branch pushes HEAD to that branch."""
    # Given: no upstream configured
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("foo")
    upstream = {"code": -1, "message": "oups"}
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    mock_git.config.return_value = maybe_future({"options": dict()})
    mock_git.remote_show.return_value = maybe_future({})
    mock_git.push.return_value = maybe_future({"code": 0})
    remote = "online"
    remote_branch = "onfoo"
    # When: the request body names "remote/branch"
    body = {"remote": "/".join((remote, remote_branch))}
    response = await jp_fetch(
        NAMESPACE, local_path.name, "push", body=json.dumps(body), method="POST"
    )
    # Then: no remote discovery needed; push targets the named branch
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo")
    mock_git.config.assert_not_called()
    mock_git.remote_show.assert_not_called()
    mock_git.push.assert_called_with(
        remote, "HEAD:" + remote_branch, str(local_path), None, True
    )
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == {"code": 0}
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_forward_slashes(mock_git, jp_fetch, jp_root_dir):
    """POST /upstream handles branch names that contain forward slashes."""
    # Given
    local_path = jp_root_dir / "test_path"
    mock_git.get_current_branch.return_value = maybe_future("foo/bar")
    upstream = {
        "code": 0,
        "remote_short_name": "origin/something",
        "remote_branch": "foo/bar",
    }
    mock_git.get_upstream_branch.return_value = maybe_future(upstream)
    # When
    response = await jp_fetch(
        NAMESPACE, local_path.name, "upstream", body="{}", method="POST"
    )
    # Then
    mock_git.get_current_branch.assert_called_with(str(local_path))
    mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
    assert response.code == 200
    payload = json.loads(response.body)
    assert payload == upstream
@patch("jupyterlab_git.handlers.GitUpstreamHandler.git", spec=Git)
async def test_upstream_handler_localbranch(mock_git, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
mock_git.get_current_branch.return_value = maybe_future("foo/bar")
upstream = {"code": 0, "remote_short_name": ".", "remote_branch": "foo/bar"}
mock_git.get_upstream_branch.return_value = maybe_future(upstream)
# When
response = await jp_fetch(
NAMESPACE, local_path.name, "upstream", body="{}", method="POST"
)
# Then
mock_git.get_current_branch.assert_called_with(str(local_path))
mock_git.get_upstream_branch.assert_called_with(str(local_path), "foo/bar")
assert response.code == 200
payload = json.loads(response.body)
assert payload == upstream
@patch("jupyterlab_git.git.execute")
async def test_content(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"git": "previous"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("previous", filename)],
cwd=str(local_path),
),
],
)
@patch("jupyterlab_git.git.execute")
async def test_content_working(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, content, "")),
]
dummy_file = local_path / filename
dummy_file.parent.mkdir(parents=True)
dummy_file.write_text(content)
# When
body = {
"filename": filename,
"reference": {"special": "WORKING"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
@patch("jupyterlab_git.git.execute")
async def test_content_index(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"special": "INDEX"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == content
mock_execute.assert_has_calls(
[
call(
["git", "show", "{}:{}".format("", filename)],
cwd=str(local_path),
),
],
)
@patch("jupyterlab_git.git.execute")
async def test_content_unknown_special(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
content = "dummy content file\nwith multiple lines"
mock_execute.side_effect = [
maybe_future((0, "1\t1\t{}".format(filename), "")),
maybe_future((0, content, "")),
]
# When
body = {
"filename": filename,
"reference": {"special": "unknown"},
}
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="unknown special ref")
@patch("jupyterlab_git.git.execute")
async def test_content_show_handled_error(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future(
(
-1,
"",
"fatal: Path '{}' does not exist (neither on disk nor in the index)".format(
filename
),
)
)
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == ""
@patch("jupyterlab_git.git.execute")
async def test_content_binary(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future((0, "-\t-\t{}".format(filename), ""))
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="file is not UTF-8")
@patch("jupyterlab_git.git.execute")
async def test_content_show_unhandled_error(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/file"
mock_execute.return_value = maybe_future((-1, "", "Dummy error"))
# When
body = {
"filename": filename,
"reference": {"git": "current"},
}
# Then
with pytest.raises(tornado.httpclient.HTTPClientError) as e:
await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
assert_http_error(e, 500, expected_message="Dummy error")
@patch("jupyterlab_git.git.execute")
async def test_content_getcontent_deleted_file(mock_execute, jp_fetch, jp_root_dir):
# Given
local_path = jp_root_dir / "test_path"
filename = "my/absent_file"
content = "dummy content file\nwith multiple lines"
# When
body = {
"filename": filename,
"reference": {"special": "WORKING"},
}
# Then
response = await jp_fetch(
NAMESPACE, local_path.name, "content", body=json.dumps(body), method="POST"
)
# Then
assert response.code == 200
payload = json.loads(response.body)
assert payload["content"] == ""
|
[
"tornado.httputil.HTTPServerRequest",
"unittest.mock.MagicMock",
"json.loads",
"jupyterlab_git.handlers.setup_handlers",
"unittest.mock.Mock",
"json.dumps",
"unittest.mock.patch",
"pytest.raises",
"jupyterlab_git.handlers.GitHandler",
"pytest.mark.parametrize"
] |
[((480, 590), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""path, with_cm"""', "(('url', False), ('url/to/path', False), ('url/to/path', True))"], {}), "('path, with_cm', (('url', False), ('url/to/path', \n False), ('url/to/path', True)))\n", (503, 590), False, 'import pytest\n'), ((1039, 1106), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitAllHistoryHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitAllHistoryHandler.git', spec=Git)\n", (1044, 1106), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((2319, 2354), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (2324, 2354), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((3072, 3107), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (3077, 3107), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((3852, 3887), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (3857, 3887), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((4613, 4648), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (4618, 4648), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((5401, 5464), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitBranchHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitBranchHandler.git', spec=Git)\n", (5406, 5464), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((7544, 7604), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitLogHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitLogHandler.git', spec=Git)\n", (7549, 7604), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((8141, 8201), 
'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitLogHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitLogHandler.git', spec=Git)\n", (8146, 8201), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((8710, 8771), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (8715, 8771), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((9679, 9740), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (9684, 9740), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((10726, 10787), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (10731, 10787), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((12111, 12172), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (12116, 12172), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((13468, 13529), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (13473, 13529), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((14692, 14753), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (14697, 14753), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((15965, 16026), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 
'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (15970, 16026), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((17172, 17233), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitPushHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitPushHandler.git', spec=Git)\n", (17177, 17233), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((18459, 18524), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitUpstreamHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitUpstreamHandler.git', spec=Git)\n", (18464, 18524), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((19320, 19385), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.handlers.GitUpstreamHandler.git"""'], {'spec': 'Git'}), "('jupyterlab_git.handlers.GitUpstreamHandler.git', spec=Git)\n", (19325, 19385), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((20131, 20166), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (20136, 20166), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((21059, 21094), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (21064, 21094), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((21852, 21887), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (21857, 21887), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((22779, 22814), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (22784, 22814), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((23535, 23570), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", 
(23540, 23570), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((24311, 24346), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (24316, 24346), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((24951, 24986), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (24956, 24986), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((25586, 25621), 'unittest.mock.patch', 'patch', (['"""jupyterlab_git.git.execute"""'], {}), "('jupyterlab_git.git.execute')\n", (25591, 25621), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((315, 321), 'unittest.mock.Mock', 'Mock', ([], {}), '()\n', (319, 321), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((382, 410), 'jupyterlab_git.handlers.setup_handlers', 'setup_handlers', (['mock_web_app'], {}), '(mock_web_app)\n', (396, 410), False, 'from jupyterlab_git.handlers import NAMESPACE, setup_handlers, GitHandler\n'), ((677, 713), 'tornado.httputil.HTTPServerRequest', 'tornado.httputil.HTTPServerRequest', ([], {}), '()\n', (711, 713), False, 'import tornado\n'), ((735, 746), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), '()\n', (744, 746), False, 'from unittest.mock import ANY, MagicMock, Mock, call, patch\n'), ((761, 788), 'jupyterlab_git.handlers.GitHandler', 'GitHandler', (['jp_web_app', 'req'], {}), '(jp_web_app, req)\n', (771, 788), False, 'from jupyterlab_git.handlers import NAMESPACE, setup_handlers, GitHandler\n'), ((2061, 2086), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (2071, 2086), False, 'import json\n'), ((2803, 2828), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (2813, 2828), False, 'import json\n'), ((3588, 3613), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (3598, 3613), False, 'import 
json\n'), ((4342, 4367), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (4352, 4367), False, 'import json\n'), ((5135, 5160), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (5145, 5160), False, 'import json\n'), ((7449, 7474), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (7459, 7474), False, 'import json\n'), ((8086, 8111), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (8096, 8111), False, 'import json\n'), ((8655, 8680), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (8665, 8680), False, 'import json\n'), ((9616, 9641), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (9626, 9641), False, 'import json\n'), ((10663, 10688), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (10673, 10688), False, 'import json\n'), ((11928, 11953), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (11938, 11953), False, 'import json\n'), ((13284, 13309), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (13294, 13309), False, 'import json\n'), ((14629, 14654), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (14639, 14654), False, 'import json\n'), ((15902, 15927), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (15912, 15927), False, 'import json\n'), ((17109, 17134), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (17119, 17134), False, 'import json\n'), ((18396, 18421), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (18406, 18421), False, 'import json\n'), ((19260, 19285), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (19270, 19285), False, 'import json\n'), ((20071, 20096), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (20081, 20096), False, 'import json\n'), ((20786, 20811), 'json.loads', 
'json.loads', (['response.body'], {}), '(response.body)\n', (20796, 20811), False, 'import json\n'), ((21782, 21807), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (21792, 21807), False, 'import json\n'), ((22514, 22539), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (22524, 22539), False, 'import json\n'), ((24246, 24271), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (24256, 24271), False, 'import json\n'), ((26146, 26171), 'json.loads', 'json.loads', (['response.body'], {}), '(response.body)\n', (26156, 26171), False, 'import json\n'), ((11396, 11445), 'pytest.raises', 'pytest.raises', (['tornado.httpclient.HTTPClientError'], {}), '(tornado.httpclient.HTTPClientError)\n', (11409, 11445), False, 'import pytest\n'), ((12753, 12802), 'pytest.raises', 'pytest.raises', (['tornado.httpclient.HTTPClientError'], {}), '(tornado.httpclient.HTTPClientError)\n', (12766, 12802), False, 'import pytest\n'), ((23284, 23333), 'pytest.raises', 'pytest.raises', (['tornado.httpclient.HTTPClientError'], {}), '(tornado.httpclient.HTTPClientError)\n', (23297, 23333), False, 'import pytest\n'), ((24702, 24751), 'pytest.raises', 'pytest.raises', (['tornado.httpclient.HTTPClientError'], {}), '(tornado.httpclient.HTTPClientError)\n', (24715, 24751), False, 'import pytest\n'), ((25343, 25392), 'pytest.raises', 'pytest.raises', (['tornado.httpclient.HTTPClientError'], {}), '(tornado.httpclient.HTTPClientError)\n', (25356, 25392), False, 'import pytest\n'), ((1731, 1747), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (1741, 1747), False, 'import json\n'), ((7932, 7948), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (7942, 7948), False, 'import json\n'), ((16697, 16713), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (16707, 16713), False, 'import json\n'), ((17957, 17973), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (17967, 17973), False, 'import json\n'), ((20690, 
20706), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (20700, 20706), False, 'import json\n'), ((21686, 21702), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (21696, 21702), False, 'import json\n'), ((22418, 22434), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (22428, 22434), False, 'import json\n'), ((24150, 24166), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (24160, 24166), False, 'import json\n'), ((26050, 26066), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (26060, 26066), False, 'import json\n'), ((23420, 23436), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (23430, 23436), False, 'import json\n'), ((24838, 24854), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (24848, 24854), False, 'import json\n'), ((25479, 25495), 'json.dumps', 'json.dumps', (['body'], {}), '(body)\n', (25489, 25495), False, 'import json\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import unittest
from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse
class TestSparse(unittest.TestCase):
    """Exercise gpytorch's sparse helpers against their dense torch equivalents."""

    def setUp(self):
        # Fixture: a 5x3 sparse matrix and its dense counterpart.
        self.indices = torch.LongTensor([[0, 1, 2, 3, 4], [2, 1, 0, 0, 1]])
        self.values = torch.FloatTensor([3, 4, 5, 2, 6])
        self.sparse = torch.sparse.FloatTensor(self.indices, self.values, torch.Size((5, 3)))
        self.dense = self.sparse.to_dense()

    def _check(self, expected, result):
        # A sparse result must match its dense expectation element-wise.
        self.assertTrue(torch.equal(expected, result.to_dense()))

    def test_sparse_eye(self):
        self._check(torch.eye(5), sparse_eye(5))

    def test_sparse_getitem_one_dim_int(self):
        self._check(self.dense[3], sparse_getitem(self.sparse, 3))

    def test_sparse_getitem_one_dim_slice(self):
        self._check(self.dense[2:4], sparse_getitem(self.sparse, slice(2, 4)))

    def test_sparse_getitem_two_dim_int(self):
        # Indexing both dimensions yields a scalar, so compare values directly.
        self.assertEqual(self.dense[2, 1], sparse_getitem(self.sparse, (2, 1)))

    def test_sparse_getitem_two_dim_int_slice(self):
        full = slice(None, None, None)
        self._check(self.dense[:, 1], sparse_getitem(self.sparse, (full, 1)))
        self._check(self.dense[1, :], sparse_getitem(self.sparse, (1, full)))

    def test_sparse_getitem_two_dim_slice(self):
        self._check(
            self.dense[2:4, 1:3],
            sparse_getitem(self.sparse, (slice(2, 4), slice(1, 3))),
        )

    def test_sparse_repeat_1d(self):
        one_dim = sparse_getitem(self.sparse, 1)
        for rows, cols in ((3, 1), (2, 3)):
            self._check(
                one_dim.to_dense().repeat(rows, cols),
                sparse_repeat(one_dim, rows, cols),
            )

    def test_sparse_repeat_2d(self):
        for rows, cols in ((3, 2), (1, 2), (3, 1)):
            self._check(
                self.sparse.to_dense().repeat(rows, cols),
                sparse_repeat(self.sparse, rows, cols),
            )

    def test_to_sparse(self):
        # Round-tripping dense -> sparse must preserve every value.
        round_trip = to_sparse(self.sparse.to_dense())
        self._check(self.sparse.to_dense(), round_trip)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"torch.eye",
"torch.LongTensor",
"torch.FloatTensor",
"gpytorch.utils.sparse_repeat",
"gpytorch.utils.sparse_getitem",
"gpytorch.utils.sparse_eye",
"torch.Size"
] |
[((3043, 3058), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3056, 3058), False, 'import unittest\n'), ((342, 394), 'torch.LongTensor', 'torch.LongTensor', (['[[0, 1, 2, 3, 4], [2, 1, 0, 0, 1]]'], {}), '([[0, 1, 2, 3, 4], [2, 1, 0, 0, 1]])\n', (358, 394), False, 'import torch\n'), ((417, 451), 'torch.FloatTensor', 'torch.FloatTensor', (['[3, 4, 5, 2, 6]'], {}), '([3, 4, 5, 2, 6])\n', (434, 451), False, 'import torch\n'), ((636, 649), 'gpytorch.utils.sparse_eye', 'sparse_eye', (['(5)'], {}), '(5)\n', (646, 649), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((667, 679), 'torch.eye', 'torch.eye', (['(5)'], {}), '(5)\n', (676, 679), False, 'import torch\n'), ((834, 864), 'gpytorch.utils.sparse_getitem', 'sparse_getitem', (['self.sparse', '(3)'], {}), '(self.sparse, 3)\n', (848, 864), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((1221, 1256), 'gpytorch.utils.sparse_getitem', 'sparse_getitem', (['self.sparse', '(2, 1)'], {}), '(self.sparse, (2, 1))\n', (1235, 1256), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((1961, 1991), 'gpytorch.utils.sparse_getitem', 'sparse_getitem', (['self.sparse', '(1)'], {}), '(self.sparse, 1)\n', (1975, 1991), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((2057, 2087), 'gpytorch.utils.sparse_repeat', 'sparse_repeat', (['sparse_1d', '(3)', '(1)'], {}), '(sparse_1d, 3, 1)\n', (2070, 2087), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((2215, 2245), 'gpytorch.utils.sparse_repeat', 'sparse_repeat', (['sparse_1d', '(2)', '(3)'], {}), '(sparse_1d, 2, 3)\n', (2228, 2245), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((2412, 2444), 'gpytorch.utils.sparse_repeat', 'sparse_repeat', (['self.sparse', '(3)', '(2)'], {}), '(self.sparse, 3, 2)\n', (2425, 2444), 
False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((2574, 2606), 'gpytorch.utils.sparse_repeat', 'sparse_repeat', (['self.sparse', '(1)', '(2)'], {}), '(self.sparse, 1, 2)\n', (2587, 2606), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((2736, 2768), 'gpytorch.utils.sparse_repeat', 'sparse_repeat', (['self.sparse', '(3)', '(1)'], {}), '(self.sparse, 3, 1)\n', (2749, 2768), False, 'from gpytorch.utils import sparse_eye, sparse_getitem, sparse_repeat, to_sparse\n'), ((526, 544), 'torch.Size', 'torch.Size', (['(5, 3)'], {}), '((5, 3))\n', (536, 544), False, 'import torch\n')]
|
import pytest
import saltfactories.utils.functional
import saltfactories.utils.markers
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """
    Fixtures injection based on markers or test skips based on CLI arguments
    """
    # Hide this frame from pytest tracebacks so failures point at the test.
    __tracebackhide__ = True
    # Delegate marker evaluation (e.g. requires_salt_modules) to salt-factories.
    saltfactories.utils.markers.evaluate_markers(item)
# ``@pytest.mark.trylast`` on a hook is deprecated/removed in modern pytest;
# ``@pytest.hookimpl(trylast=True)`` is the supported spelling and matches the
# ``hookimpl(tryfirst=True)`` usage elsewhere in this conftest.
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    """
    Called after command line options have been parsed and all plugins and
    initial conftest files have been loaded.

    Registers the suite's custom markers so pytest does not warn about them
    (and does not reject them under ``--strict-markers``).
    """
    # Expose the markers we use to pytest CLI
    config.addinivalue_line(
        "markers",
        "requires_salt_modules(*required_module_names): Skip if at least one module is not available.",
    )
    config.addinivalue_line(
        "markers",
        "requires_salt_states(*required_state_names): Skip if at least one state module is not available.",
    )
@pytest.fixture(scope="session")
def session_markers_loader(salt_factories):
minion_id = "session-markers-minion"
overrides = {
"file_client": "local",
"features": {"enable_slsvars_fixes": True},
}
factory = salt_factories.salt_minion_daemon(
minion_id,
overrides=overrides,
)
loader_instance = saltfactories.utils.functional.Loaders(factory.config.copy())
# Sync Everything
loader_instance.modules.saltutil.sync_all()
# Reload Everything - This is required or custom modules in _modules will not be found
loader_instance.reload_all()
return loader_instance
|
[
"pytest.hookimpl",
"pytest.fixture"
] |
[((91, 121), 'pytest.hookimpl', 'pytest.hookimpl', ([], {'tryfirst': '(True)'}), '(tryfirst=True)\n', (106, 121), False, 'import pytest\n'), ((884, 915), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (898, 915), False, 'import pytest\n')]
|
from sqlalchemy_continuum_vendored import make_versioned
# make_versioned(user_cls=None, options={'strategy' : 'subquery'})
# Import the DB things.
from common.main_archive_db import WebPages
from common.main_archive_db import WebFiles
from common.main_archive_db import PluginStatus
from common.main_archive_db import NuReleaseItem
from common.main_archive_db import NuResolvedOutbound
from common.raw_archive_db import RawWebPages
from common.rss_func_db import Tags
from common.rss_func_db import Author
from common.rss_func_db import RssFeedPost
from common.rss_func_db import RssFeedUrlMapper
from common.rss_func_db import RssFeedEntry
from common.rss_func_db import QidianFeedPostMeta
from common.misc_db import KeyValueStore
from common.misc_db import get_from_db_key_value_store
from common.misc_db import set_in_db_key_value_store
from common.misc_db import get_from_version_check_table
from common.misc_db import set_in_version_check_table
from common.cookie_db import WebCookieDb
from common.db_engine import get_engine
from common.db_engine import get_db_session
from common.db_engine import delete_db_session
from common.db_engine import session_context
from common.db_constants import DB_REALTIME_PRIORITY
from common.db_constants import DB_HIGH_PRIORITY
from common.db_constants import DB_MED_PRIORITY
from common.db_constants import DB_LOW_PRIORITY
from common.db_constants import DB_IDLE_PRIORITY
from common.db_constants import DB_DEFAULT_DIST
from common.db_constants import MAX_DISTANCE
from common.db_base import Base
from common.redis import redis_session_context
import sqlalchemy as sa
# Eagerly configure all ORM mappers now that every model module above has been
# imported, so relationship/mapping errors surface at import time.
sa.orm.configure_mappers()
# from sqlalchemy_searchable import make_searchable
# make_searchable()
|
[
"sqlalchemy.orm.configure_mappers"
] |
[((1624, 1650), 'sqlalchemy.orm.configure_mappers', 'sa.orm.configure_mappers', ([], {}), '()\n', (1648, 1650), True, 'import sqlalchemy as sa\n')]
|
#!/usr/bin/python
#
# Copyright 2017, The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Script that is used by developers to run style checks on Kotlin files."""
from __future__ import print_function
import argparse
import errno
import os
import subprocess
import sys
MAIN_DIRECTORY = os.path.normpath(os.path.dirname(__file__))
KTLINT_JAR = os.path.join(MAIN_DIRECTORY, 'ktlint-android-all.jar')
FORMAT_MESSAGE = '''
**********************************************************************
To format run:
{}/ktlint.py --format --file {}
**********************************************************************
'''
def main(args=None):
    """Run ktlint (optionally with -F to auto-format) over the given Kotlin files.

    Args:
        args: optional argv-style list; defaults to ``sys.argv[1:]``.

    Exits 0 when there are no Kotlin files to check or no violations were
    found, and 1 when ktlint reports errors or java cannot be launched.
    """
    parser = argparse.ArgumentParser()
    # default=[]: without it, omitting --file leaves args.file as None and the
    # list comprehension below raises TypeError.
    parser.add_argument('--file', '-f', nargs='*', default=[])
    parser.add_argument('--format', '-F', dest='format', action='store_true')
    parser.add_argument('--noformat', dest='format', action='store_false')
    parser.set_defaults(format=False)
    # Pass the caller-supplied args through (previously the parameter was
    # silently ignored and sys.argv was always parsed).
    args = parser.parse_args(args)
    # Only Kotlin sources are linted; everything else is ignored.
    kt_files = [f for f in args.file if f.endswith('.kt') or f.endswith('.kts')]
    ktlint_args = kt_files[:]
    if args.format:
        ktlint_args += ['-F']
    if not ktlint_args:
        # Nothing to check.
        sys.exit(0)
    ktlint_args += ['--android']
    ktlint_env = os.environ.copy()
    ktlint_env['JAVA_CMD'] = 'java'
    try:
        check = subprocess.Popen(['java', '-jar', KTLINT_JAR] + ktlint_args,
                                 stdout=subprocess.PIPE, env=ktlint_env)
        stdout, _ = check.communicate()
        if stdout:
            # Any output from ktlint means violations were found.
            print('prebuilts/ktlint found errors in files you changed:')
            print(stdout)
            print(FORMAT_MESSAGE.format(MAIN_DIRECTORY, ' '.join(kt_files)))
            sys.exit(1)
        else:
            sys.exit(0)
    except OSError as e:
        if e.errno == errno.ENOENT:
            print('Error running ktlint!')
            sys.exit(1)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"subprocess.Popen",
"argparse.ArgumentParser",
"os.path.dirname",
"os.environ.copy",
"os.path.join",
"sys.exit"
] |
[((880, 934), 'os.path.join', 'os.path.join', (['MAIN_DIRECTORY', '"""ktlint-android-all.jar"""'], {}), "(MAIN_DIRECTORY, 'ktlint-android-all.jar')\n", (892, 934), False, 'import os\n'), ((840, 865), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (855, 865), False, 'import os\n'), ((1183, 1208), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1206, 1208), False, 'import argparse\n'), ((1709, 1726), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (1724, 1726), False, 'import os\n'), ((1649, 1660), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1657, 1660), False, 'import sys\n'), ((1780, 1885), 'subprocess.Popen', 'subprocess.Popen', (["(['java', '-jar', KTLINT_JAR] + ktlint_args)"], {'stdout': 'subprocess.PIPE', 'env': 'ktlint_env'}), "(['java', '-jar', KTLINT_JAR] + ktlint_args, stdout=\n subprocess.PIPE, env=ktlint_env)\n", (1796, 1885), False, 'import subprocess\n'), ((2125, 2136), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2133, 2136), False, 'import sys\n'), ((2153, 2164), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2161, 2164), False, 'import sys\n'), ((2263, 2274), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2271, 2274), False, 'import sys\n')]
|
import streamlit as st
import pandas as pd
import altair as alt
st.title("Let's analyze some data about autonomous vehicles, collected from Pittsburgh residents 🚗📊.")
@st.cache  # cache so the CSV is downloaded only once per session
def load_data_nine():
    """Load the 2019 autonomous-vehicle survey responses.

    Returns:
        pandas.DataFrame read from the project's 2019_AV_data.csv on GitHub.
    """
    # Dead template code removed: the unused penguins/2017 URLs and the stale
    # TODO comments added noise without affecting behavior.
    nine_url = "https://raw.githubusercontent.com/CMU-IDS-2022/assignment-2-h-j/master/2019_AV_data.csv"
    return pd.read_csv(nine_url)
@st.cache  # match load_data_nine: fetch once instead of on every rerun
def load_data_seven():
    """Download the 2017 Pittsburgh AV survey responses.

    Returns:
        pandas.DataFrame: the raw 2017 survey data read straight from the
        course repository CSV; Streamlit caches the result across reruns.
    """
    seven_url = "https://raw.githubusercontent.com/CMU-IDS-2022/assignment-2-h-j/master/2017_AV_data.csv"
    return pd.read_csv(seven_url)
# Load both survey years once at startup (each loader hits a remote CSV).
df_nine = load_data_nine()
df_seven = load_data_seven()
st.write("We're interested in seeing how opinions about AVs and using Pittsburgh as a testing 'Proving Ground' for AVs have changed from 2017 to 2019.")
st.write("First, Let's look at raw data for 2017 in the Pandas Data Frame.")
st.write(df_seven)
st.write("Now, let's look at raw data for 2019 in the Pandas Data Frame.")
st.write(df_nine)
st.write("Hmm 🤔, is there a difference between 2017 and 2019 for the approval of Pittsburgh as a proving gound for AV testing? Let's compare bar charts with [Altair](https://altair-viz.github.io/) to find out.")
# Bar chart of 2019 "Proving Ground" approval counts, ordered from
# Approve down to Disapprove (plus 'nan' for missing responses).
hist_2019 = alt.Chart(df_nine).mark_bar(
    tooltip=True
).encode(
    # alt.X("ProvingGround", type="nominal"),
    # alt.Y(aggregate="count", type="quantitative")
    alt.X("ProvingGround", type="nominal", axis = alt.Axis(title="Proving Ground Approval, 2019"), sort=[
    'Approve',
    'Somewhat Approve',
    'Neutral',
    'Somewhat Disapprove',
    'Disapprove',
    'nan']),
    y='count()',
    #alt.Color("Species", scale=alt.Scale(domain=["Adelie", "Chinstrap", "Gentoo"], range=["orangered", "purple", "seagreen"]))
).properties(
    width=300, height=400
)
# Same chart for 2017; note the 2017 survey names the column
# "FeelingsProvingGround" rather than "ProvingGround".
hist_2017 = alt.Chart(df_seven).mark_bar(
    tooltip=True
).encode(
    # alt.X("ProvingGround", type="nominal"),
    # alt.Y(aggregate="count", type="quantitative")
    alt.X("FeelingsProvingGround", type="nominal", axis = alt.Axis(title="Proving Ground Approval, 2017"), sort=[
    'Approve',
    'Somewhat Approve',
    'Neutral',
    'Somewhat Disapprove',
    'Disapprove',
    'nan']),
    y='count()',
    #alt.Color("Species", scale=alt.Scale(domain=["Adelie", "Chinstrap", "Gentoo"], range=["orangered", "purple", "seagreen"]))
).properties(
    width=300, height=400
)
# Show the two years side by side for visual comparison.
pg_compare = alt.hconcat(hist_2017, hist_2019)
st.write(pg_compare)
st.write("There doesn't seem to be much of a difference between 2017 and 2019 based on the charts above. Let's try another way of investigating our question by looking at the interaction between **approval of Pittsburgh as a proving ground in 2019** and **impact of the 2018 Arizona Uber crash** from the 2019 data.")
# Linked selections: clicking bars in one chart filters the other.
pg_brush = alt.selection_multi(fields=['ProvingGround'])
arizona_brush = alt.selection_multi(fields=['ArizonaCrash'])
# Proving-ground chart is filtered by the Arizona-crash selection (and
# vice versa below), creating a cross-filtered pair of histograms.
pg_2019_chart = alt.Chart(df_nine, title='Proving Ground Approval, 2019').transform_filter(arizona_brush).mark_bar().encode(
    x='count()',
    y= alt.Y('ProvingGround', type="nominal", sort=[
    'Approve',
    'Somewhat Approve',
    'Neutral',
    'Somewhat Disapprove',
    'Disapprove',
    'nan']),
    color=alt.condition(pg_brush, alt.value('steelblue'), alt.value('lightgray'))
).add_selection(pg_brush).interactive()
arizona_chart = alt.Chart(df_nine, title='Impact of 2018 Arizona Uber Crash on AV Opinion').transform_filter(pg_brush).mark_bar().encode(
    x='count()',
    y=alt.Y('ArizonaCrash', sort=[
    'Significantly more negative opinion',
    'Somewhat more negative opinion',
    'No change',
    'Somewhat more positive opinion',
    'Significantly more positive opinion',
    'nan']),
    color=alt.condition(arizona_brush, alt.value('salmon'), alt.value('lightgray'))
).add_selection(arizona_brush).interactive()
# '&' vertically concatenates the two linked charts into one view.
st.altair_chart(pg_2019_chart & arizona_chart)
st.write("From the charts above, it appears that for the majority of survey responders, the 2018 crash did not impact their opnions on AVs, and most Pittsburghers still approve of Pittsburgh as a testing groundfor AVs. Unsurprisingly, however, residents who responded **Significantly more negative opinion** to the Arizona Crash question mostly disapproved of using Pittsburgh as a proving ground.")
st.write("Lastly, we're interested in finding out if people feel safer sharing the road with human drivers vs AVs. Do people trust their fellow humans or robots more? Let's look at the 2019 data to see.")
# Histogram of perceived safety around human drivers (0-5 scale),
# bars sorted by descending count ('-y').
hist_humans = alt.Chart(df_nine).mark_bar(size=20,
    tooltip=True
).encode(
    x=alt.X('SafeHuman', sort='-y', axis = alt.Axis(title="Safety Sharing Road with Human Drivers. (0 = Very Unsafe, 5 = Very Safe)", tickMinStep=1)),
    y='count()',
).properties(
    width=300, height=400
).interactive()
# Legend-bound selection so clicking legend entries highlights bars.
selection = alt.selection_multi(fields=['SafeAv'], bind='legend')
hist_avs = alt.Chart(df_nine).mark_bar(size=20,
    tooltip=True
).encode(
    x=alt.X('SafeAv', sort='-y', axis = alt.Axis(title="Safety Sharing Road with AVs. (0 = Very Unsafe, 5 = Very Safe)", tickMinStep=1)),
    y='count()',
).properties(
    width=300, height=400
).interactive().add_selection(
    selection
)
# Human-vs-AV safety histograms side by side.
human_av_compare = alt.hconcat(hist_humans, hist_avs)
st.write(human_av_compare)
st.write("Interestingly enough, it appears that people feel more safe overall sharing the road with AVs than with human drivers. What are your theories as to why this is?")
st.markdown("This project was created by <NAME> and <NAME> for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at [Carnegie Mellon University](https://www.cmu.edu).")
|
[
"streamlit.altair_chart",
"streamlit.markdown",
"altair.Y",
"pandas.read_csv",
"altair.Chart",
"altair.selection_multi",
"streamlit.write",
"streamlit.title",
"altair.Axis",
"altair.value",
"altair.hconcat"
] |
[((65, 177), 'streamlit.title', 'st.title', (['"""Let\'s analyze some data about autonomous vehicles, collected from Pittsburgh residents 🚗📊."""'], {}), '(\n "Let\'s analyze some data about autonomous vehicles, collected from Pittsburgh residents 🚗📊."\n )\n', (73, 177), True, 'import streamlit as st\n'), ((1164, 1326), 'streamlit.write', 'st.write', (['"""We\'re interested in seeing how opinions about AVs and using Pittsburgh as a testing \'Proving Ground\' for AVs have changed from 2017 to 2019."""'], {}), '(\n "We\'re interested in seeing how opinions about AVs and using Pittsburgh as a testing \'Proving Ground\' for AVs have changed from 2017 to 2019."\n )\n', (1172, 1326), True, 'import streamlit as st\n'), ((1318, 1394), 'streamlit.write', 'st.write', (['"""First, Let\'s look at raw data for 2017 in the Pandas Data Frame."""'], {}), '("First, Let\'s look at raw data for 2017 in the Pandas Data Frame.")\n', (1326, 1394), True, 'import streamlit as st\n'), ((1396, 1414), 'streamlit.write', 'st.write', (['df_seven'], {}), '(df_seven)\n', (1404, 1414), True, 'import streamlit as st\n'), ((1416, 1490), 'streamlit.write', 'st.write', (['"""Now, let\'s look at raw data for 2019 in the Pandas Data Frame."""'], {}), '("Now, let\'s look at raw data for 2019 in the Pandas Data Frame.")\n', (1424, 1490), True, 'import streamlit as st\n'), ((1492, 1509), 'streamlit.write', 'st.write', (['df_nine'], {}), '(df_nine)\n', (1500, 1509), True, 'import streamlit as st\n'), ((1511, 1732), 'streamlit.write', 'st.write', (['"""Hmm 🤔, is there a difference between 2017 and 2019 for the approval of Pittsburgh as a proving gound for AV testing? Let\'s compare bar charts with [Altair](https://altair-viz.github.io/) to find out."""'], {}), '(\n "Hmm 🤔, is there a difference between 2017 and 2019 for the approval of Pittsburgh as a proving gound for AV testing? 
Let\'s compare bar charts with [Altair](https://altair-viz.github.io/) to find out."\n )\n', (1519, 1732), True, 'import streamlit as st\n'), ((2932, 2965), 'altair.hconcat', 'alt.hconcat', (['hist_2017', 'hist_2019'], {}), '(hist_2017, hist_2019)\n', (2943, 2965), True, 'import altair as alt\n'), ((2966, 2986), 'streamlit.write', 'st.write', (['pg_compare'], {}), '(pg_compare)\n', (2974, 2986), True, 'import streamlit as st\n'), ((2988, 3315), 'streamlit.write', 'st.write', (['"""There doesn\'t seem to be much of a difference between 2017 and 2019 based on the charts above. Let\'s try another way of investigating our question by looking at the interaction between **approval of Pittsburgh as a proving ground in 2019** and **impact of the 2018 Arizona Uber crash** from the 2019 data."""'], {}), '(\n "There doesn\'t seem to be much of a difference between 2017 and 2019 based on the charts above. Let\'s try another way of investigating our question by looking at the interaction between **approval of Pittsburgh as a proving ground in 2019** and **impact of the 2018 Arizona Uber crash** from the 2019 data."\n )\n', (2996, 3315), True, 'import streamlit as st\n'), ((3318, 3363), 'altair.selection_multi', 'alt.selection_multi', ([], {'fields': "['ProvingGround']"}), "(fields=['ProvingGround'])\n", (3337, 3363), True, 'import altair as alt\n'), ((3380, 3424), 'altair.selection_multi', 'alt.selection_multi', ([], {'fields': "['ArizonaCrash']"}), "(fields=['ArizonaCrash'])\n", (3399, 3424), True, 'import altair as alt\n'), ((4414, 4460), 'streamlit.altair_chart', 'st.altair_chart', (['(pg_2019_chart & arizona_chart)'], {}), '(pg_2019_chart & arizona_chart)\n', (4429, 4460), True, 'import streamlit as st\n'), ((4462, 4871), 'streamlit.write', 'st.write', (['"""From the charts above, it appears that for the majority of survey responders, the 2018 crash did not impact their opnions on AVs, and most Pittsburghers still approve of Pittsburgh as a testing groundfor AVs. 
Unsurprisingly, however, residents who responded **Significantly more negative opinion** to the Arizona Crash question mostly disapproved of using Pittsburgh as a proving ground."""'], {}), "(\n 'From the charts above, it appears that for the majority of survey responders, the 2018 crash did not impact their opnions on AVs, and most Pittsburghers still approve of Pittsburgh as a testing groundfor AVs. Unsurprisingly, however, residents who responded **Significantly more negative opinion** to the Arizona Crash question mostly disapproved of using Pittsburgh as a proving ground.'\n )\n", (4470, 4871), True, 'import streamlit as st\n'), ((4863, 5077), 'streamlit.write', 'st.write', (['"""Lastly, we\'re interested in finding out if people feel safer sharing the road with human drivers vs AVs. Do people trust their fellow humans or robots more? Let\'s look at the 2019 data to see."""'], {}), '(\n "Lastly, we\'re interested in finding out if people feel safer sharing the road with human drivers vs AVs. Do people trust their fellow humans or robots more? Let\'s look at the 2019 data to see."\n )\n', (4871, 5077), True, 'import streamlit as st\n'), ((5383, 5436), 'altair.selection_multi', 'alt.selection_multi', ([], {'fields': "['SafeAv']", 'bind': '"""legend"""'}), "(fields=['SafeAv'], bind='legend')\n", (5402, 5436), True, 'import altair as alt\n'), ((5774, 5808), 'altair.hconcat', 'alt.hconcat', (['hist_humans', 'hist_avs'], {}), '(hist_humans, hist_avs)\n', (5785, 5808), True, 'import altair as alt\n'), ((5809, 5835), 'streamlit.write', 'st.write', (['human_av_compare'], {}), '(human_av_compare)\n', (5817, 5835), True, 'import streamlit as st\n'), ((5837, 6019), 'streamlit.write', 'st.write', (['"""Interestingly enough, it appears that people feel more safe overall sharing the road with AVs than with human drivers. 
What are your theories as to why this is?"""'], {}), "(\n 'Interestingly enough, it appears that people feel more safe overall sharing the road with AVs than with human drivers. What are your theories as to why this is?'\n )\n", (5845, 6019), True, 'import streamlit as st\n'), ((6011, 6206), 'streamlit.markdown', 'st.markdown', (['"""This project was created by <NAME> and <NAME> for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at [Carnegie Mellon University](https://www.cmu.edu)."""'], {}), "(\n 'This project was created by <NAME> and <NAME> for the [Interactive Data Science](https://dig.cmu.edu/ids2022) course at [Carnegie Mellon University](https://www.cmu.edu).'\n )\n", (6022, 6206), True, 'import streamlit as st\n'), ((919, 940), 'pandas.read_csv', 'pd.read_csv', (['nine_url'], {}), '(nine_url)\n', (930, 940), True, 'import pandas as pd\n'), ((1082, 1104), 'pandas.read_csv', 'pd.read_csv', (['seven_url'], {}), '(seven_url)\n', (1093, 1104), True, 'import pandas as pd\n'), ((1940, 1987), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Proving Ground Approval, 2019"""'}), "(title='Proving Ground Approval, 2019')\n", (1948, 1987), True, 'import altair as alt\n'), ((2542, 2589), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Proving Ground Approval, 2017"""'}), "(title='Proving Ground Approval, 2017')\n", (2550, 2589), True, 'import altair as alt\n'), ((1736, 1754), 'altair.Chart', 'alt.Chart', (['df_nine'], {}), '(df_nine)\n', (1745, 1754), True, 'import altair as alt\n'), ((2329, 2348), 'altair.Chart', 'alt.Chart', (['df_seven'], {}), '(df_seven)\n', (2338, 2348), True, 'import altair as alt\n'), ((3574, 3709), 'altair.Y', 'alt.Y', (['"""ProvingGround"""'], {'type': '"""nominal"""', 'sort': "['Approve', 'Somewhat Approve', 'Neutral', 'Somewhat Disapprove',\n 'Disapprove', 'nan']"}), "('ProvingGround', type='nominal', sort=['Approve', 'Somewhat Approve',\n 'Neutral', 'Somewhat Disapprove', 'Disapprove', 'nan'])\n", (3579, 3709), True, 'import 
altair as alt\n'), ((4039, 4245), 'altair.Y', 'alt.Y', (['"""ArizonaCrash"""'], {'sort': "['Significantly more negative opinion', 'Somewhat more negative opinion',\n 'No change', 'Somewhat more positive opinion',\n 'Significantly more positive opinion', 'nan']"}), "('ArizonaCrash', sort=['Significantly more negative opinion',\n 'Somewhat more negative opinion', 'No change',\n 'Somewhat more positive opinion', 'Significantly more positive opinion',\n 'nan'])\n", (4044, 4245), True, 'import altair as alt\n'), ((3790, 3812), 'altair.value', 'alt.value', (['"""steelblue"""'], {}), "('steelblue')\n", (3799, 3812), True, 'import altair as alt\n'), ((3814, 3836), 'altair.value', 'alt.value', (['"""lightgray"""'], {}), "('lightgray')\n", (3823, 3836), True, 'import altair as alt\n'), ((4323, 4342), 'altair.value', 'alt.value', (['"""salmon"""'], {}), "('salmon')\n", (4332, 4342), True, 'import altair as alt\n'), ((4344, 4366), 'altair.value', 'alt.value', (['"""lightgray"""'], {}), "('lightgray')\n", (4353, 4366), True, 'import altair as alt\n'), ((5083, 5101), 'altair.Chart', 'alt.Chart', (['df_nine'], {}), '(df_nine)\n', (5092, 5101), True, 'import altair as alt\n'), ((5191, 5305), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Safety Sharing Road with Human Drivers. (0 = Very Unsafe, 5 = Very Safe)"""', 'tickMinStep': '(1)'}), "(title=\n 'Safety Sharing Road with Human Drivers. 
(0 = Very Unsafe, 5 = Very Safe)',\n tickMinStep=1)\n", (5199, 5305), True, 'import altair as alt\n'), ((3441, 3498), 'altair.Chart', 'alt.Chart', (['df_nine'], {'title': '"""Proving Ground Approval, 2019"""'}), "(df_nine, title='Proving Ground Approval, 2019')\n", (3450, 3498), True, 'import altair as alt\n'), ((3894, 3969), 'altair.Chart', 'alt.Chart', (['df_nine'], {'title': '"""Impact of 2018 Arizona Uber Crash on AV Opinion"""'}), "(df_nine, title='Impact of 2018 Arizona Uber Crash on AV Opinion')\n", (3903, 3969), True, 'import altair as alt\n'), ((5449, 5467), 'altair.Chart', 'alt.Chart', (['df_nine'], {}), '(df_nine)\n', (5458, 5467), True, 'import altair as alt\n'), ((5554, 5658), 'altair.Axis', 'alt.Axis', ([], {'title': '"""Safety Sharing Road with AVs. (0 = Very Unsafe, 5 = Very Safe)"""', 'tickMinStep': '(1)'}), "(title=\n 'Safety Sharing Road with AVs. (0 = Very Unsafe, 5 = Very Safe)',\n tickMinStep=1)\n", (5562, 5658), True, 'import altair as alt\n')]
|
import os
import time
from datetime import datetime, timedelta
from time import sleep
from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len
from mamba import it, before, context, description
from sdcclient.monitor import EventsClientV2
from specs import be_successful_api_call
# Integration spec (mamba BDD DSL) for the Sysdig Monitor Events v2 client.
# Each `with it(...)` block runs against a live backend configured through
# the SDC_MONITOR_URL / SDC_MONITOR_TOKEN environment variables.
with description("Events v2", "integration") as self:
    with before.all:
        # One shared client and event name for every example in this spec.
        self.client = EventsClientV2(sdc_url=os.getenv("SDC_MONITOR_URL", "https://app.sysdigcloud.com"),
                                       token=os.getenv("SDC_MONITOR_TOKEN"))
        self.event_name = "event_v2_test_ci"
    with it("is able to create a custom event"):
        call = self.client.post_event(name=self.event_name,
                                      description="This event was created in a CI pipeline for the Python SDK library")
        expect(call).to(be_successful_api_call)
    with it("is able to create a custom event with a scope"):
        call = self.client.post_event(name=self.event_name,
                                      description="This event was created in a CI pipeline for the Python SDK library",
                                      event_filter="host.hostName='ci'")
        expect(call).to(be_successful_api_call)
        sleep(2)  # sleep to guarantee the event is created
        # The scope filter should be echoed back (normalized with spaces).
        ok, res = self.client.get_events()
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events"))
        expect(res["events"]).to(contain(have_key("scope", equal("host.hostName = 'ci'"))))
    with it("is able to retrieve an event by ID"):
        ok, res = self.client.post_event(name=self.event_name,
                                         description="This event was created in a CI pipeline for the Python SDK library")
        expect((ok, res)).to(be_successful_api_call)
        # Fetch the freshly created event back by its server-assigned ID.
        event = res["event"]
        event_id = event["id"]
        ok, res = self.client.get_event(id=event_id)
        expect((ok, res)).to(be_successful_api_call)
        expect(res["event"]).to(equal(event))
    with it("is able to list the events happened without any filter"):
        time.sleep(3)  # Wait for the event to appear in the feed
        ok, res = self.client.get_events()
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events"))
    with it("is able to list the events created by the tests"):
        time.sleep(3)  # Wait for the event to appear in the feed
        ok, res = self.client.get_events(category=["custom"])
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", contain(have_keys(name=self.event_name))))
    with it("fails to retrieve the events with an incorrect category"):
        # Invalid filters return (False, error-string) rather than raising.
        ok, res = self.client.get_events(category=['incorrect_category'])
        expect(ok).to(be_false)
        expect(res).to(equal("Invalid category 'incorrect_category'"))
    with it("is able to retrieve events that match a status"):
        ok, res = self.client.get_events(status=['triggered'])
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", contain(have_keys(name=self.event_name))))
    with it("fails to retrieve the events with an incorrect status"):
        ok, res = self.client.get_events(status=['incorrect_status'])
        expect(ok).to(be_false)
        expect(res).to(equal("Invalid status 'incorrect_status'"))
    with it("retrieves the events correctly specifying direction 'before'"):
        ok, res = self.client.get_events(direction="before")
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_keys('events', 'total', 'matched'))
    with it("retrieves the events correctly specifying direction 'after'"):
        ok, res = self.client.get_events(direction="after")
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_keys('events', 'total', 'matched'))
    with it("fails to retrieve the events with an incorrect direction"):
        ok, res = self.client.get_events(direction="incorrect_direction")
        expect(ok).to(be_false)
        expect(res).to(equal("Invalid direction 'incorrect_direction', must be either 'before' or 'after'"))
    with it("is able to retrieve events by name"):
        ok, res = self.client.get_events(name=self.event_name)
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", contain(have_key("name", equal(self.event_name)))))
    with it("retrieves an empty list when the name provided is not found"):
        # Unknown names are not an error; the API returns an empty list.
        ok, res = self.client.get_events(name="RandomUnexistingEvent")
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", be_empty))
    with it("is able to retrieve the last event only"):
        ok, res = self.client.get_events(limit=1)
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", have_len(1)))
    with it("is able to retrieve the events from the last day"):
        # NOTE(review): the window used here is actually two weeks, not one
        # day as the example name suggests — confirm intended range.
        to_s = datetime.now()
        from_s = to_s - timedelta(weeks=2)
        ok, res = self.client.get_events(from_s=from_s, to_s=to_s)
        expect((ok, res)).to(be_successful_api_call)
        expect(res).to(have_key("events", have_len(be_above_or_equal(1))))
    with context("but the from and to parameters are incorrectly specified"):
        with it("returns an error if any of the parameters is specified but not the other"):
            # from_s and to_s are mutually required; supplying one alone fails.
            t = datetime.now() - timedelta(weeks=2)
            ok1, res1 = self.client.get_events(from_s=t)
            ok2, res2 = self.client.get_events(to_s=t)
            expect((ok1, res1)).not_to(be_successful_api_call)
            expect((ok2, res2)).not_to(be_successful_api_call)
            expect(res1).to(equal("only one of 'from_s' or 'to_s' has been specified, "
                                  "both are required when filtering by time"))
            expect(res2).to(equal("only one of 'from_s' or 'to_s' has been specified, "
                                  "both are required when filtering by time"))
        with it("returns an error if they are specified in the wrong order"):
            to_s = datetime.now()
            from_s = to_s - timedelta(weeks=2)
            ok, res = self.client.get_events(from_s=to_s, to_s=from_s)
            expect((ok, res)).not_to(be_successful_api_call)
            expect(res).to(equal("'from_s' must be lower than 'to_s'"))
    with it("is able to remove the event from the feed"):
        time.sleep(3)  # Wait for the event to appear in the feed
        # Find the events this spec created, then delete the first match.
        _, res = self.client.get_events(category=["custom"])
        events = [event for event in res["events"] if event["name"] == self.event_name]
        expect(events).to_not(be_empty)
        call = self.client.delete_event(events[0])
        expect(call).to(be_successful_api_call)
|
[
"expects.have_key",
"mamba.context",
"mamba.it",
"expects.have_len",
"expects.expect",
"expects.have_keys",
"time.sleep",
"expects.equal",
"datetime.timedelta",
"mamba.description",
"datetime.datetime.now",
"os.getenv",
"expects.be_above_or_equal"
] |
[((344, 383), 'mamba.description', 'description', (['"""Events v2"""', '"""integration"""'], {}), "('Events v2', 'integration')\n", (355, 383), False, 'from mamba import it, before, context, description\n'), ((650, 688), 'mamba.it', 'it', (['"""is able to create a custom event"""'], {}), "('is able to create a custom event')\n", (652, 688), False, 'from mamba import it, before, context, description\n'), ((928, 979), 'mamba.it', 'it', (['"""is able to create a custom event with a scope"""'], {}), "('is able to create a custom event with a scope')\n", (930, 979), False, 'from mamba import it, before, context, description\n'), ((1290, 1298), 'time.sleep', 'sleep', (['(2)'], {}), '(2)\n', (1295, 1298), False, 'from time import sleep\n'), ((1584, 1624), 'mamba.it', 'it', (['"""is able to retrieve an event by ID"""'], {}), "('is able to retrieve an event by ID')\n", (1586, 1624), False, 'from mamba import it, before, context, description\n'), ((2090, 2150), 'mamba.it', 'it', (['"""is able to list the events happened without any filter"""'], {}), "('is able to list the events happened without any filter')\n", (2092, 2150), False, 'from mamba import it, before, context, description\n'), ((2160, 2173), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2170, 2173), False, 'import time\n'), ((2367, 2420), 'mamba.it', 'it', (['"""is able to list the events created by the tests"""'], {}), "('is able to list the events created by the tests')\n", (2369, 2420), False, 'from mamba import it, before, context, description\n'), ((2430, 2443), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2440, 2443), False, 'import time\n'), ((2698, 2759), 'mamba.it', 'it', (['"""fails to retrieve the events with an incorrect category"""'], {}), "('fails to retrieve the events with an incorrect category')\n", (2700, 2759), False, 'from mamba import it, before, context, description\n'), ((2949, 3001), 'mamba.it', 'it', (['"""is able to retrieve events that match a status"""'], {}), "('is able 
to retrieve events that match a status')\n", (2951, 3001), False, 'from mamba import it, before, context, description\n'), ((3214, 3273), 'mamba.it', 'it', (['"""fails to retrieve the events with an incorrect status"""'], {}), "('fails to retrieve the events with an incorrect status')\n", (3216, 3273), False, 'from mamba import it, before, context, description\n'), ((3455, 3521), 'mamba.it', 'it', (['"""retrieves the events correctly specifying direction \'before\'"""'], {}), '("retrieves the events correctly specifying direction \'before\'")\n', (3457, 3521), False, 'from mamba import it, before, context, description\n'), ((3712, 3777), 'mamba.it', 'it', (['"""retrieves the events correctly specifying direction \'after\'"""'], {}), '("retrieves the events correctly specifying direction \'after\'")\n', (3714, 3777), False, 'from mamba import it, before, context, description\n'), ((3967, 4029), 'mamba.it', 'it', (['"""fails to retrieve the events with an incorrect direction"""'], {}), "('fails to retrieve the events with an incorrect direction')\n", (3969, 4029), False, 'from mamba import it, before, context, description\n'), ((4257, 4297), 'mamba.it', 'it', (['"""is able to retrieve events by name"""'], {}), "('is able to retrieve events by name')\n", (4259, 4297), False, 'from mamba import it, before, context, description\n'), ((4520, 4585), 'mamba.it', 'it', (['"""retrieves an empty list when the name provided is not found"""'], {}), "('retrieves an empty list when the name provided is not found')\n", (4522, 4585), False, 'from mamba import it, before, context, description\n'), ((4775, 4820), 'mamba.it', 'it', (['"""is able to retrieve the last event only"""'], {}), "('is able to retrieve the last event only')\n", (4777, 4820), False, 'from mamba import it, before, context, description\n'), ((4991, 5045), 'mamba.it', 'it', (['"""is able to retrieve the events from the last day"""'], {}), "('is able to retrieve the events from the last day')\n", (4993, 5045), 
False, 'from mamba import it, before, context, description\n'), ((5062, 5076), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5074, 5076), False, 'from datetime import datetime, timedelta\n'), ((5326, 5393), 'mamba.context', 'context', (['"""but the from and to parameters are incorrectly specified"""'], {}), "('but the from and to parameters are incorrectly specified')\n", (5333, 5393), False, 'from mamba import it, before, context, description\n'), ((6488, 6535), 'mamba.it', 'it', (['"""is able to remove the event from the feed"""'], {}), "('is able to remove the event from the feed')\n", (6490, 6535), False, 'from mamba import it, before, context, description\n'), ((6545, 6558), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (6555, 6558), False, 'import time\n'), ((1462, 1480), 'expects.have_key', 'have_key', (['"""events"""'], {}), "('events')\n", (1470, 1480), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2066, 2078), 'expects.equal', 'equal', (['event'], {}), '(event)\n', (2071, 2078), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2337, 2355), 'expects.have_key', 'have_key', (['"""events"""'], {}), "('events')\n", (2345, 2355), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2891, 2937), 'expects.equal', 'equal', (['"""Invalid category \'incorrect_category\'"""'], {}), '("Invalid category \'incorrect_category\'")\n', (2896, 2937), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3401, 3443), 'expects.equal', 'equal', (['"""Invalid status \'incorrect_status\'"""'], {}), '("Invalid status \'incorrect_status\'")\n', (3406, 3443), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, 
be_above_or_equal, have_len\n'), ((3661, 3700), 'expects.have_keys', 'have_keys', (['"""events"""', '"""total"""', '"""matched"""'], {}), "('events', 'total', 'matched')\n", (3670, 3700), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3916, 3955), 'expects.have_keys', 'have_keys', (['"""events"""', '"""total"""', '"""matched"""'], {}), "('events', 'total', 'matched')\n", (3925, 3955), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4161, 4255), 'expects.equal', 'equal', (['"""Invalid direction \'incorrect_direction\', must be either \'before\' or \'after\'"""'], {}), '(\n "Invalid direction \'incorrect_direction\', must be either \'before\' or \'after\'"\n )\n', (4166, 4255), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4735, 4763), 'expects.have_key', 'have_key', (['"""events"""', 'be_empty'], {}), "('events', be_empty)\n", (4743, 4763), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5101, 5119), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(2)'}), '(weeks=2)\n', (5110, 5119), False, 'from datetime import datetime, timedelta\n'), ((5408, 5486), 'mamba.it', 'it', (['"""returns an error if any of the parameters is specified but not the other"""'], {}), "('returns an error if any of the parameters is specified but not the other')\n", (5410, 5486), False, 'from mamba import it, before, context, description\n'), ((6127, 6190), 'mamba.it', 'it', (['"""returns an error if they are specified in the wrong order"""'], {}), "('returns an error if they are specified in the wrong order')\n", (6129, 6190), False, 'from mamba import it, before, context, description\n'), ((6211, 6225), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6223, 
6225), False, 'from datetime import datetime, timedelta\n'), ((459, 518), 'os.getenv', 'os.getenv', (['"""SDC_MONITOR_URL"""', '"""https://app.sysdigcloud.com"""'], {}), "('SDC_MONITOR_URL', 'https://app.sysdigcloud.com')\n", (468, 518), False, 'import os\n'), ((563, 593), 'os.getenv', 'os.getenv', (['"""SDC_MONITOR_TOKEN"""'], {}), "('SDC_MONITOR_TOKEN')\n", (572, 593), False, 'import os\n'), ((878, 890), 'expects.expect', 'expect', (['call'], {}), '(call)\n', (884, 890), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1242, 1254), 'expects.expect', 'expect', (['call'], {}), '(call)\n', (1248, 1254), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1394, 1411), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (1400, 1411), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1447, 1458), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (1453, 1458), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1490, 1511), 'expects.expect', 'expect', (["res['events']"], {}), "(res['events'])\n", (1496, 1511), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1820, 1837), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (1826, 1837), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1988, 2005), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (1994, 2005), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2042, 2062), 'expects.expect', 'expect', (["res['event']"], {}), 
"(res['event'])\n", (2048, 2062), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2269, 2286), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (2275, 2286), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2322, 2333), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (2328, 2333), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2558, 2575), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (2564, 2575), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2611, 2622), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (2617, 2622), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2844, 2854), 'expects.expect', 'expect', (['ok'], {}), '(ok)\n', (2850, 2854), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2876, 2887), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (2882, 2887), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3074, 3091), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (3080, 3091), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3127, 3138), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (3133, 3138), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3354, 3364), 'expects.expect', 'expect', (['ok'], {}), '(ok)\n', (3360, 3364), False, 'from expects import expect, have_key, 
contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3386, 3397), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (3392, 3397), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3593, 3610), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (3599, 3610), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3646, 3657), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (3652, 3657), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3848, 3865), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (3854, 3865), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3901, 3912), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (3907, 3912), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4114, 4124), 'expects.expect', 'expect', (['ok'], {}), '(ok)\n', (4120, 4124), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4146, 4157), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (4152, 4157), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4371, 4388), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (4377, 4388), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4424, 4435), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (4430, 4435), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), 
((4667, 4684), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (4673, 4684), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4720, 4731), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (4726, 4731), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4880, 4897), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (4886, 4897), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4933, 4944), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (4939, 4944), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4967, 4978), 'expects.have_len', 'have_len', (['(1)'], {}), '(1)\n', (4975, 4978), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5196, 5213), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (5202, 5213), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5249, 5260), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (5255, 5260), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5504, 5518), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5516, 5518), False, 'from datetime import datetime, timedelta\n'), ((5521, 5539), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(2)'}), '(weeks=2)\n', (5530, 5539), False, 'from datetime import datetime, timedelta\n'), ((5807, 5917), 'expects.equal', 'equal', (['"""only one of \'from_s\' or \'to_s\' has been specified, both are required when filtering by time"""'], {}), '(\n "only one of \'from_s\' or \'to_s\' has 
been specified, both are required when filtering by time"\n )\n', (5812, 5917), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5974, 6084), 'expects.equal', 'equal', (['"""only one of \'from_s\' or \'to_s\' has been specified, both are required when filtering by time"""'], {}), '(\n "only one of \'from_s\' or \'to_s\' has been specified, both are required when filtering by time"\n )\n', (5979, 6084), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((6254, 6272), 'datetime.timedelta', 'timedelta', ([], {'weeks': '(2)'}), '(weeks=2)\n', (6263, 6272), False, 'from datetime import datetime, timedelta\n'), ((6433, 6476), 'expects.equal', 'equal', (['"""\'from_s\' must be lower than \'to_s\'"""'], {}), '("\'from_s\' must be lower than \'to_s\'")\n', (6438, 6476), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((6761, 6775), 'expects.expect', 'expect', (['events'], {}), '(events)\n', (6767, 6775), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((6853, 6865), 'expects.expect', 'expect', (['call'], {}), '(call)\n', (6859, 6865), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((1541, 1570), 'expects.equal', 'equal', (['"""host.hostName = \'ci\'"""'], {}), '("host.hostName = \'ci\'")\n', (1546, 1570), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((2653, 2684), 'expects.have_keys', 'have_keys', ([], {'name': 'self.event_name'}), '(name=self.event_name)\n', (2662, 2684), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((3169, 3200), 
'expects.have_keys', 'have_keys', ([], {'name': 'self.event_name'}), '(name=self.event_name)\n', (3178, 3200), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5292, 5312), 'expects.be_above_or_equal', 'be_above_or_equal', (['(1)'], {}), '(1)\n', (5309, 5312), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5665, 5684), 'expects.expect', 'expect', (['(ok1, res1)'], {}), '((ok1, res1))\n', (5671, 5684), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5728, 5747), 'expects.expect', 'expect', (['(ok2, res2)'], {}), '((ok2, res2))\n', (5734, 5747), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5791, 5803), 'expects.expect', 'expect', (['res1'], {}), '(res1)\n', (5797, 5803), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((5958, 5970), 'expects.expect', 'expect', (['res2'], {}), '(res2)\n', (5964, 5970), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((6357, 6374), 'expects.expect', 'expect', (['(ok, res)'], {}), '((ok, res))\n', (6363, 6374), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((6418, 6429), 'expects.expect', 'expect', (['res'], {}), '(res)\n', (6424, 6429), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n'), ((4483, 4505), 'expects.equal', 'equal', (['self.event_name'], {}), '(self.event_name)\n', (4488, 4505), False, 'from expects import expect, have_key, contain, have_keys, be_empty, equal, be_false, be_above_or_equal, have_len\n')]
|
from datetime import datetime
from typing import Any, Dict, List, Optional
from enum import Enum
from pydantic import Field
from avaandmed.api_resources import ApiResource
from avaandmed.api_resources.users.user import User
class KeywordEmsCategory(ApiResource):
    """
    Association between a keyword and an EMS category.
    """
    id: int
    ems_category_id: int
    keyword_id: int
class Keyword(ApiResource):
    """
    Handles keywords serialization in Dataset model.
    """
    id: int
    name: str
    language: str
    # EMS-category association attached to this keyword.
    keyword_ems_category: KeywordEmsCategory
class Citation(ApiResource):
    """
    Handles citations serialization in Dataset model.
    """
    url: str   # link to the cited resource
    name: str  # display name of the citation
class Conformity(ApiResource):
    """
    Handles conformities serialization in Dataset model.
    """
    # Kept as plain strings (not datetime) -- the API supplies them as text.
    release_date: str
    specification: str
class Licence(ApiResource):
    """
    Handles licences serialization.
    """
    # NOTE(review): unlike most models here the id is a string, not an int.
    id: str
    name: str
    description: str
    code: Optional[str]
    identifier: Optional[str]
class CoordinateReferenceSystem(ApiResource):
    """
    Handles coordinateReferenceSystems serialization in Dataset model.
    """
    id: int
    uri: str  # URI identifying the CRS
class Category(ApiResource):
    """
    Handles categories serialization.
    """
    id: int
    name: str
    description: Optional[str]
    # Related EMS category ids, when supplied by the API.
    ems_ids: Optional[List[int]]
class EmsCategory(ApiResource):
    """
    Handles Ems categories serialization.
    """
    id: int
    name: str
class Region(ApiResource):
    """
    Handles regions serialization.
    """
    id: int
    name: str
    # Region geometry as text; exact format not visible here -- TODO confirm.
    coordinates: Optional[str]
class ProcessingStatus(str, Enum):
    """
    Processing status of a file or inquiry.

    Mixes in ``str`` so members compare equal to their raw values
    (e.g. ``ProcessingStatus.PENDING == 'pending'``), consistent with the
    other string enums in this module (Access, ResourceType, ...).
    """
    NONE = 'none'
    PENDING = 'pending'
    COMPLETED = 'completed'
    FAILED = 'failed'
class File(ApiResource):
    """
    Handles files field serialization in Dataset model.
    """
    id: str
    name: str
    mimetype: str
    # Size arrives as a string from the API, not an int.
    size: str
    dataset_id: str
    # Free-form metadata mapping; schema not fixed here.
    metadata: Dict[str, Any]
    processing_status: ProcessingStatus
    storage_filename: str
class UpdateIntervalUnit(str, Enum):
    """
    Handles updateIntervalUnit field deserialization in Dataset model.
    Serializes into Enum i.e UpdateIntervalUnit.MINUTE.
    """
    CONTINUAL = 'continual'
    MINUTE = 'minute'
    WORKDAY = 'workday'
    DAY = 'day'
    WEEK = 'week'
    MONTH = 'month'
    QUARTER = 'quarter'
    YEAR = 'year'
    AS_NEEDED = 'asNeeded'
    IRREGULAR = 'irregular'
    NOT_PLANNED = 'notPlanned'
    UNKNOWN = 'unknown'
    NEVER = 'never'
class Access(str, Enum):
    """
    Handles access field serialization in Dataset model.
    Serializes into Enum i.e Access.PUBLIC
    """
    PUBLIC = 'public'
    PROTECTED = 'protected'
    PRIVATE = 'private'
class ResourceType(str, Enum):
    """
    Handles resourceType field serialization in Dataset model.
    Serializes into Enum i.e ResourceType.DATASET
    """
    DATASET = 'dataset'
    SERIES = 'series'
    SERVICE = 'service'
class TopicCategory(str, Enum):
"""
Handles topicCategores field serialization in Dataset model.
Seriliazles into Enum i.e TopicCategoty.BIOTA
"""
BIOTA = 'biota'
BOUNDARIES = 'boundaries'
CLIMATOLOGY_METEROROLOGY_ATMOSPHERE = 'climatologyMeteorologyAtmosphere'
ECONOMY = 'economy'
ELEVATION = 'elevation'
ENVIRONMENT = 'environment'
FARMING = 'farming'
GEO_SCIENTIFIC_INFORMATION = 'geoscientificInformation'
HEALTH = 'health'
IMAGERY_BASE_MAPS_EARTH_COVER = 'imageryBaseMapsEarthCover'
INLAND_WATERS = 'inlandWaters'
INTELLIGENCE_MILITARY = 'intelligenceMilitary'
LOCATION = 'location'
OCEANS = 'oceans'
PLANNING_CADASTRE = 'planningCadastre'
SOCIETY = 'society'
STRUCTURE = 'structure'
TRANSPORTATION = 'transportation'
UTILITIES_COMMUNICATIOn = 'utilitiesCommunication'
class Notification(str, Enum):
    """
    Handles notifications field serialization in Organization model.
    """
    DATASET_COMMENTED = 'DATASET_COMMENTED'
    DATASET_RATED = 'DATASET_RATED'
    DATASET_ACCESS_REQUEST = 'DATASET_ACCESS_REQUEST'
    DATA_WISH_NEW = 'DATA_WISH_NEW'
    DATASET_PRIVACY_VIOLATION = 'DATASET_PRIVACY_VIOLATION'
class FileColumn(ApiResource):
    """
    Handles field serialization from /columns endpoint.

    Every field is optional: the endpoint may omit any of them.
    """
    column: Optional[str]
    type: Optional[str]
    description: Optional[str]
    api_field_name: Optional[str]
    unit: Optional[str]
    required: Optional[bool]
    private: Optional[bool]
    unique: Optional[bool]
class InformationHolder(ApiResource):
    """
    Minimal reference to the organization holding the information.
    """
    slug: str
    name: str
class SearchResult(ApiResource):
    """
    Handles search results serialization.

    The ``*_et`` / ``*_en`` pairs appear to be per-language variants of the
    same value (presumably Estonian/English) -- TODO confirm against the API.
    """
    id: Optional[str]
    slug: Optional[str]
    name: Optional[str]
    name_et: Optional[str]
    name_en: Optional[str]
    description: Optional[str]
    description_et: Optional[str]
    description_en: Optional[str]
    information_holder: Optional[InformationHolder]
    update_interval_frequency: Optional[int]
    update_interval_unit: Optional[UpdateIntervalUnit]
    created_at: Optional[str]
    updated_at: Optional[str]
    keywords: Optional[List[str]]
    keywords_et: Optional[List[str]]
    keywords_en: Optional[List[str]]
    categories: Optional[List[str]]
    categories_et: Optional[List[str]]
    categories_en: Optional[List[str]]
Preview = List[Dict[str, Any]]
class PartialDatasetInfo(ApiResource):
    """
    Reduced dataset reference embedded in other models (e.g. Inquiry).
    """
    id: Optional[str]
    name_et: Optional[str]
    name_en: Optional[str]
    slug: Optional[str]
    # NOTE(review): 'organiztion_id' is misspelled (organization_id); renaming
    # would change the serialized field name, so it is only flagged here.
    organiztion_id: Optional[str]
    user_id: Optional[str]
    name: Optional[str]
class Inquiry(ApiResource):
    """
    A user inquiry about a dataset (also reused for privacy violations and
    access-permission requests via aliases below this class).
    """
    id: Optional[str]
    user_id: Optional[str]
    description: Optional[str]
    dataset_id: Optional[str]
    status: ProcessingStatus
    created_at: Optional[str]
    dataset: Optional[PartialDatasetInfo]
    user: Optional[User]
    seen: Optional[bool]
# Privacy violations and access-permission requests share the Inquiry shape.
PrivacyViolation = Inquiry
AccessPermission = Inquiry
class Polynomial(ApiResource):
    """
    A polynomial index entry referring to one file column.
    """
    id: int
    column: str
class Identifier(ApiResource):
    """
    An identifier index entry: a column and the identifier assigned to it.
    """
    id: int
    column: str
    identifier: str
class Index(ApiResource):
    """
    Index information for a file: its polynomial and identifier entries.
    """
    polynomial: List[Polynomial]
    identifier: List[Identifier]
FileErrors = List[Dict[str, Any]]
class DatasetRating(ApiResource):
    """
    A single rating given to a dataset (quality and metadata scores).
    """
    id: int
    quality_rating: Optional[int]
    metadata_rating: Optional[int]
    description: Optional[str]
DatasetRatingList = List[DatasetRating]
class Language(ApiResource):
    """
    A language entry: code plus human-readable name.
    """
    code: str
    name: str
class KeywordInfo(ApiResource):
    """
    Keyword lookup entry with an optional EMS reference.
    """
    id: int
    name: str
    ems_id: Optional[str]
class DatasetMetadata(ApiResource):
    """
    Payload model for a dataset's metadata.

    Fields assigned ``Field(...)`` are required; the remaining fields are
    optional and default to ``None`` (or an empty list).
    """
    name_et: str = Field(...)
    name_en: str = Field(...)
    description_et: str = Field(...)
    description_en: str = Field(...)
    maintainer: str = Field(...)
    maintainer_email: str = Field(...)
    maintainer_phone: str = Field(...)
    # TODO: Find a way to map ids to actual values
    keyword_ids: List[int] = Field(...)
    # TODO: Find a way to map ids to actual values
    category_ids: List[int] = Field(...)
    # TODO: Find a way to map ids to actual values
    region_ids: List[int] = Field(...)
    data_from: datetime = Field(...)
    available_to: datetime = Field(...)
    update_interval_unit: UpdateIntervalUnit = Field(...)
    update_interval_frequency: int = Field(...)
    # optional fields
    conformities: Optional[List[Conformity]] = []
    lineage: Optional[str]
    spatial_representation_type: Optional[str]
    spatial_data_service_type: Optional[str]
    topic_categories: Optional[List[TopicCategory]] = []
    pixel_size: Optional[int]
    coordinate_reference_system_ids: Optional[List[int]] = []
    # Bounding box of the spatial extent, stored as strings.
    south_latitude: Optional[str]
    north_latitude: Optional[str]
    west_longitude: Optional[str]
    east_longitude: Optional[str]
    language: Optional[str]
    qualified_attribution: Optional[str]
    was_generated_by: Optional[str]
    spatial_resolution: Optional[str]
    temporal_resolution: Optional[str]
    maturity: Optional[str]
    parent_dataset_ids: Optional[List[str]]
    child_dataset_ids: Optional[List[str]]
    version_notes: Optional[str]
    data_to: Optional[datetime]
    landing_page: Optional[str]
    resource_type: Optional[ResourceType]
|
[
"pydantic.Field"
] |
[((6523, 6533), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6528, 6533), False, 'from pydantic import Field\n'), ((6553, 6563), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6558, 6563), False, 'from pydantic import Field\n'), ((6590, 6600), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6595, 6600), False, 'from pydantic import Field\n'), ((6627, 6637), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6632, 6637), False, 'from pydantic import Field\n'), ((6660, 6670), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6665, 6670), False, 'from pydantic import Field\n'), ((6699, 6709), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6704, 6709), False, 'from pydantic import Field\n'), ((6738, 6748), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6743, 6748), False, 'from pydantic import Field\n'), ((6829, 6839), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6834, 6839), False, 'from pydantic import Field\n'), ((6921, 6931), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (6926, 6931), False, 'from pydantic import Field\n'), ((7011, 7021), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (7016, 7021), False, 'from pydantic import Field\n'), ((7048, 7058), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (7053, 7058), False, 'from pydantic import Field\n'), ((7088, 7098), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (7093, 7098), False, 'from pydantic import Field\n'), ((7146, 7156), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (7151, 7156), False, 'from pydantic import Field\n'), ((7194, 7204), 'pydantic.Field', 'Field', (['...'], {}), '(...)\n', (7199, 7204), False, 'from pydantic import Field\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Last Change: Tue Jul 17 05:00 PM 2007 J
# The code and descriptive text is copyrighted and offered under the terms of
# the BSD License from the authors; see below. However, the actual dataset may
# have a different origin and intellectual property status. See the SOURCE and
# COPYRIGHT variables for this information.
# Copyright (c) 2007 <NAME> <<EMAIL>>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the author nor the names of any contributors may be used
# to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from ..vtypes import integer
from ..utils import standard_properties, standard_classification_loader
__docformat__ = 'restructuredtext'
COPYRIGHT = """This is public domain. """
name = "German Dataset"
short_name = "German Dataset"
url = 'http://www.liacc.up.pt/ML/old/statlog/datasets.html'
SOURCE = """
http://www.liacc.up.pt/ML/old/statlog/datasets.html
Professor Dr. <NAME>
Institut für Statistik und Ökonometrie Universität Hamburg
FB Wirtschaftswissenschaften
Von-Melle-Park 5
2000 Hamburg 13
Two datasets are provided. the original dataset, in the form provided by Prof.
Hofmann, contains categorical/symbolic attributes and is in the file
"german.dat". For algorithms that need numerical attributes, Strathclyde
University produced the file "german.numer". This file has been edited and
several indicator variables added to make it suitable for algorithms which
cannot cope with categorical variables. Several attributes that are ordered
categorical (such as attribute 17) have been coded as integer. This was the
form used by StatLog.
Here (milksets), only the numeric datasets are provided.
"""
notes = """
Number of Instances: 1000. 700 for class 0 (good credit) and 300 for class 1
(bad credit).
Number of Attributes: 24.
label: 0 for good credit, +1 for bad credit
"""
# Class names indexed by label value (0 = good credit, 1 = bad credit).
label_names = ['good_credit', 'bad_credit']
# This (numeric) edition of the dataset has no missing values.
missing_values = False
# One value-type descriptor per feature column (24 features: feat1..feat24).
value_types = [
    # FIXME
    # This is wrong! Not all outputs are integers (some are categorical),
    # but the above does not give enough information to know which features are what.
    integer('feat{}'.format(i+1)) for i in range(24)
]
@standard_classification_loader(name)
def load(force_contiguous=True):
    """load the german data and returns them.

    :param force_contiguous: bool, if True the transposed feature matrix is
        copied so the returned array is C-contiguous.
    :returns:
        data: dict
            Contains the following values:
                'data' : the actual data
                'label' : label[i] is the label index of data[i]
                'class' : class[label[i]] is the label name of data[i]
    """
    import pickle
    import gzip
    from os.path import dirname, join

    # Close the gzip handle deterministically instead of leaking it.
    with gzip.GzipFile(join(dirname(__file__), 'data', 'german.pp.gz')) as fh:
        features, labels = pickle.load(fh)

    # Sort feature names so column order is stable across runs.
    featnames = sorted(features.keys())
    nfeatures = [list(map(float, features[k])) for k in featnames]
    features = np.array(nfeatures).T
    if force_contiguous:
        features = features.copy()
    # np.int was removed in NumPy 1.24; the builtin int is the supported
    # spelling and produces the same platform default integer dtype.
    labels = np.array([lab == '+1' for lab in labels]).astype(int)
    return features, labels
|
[
"os.path.dirname",
"numpy.array"
] |
[((4312, 4331), 'numpy.array', 'np.array', (['nfeatures'], {}), '(nfeatures)\n', (4320, 4331), True, 'import numpy as np\n'), ((4432, 4475), 'numpy.array', 'np.array', (["[(lab == '+1') for lab in labels]"], {}), "([(lab == '+1') for lab in labels])\n", (4440, 4475), True, 'import numpy as np\n'), ((4094, 4111), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (4101, 4111), False, 'from os.path import dirname, join\n')]
|
import requests
import os
# get your API KEY from <https://deepai.org>
# you need to make a free account
MY_API_KEY = 'put-your-API-key-here'

# path where you keep your pictures
# can be a folder of folders of pictures, or just a folder of pictures
# make sure to end it in /
mypath = "/path/to/the/folder/with/your/pictures/"

# create list of all folders in "mypath"
(_, folders, _) = next(os.walk(mypath))

# if there are no sub-folders, process "mypath" itself
if not folders:
    folders = [""]

for myfolder in folders:
    # create list of all files in "myfolder"
    (_, _, files) = next(os.walk(mypath + myfolder + "/"))

    for myfile in files:
        # colorize; 'with' guarantees the image handle is closed again
        with open(mypath + myfolder + "/" + myfile, 'rb') as image:
            r = requests.post(
                "https://api.deepai.org/api/colorizer",
                files={'image': image},
                headers={'api-key': MY_API_KEY},
            )

        # download file produced by colorize website
        try:
            dl = requests.get(r.json()['output_url'])
        except (KeyError, ValueError, requests.RequestException):
            # Skip this file: the old bare 'except' fell through and then
            # used a stale or undefined 'dl' (NameError on first failure).
            print("Error with file: " + myfolder + "/" + myfile)
            continue

        # make the "colorized/myfolder" output folder if it doesn't exist
        # (exist_ok also creates the parent "colorized" folder in one call)
        os.makedirs(mypath + 'colorized/' + myfolder, exist_ok=True)

        # save downloaded file to the "colorized/myfolder" folder
        with open(mypath + "colorized/" + myfolder + "/" + myfile, 'wb') as f:
            f.write(dl.content)
    # end files for
# end folders for
|
[
"os.walk",
"os.path.exists",
"os.makedirs"
] |
[((394, 409), 'os.walk', 'os.walk', (['mypath'], {}), '(mypath)\n', (401, 409), False, 'import os\n'), ((575, 607), 'os.walk', 'os.walk', (["(mypath + myfolder + '/')"], {}), "(mypath + myfolder + '/')\n", (582, 607), False, 'import os\n'), ((1166, 1202), 'os.path.exists', 'os.path.exists', (["(mypath + 'colorized')"], {}), "(mypath + 'colorized')\n", (1180, 1202), False, 'import os\n'), ((1216, 1249), 'os.makedirs', 'os.makedirs', (["(mypath + 'colorized')"], {}), "(mypath + 'colorized')\n", (1227, 1249), False, 'import os\n'), ((1319, 1367), 'os.path.exists', 'os.path.exists', (["(mypath + 'colorized/' + myfolder)"], {}), "(mypath + 'colorized/' + myfolder)\n", (1333, 1367), False, 'import os\n'), ((1381, 1426), 'os.makedirs', 'os.makedirs', (["(mypath + 'colorized/' + myfolder)"], {}), "(mypath + 'colorized/' + myfolder)\n", (1392, 1426), False, 'import os\n')]
|
# -*- coding: utf-8 -*-
import sqlite3
class DataBase:
    """Small sqlite-backed lookup table of foods and their environmental
    footprint (CO2 footprint, water use, local/global traffic-light rating).

    The database file is (re)seeded on construction.
    """

    def __init__(self, db_file):
        """Store the db path, define the seed rows and initialize the table.

        :param db_file: path to the sqlite database file.
        """
        self.db_file = db_file
        # (food, type, co2, water, local, global)
        self.rows = [("lamb", "meat", 39.2, 10400, "red", "red"),
                     ("beef", "meat", 27, 15400, "red", "red"),
                     ("pork", "meat", 12.1, 6000, "yellow", "yellow"),
                     ("turkey", "meat", 10.9, 4300, "green", "green"),
                     ("chicken", "meat", 6.9, 4300, "green", "green"),
                     ("eggs", "gps", 4.8, 3265, "green", "green"),
                     ("rice", "gps", 2.7, 2500, "red", "green"),
                     ("tofu", "gps", 2, 926, "green", "green"),
                     ("beans", "gps", 2, 4055, "red", "green"),
                     ("lentils", "gps", 0.9, 4055, "red", "green"),
                     ("peanut butter", "gps", 2.9, 628, "green", "green"),
                     ("potatoes", "gps", 2.9, 322, "green", "green"),
                     ("bread", "gps", 0.75, 1608, "yellow", "green"),
                     ("tomatoes", "fruitvegg", 1.1, 322, "green", "green"),
                     ("nuts", "fruitvegg", 2.3, 9063, "red", "yellow"),
                     ("broccoli", "fruitvegg", 2, 322, "green", "green"),
                     ("strawberries", "fruitvegg", 0.3, 322, "green", "green"),
                     ("apple", "fruitvegg", 0.55, 962, "green", "green"),
                     ("milk", "dairy", 1.9, 3180, "yellow", "green"),
                     ("cheese", "dairy", 13.5, 3178, "yellow", "green"),
                     ("yogurt", "dairy", 2.2, 778.05, "green", "green"),
                     ("butter", "dairy", 23.8, 5553, "red", "yellow")]
        self.init_db()

    def init_db(self):
        """Drop and re-create the ``data`` table, then insert the seed rows."""
        conn = sqlite3.connect(self.db_file)
        try:
            cursor = conn.cursor()
            cursor.execute("drop table if exists data;")
            sql = """create table if not exists data (
                    id integer primary key,
                    food text not null,
                    type text not null,
                    co2 decimal not null,
                    water integer not null,
                    local text check (local in ('red', 'yellow', 'green')),
                    global text check (global in ('red', 'yellow', 'green')));"""
            cursor.execute(sql)
            insert = """insert into data (food, type, co2, water, local, global)
                    values (?,?,?,?,?,?);"""
            cursor.executemany(insert, self.rows)
            conn.commit()
            cursor.close()
        finally:
            # Close even when an execute fails, so the handle never leaks.
            conn.close()

    def get_food(self, food):
        """Look up one food by name (case-insensitive).

        :param food: food name, e.g. "Lamb".
        :return: dict with keys id/food/type/carbon/water/local/global,
                 or None when the food is unknown. The short ``type`` codes
                 are expanded to human-readable labels.
        """
        conn = sqlite3.connect(self.db_file)
        try:
            cursor = conn.cursor()
            # Parameterized query; no commit needed for a read.
            cursor.execute("select * from data where food = ?", (food.lower(),))
            result = cursor.fetchone()
            cursor.close()
        finally:
            conn.close()

        if result is None:
            return None

        response = {"id": result[0],
                    "food": result[1],
                    "type": result[2],
                    "carbon": result[3],
                    "water": result[4],
                    "local": result[5],
                    "global": result[6]}
        # Expand the internal type codes to display labels.
        if response["type"] == "gps":
            response["type"] = "Grains, Proteins, and Starch"
        elif response["type"] == "fruitvegg":
            response["type"] = "Fruits and Vegetables"
        else:
            response["type"] = response["type"].title()
        return response

    def fetch(self):
        """Return all food names as a list of 1-tuples (sqlite row shape)."""
        conn = sqlite3.connect(self.db_file)
        try:
            cursor = conn.cursor()
            cursor.execute("select food from data;")
            response = cursor.fetchall()
            cursor.close()
        finally:
            conn.close()
        return response
|
[
"sqlite3.connect"
] |
[((1733, 1762), 'sqlite3.connect', 'sqlite3.connect', (['self.db_file'], {}), '(self.db_file)\n', (1748, 1762), False, 'import sqlite3\n'), ((2414, 2443), 'sqlite3.connect', 'sqlite3.connect', (['self.db_file'], {}), '(self.db_file)\n', (2429, 2443), False, 'import sqlite3\n'), ((3411, 3440), 'sqlite3.connect', 'sqlite3.connect', (['self.db_file'], {}), '(self.db_file)\n', (3426, 3440), False, 'import sqlite3\n')]
|
#!/usr/bin/env python
"""print repo topics"""
import click
import github_topics
MODULE_NAME = "github_topics.get"
USAGE = 'python -m %s fullname' % MODULE_NAME
# was: 'python -m %s' % USAGE, which produced a doubled
# "python -m python -m ..." program name
PROG_NAME = 'python -m %s' % MODULE_NAME


@click.command()
@click.argument('fullname')
def _cli(fullname):
    """Print the repository's topics, sorted, one per line."""
    topics = github_topics.get(fullname)
    if topics:
        print("\n".join(sorted(topics)))


if __name__ == "__main__":
    _cli()
|
[
"github_topics.get",
"click.argument",
"click.command"
] |
[((199, 214), 'click.command', 'click.command', ([], {}), '()\n', (212, 214), False, 'import click\n'), ((216, 242), 'click.argument', 'click.argument', (['"""fullname"""'], {}), "('fullname')\n", (230, 242), False, 'import click\n'), ((276, 303), 'github_topics.get', 'github_topics.get', (['fullname'], {}), '(fullname)\n', (293, 303), False, 'import github_topics\n')]
|
#
# Copyright (c) 2018 Juniper Networks, Inc. All rights reserved.
#
"""Contains common logger initialization to be used in job mgr internals."""
import logging
DEFAULT_JOB_MGR_LOG_PATH = '/var/log/contrail/contrail-fabric-ansible.log'
DATE_FORMAT = "%m/%d/%Y %I:%M:%S %p"
LOGGING_FORMAT = '%(asctime)s [%(name)s] [%(levelname)s]: %(message)s'
def job_mgr_logger(name, ctx=None):
    """Return a logger that writes to the job manager log file.

    :param name: str, logger name (typically the module name).
    :param ctx: unused; kept for backward compatibility with existing callers.
    :returns: logging.Logger configured with a file handler.
    """
    logger = logging.getLogger(name)
    # logging.getLogger returns the same object for the same name, so guard
    # against attaching a second FileHandler on repeated calls (which would
    # duplicate every log line). Also drop the old no-op 'name = name' /
    # 'ctx = ctx' statements.
    if not logger.handlers:
        handler = logging.FileHandler(DEFAULT_JOB_MGR_LOG_PATH)
        handler.setFormatter(
            logging.Formatter(fmt=LOGGING_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(handler)
    return logger
|
[
"logging.Formatter",
"logging.FileHandler",
"logging.getLogger"
] |
[((430, 453), 'logging.getLogger', 'logging.getLogger', (['name'], {}), '(name)\n', (447, 453), False, 'import logging\n'), ((468, 513), 'logging.FileHandler', 'logging.FileHandler', (['DEFAULT_JOB_MGR_LOG_PATH'], {}), '(DEFAULT_JOB_MGR_LOG_PATH)\n', (487, 513), False, 'import logging\n'), ((530, 588), 'logging.Formatter', 'logging.Formatter', ([], {'fmt': 'LOGGING_FORMAT', 'datefmt': 'DATE_FORMAT'}), '(fmt=LOGGING_FORMAT, datefmt=DATE_FORMAT)\n', (547, 588), False, 'import logging\n')]
|
# Generated by Django 3.0.5 on 2020-04-02 10:28
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial migration: creates the PassModel table holding one
    movement-pass application (applicant details, pass category/sub-category
    and uploaded photo / ID-proof attachments).

    Auto-generated by Django 3.0.5; do not edit field definitions by hand --
    create a follow-up migration instead.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='PassModel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('district', models.CharField(max_length=20, null=True)),
                ('name', models.CharField(max_length=200, null=True)),
                ('email', models.CharField(max_length=200, null=True)),
                ('vehiclenumber', models.CharField(max_length=200, null=True)),
                ('phonenumber', models.IntegerField(null=True)),
                ('aadharcardnumber', models.IntegerField(null=True)),
                ('address', models.CharField(max_length=200, null=True)),
                ('reason', models.CharField(max_length=200, null=True)),
                ('issuedate', models.DateTimeField(default=django.utils.timezone.now)),
                ('passcategory', models.CharField(choices=[('Essential Services Pass', 'Essential Services Pass'), ('Emergency Services Pass', 'Emergency Services Pass')], max_length=30)),
                ('subcategory', models.CharField(choices=[('ATM/Banking', 'ATM/Banking'), ('Delivery Worker', 'Delivery Worker'), ('Fruit/Vegetable Vendor', 'Fruit/Vegetable Vendor'), ('Govt Officials', 'Govt Officials'), ('Grocery Vendor', 'Grocery Vendor'), ('Milk Vendor', 'Milk Vendor'), ('Health Worker', 'Health Worker'), ('IT/Tele Communication', 'IT/Tele Communication'), ('Municipal Services', 'Municipal Services'), ('Power/Electricity', 'Power/Electricity'), ('Sanitation', 'Sanitation'), ('Businessman', 'Businessman')], max_length=30)),
                ('attachphoto', models.ImageField(upload_to='profile_pics')),
                ('attachidproof', models.ImageField(upload_to='id_proof')),
            ],
        ),
    ]
|
[
"django.db.models.CharField",
"django.db.models.AutoField",
"django.db.models.ImageField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] |
[((334, 427), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (350, 427), False, 'from django.db import migrations, models\n'), ((455, 497), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(20)', 'null': '(True)'}), '(max_length=20, null=True)\n', (471, 497), False, 'from django.db import migrations, models\n'), ((525, 568), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)'}), '(max_length=200, null=True)\n', (541, 568), False, 'from django.db import migrations, models\n'), ((597, 640), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)'}), '(max_length=200, null=True)\n', (613, 640), False, 'from django.db import migrations, models\n'), ((677, 720), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)'}), '(max_length=200, null=True)\n', (693, 720), False, 'from django.db import migrations, models\n'), ((755, 785), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (774, 785), False, 'from django.db import migrations, models\n'), ((825, 855), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'null': '(True)'}), '(null=True)\n', (844, 855), False, 'from django.db import migrations, models\n'), ((886, 929), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)'}), '(max_length=200, null=True)\n', (902, 929), False, 'from django.db import migrations, models\n'), ((959, 1002), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'null': '(True)'}), '(max_length=200, null=True)\n', (975, 1002), False, 'from django.db import migrations, models\n'), ((1035, 1090), 'django.db.models.DateTimeField', 
'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (1055, 1090), False, 'from django.db import migrations, models\n'), ((1126, 1287), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('Essential Services Pass', 'Essential Services Pass'), (\n 'Emergency Services Pass', 'Emergency Services Pass')]", 'max_length': '(30)'}), "(choices=[('Essential Services Pass',\n 'Essential Services Pass'), ('Emergency Services Pass',\n 'Emergency Services Pass')], max_length=30)\n", (1142, 1287), False, 'from django.db import migrations, models\n'), ((1314, 1861), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('ATM/Banking', 'ATM/Banking'), ('Delivery Worker', 'Delivery Worker'), (\n 'Fruit/Vegetable Vendor', 'Fruit/Vegetable Vendor'), ('Govt Officials',\n 'Govt Officials'), ('Grocery Vendor', 'Grocery Vendor'), ('Milk Vendor',\n 'Milk Vendor'), ('Health Worker', 'Health Worker'), (\n 'IT/Tele Communication', 'IT/Tele Communication'), (\n 'Municipal Services', 'Municipal Services'), ('Power/Electricity',\n 'Power/Electricity'), ('Sanitation', 'Sanitation'), ('Businessman',\n 'Businessman')]", 'max_length': '(30)'}), "(choices=[('ATM/Banking', 'ATM/Banking'), (\n 'Delivery Worker', 'Delivery Worker'), ('Fruit/Vegetable Vendor',\n 'Fruit/Vegetable Vendor'), ('Govt Officials', 'Govt Officials'), (\n 'Grocery Vendor', 'Grocery Vendor'), ('Milk Vendor', 'Milk Vendor'), (\n 'Health Worker', 'Health Worker'), ('IT/Tele Communication',\n 'IT/Tele Communication'), ('Municipal Services', 'Municipal Services'),\n ('Power/Electricity', 'Power/Electricity'), ('Sanitation', 'Sanitation'\n ), ('Businessman', 'Businessman')], max_length=30)\n", (1330, 1861), False, 'from django.db import migrations, models\n'), ((1864, 1907), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""profile_pics"""'}), "(upload_to='profile_pics')\n", (1881, 1907), False, 'from django.db import 
migrations, models\n'), ((1944, 1983), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': '"""id_proof"""'}), "(upload_to='id_proof')\n", (1961, 1983), False, 'from django.db import migrations, models\n')]
|
"""Includes function to compare abstracts."""
from weighted_levenshtein import lev
def lev_similarity(aa: str, bb: str) -> float:
    """
    Get a Levenshtein similarity score.

    :param aa: first string
    :param bb: second string
    :return: The similarity of the two strings (0=bad, 1=match):
        1- lev(aa,bb)/max(len(aa), len(bb))
    """
    # Since weighted levenshtein can't handle unicode,
    # convert to ASCII first:
    def convert_to_ascii(text: str, label: str) -> bytes:
        try:
            # str.encode returns bytes; 'ignore' silently drops non-ASCII.
            return text.encode('ascii', 'ignore')
        except Exception as ex:
            # BUG FIX: original message interpolated `aa` instead of `text`
            # and contained stray literal 'f' characters ("f{label}").
            raise Exception(f'Could not encode {label}: {text!r}') from ex
    aa = convert_to_ascii(aa, 'aa')
    bb = convert_to_ascii(bb, 'bb')
    # TODO, consider penalizing whitespace alterations less
    return 1.0 - lev(aa, bb)/max(len(aa), len(bb))
|
[
"weighted_levenshtein.lev"
] |
[((853, 864), 'weighted_levenshtein.lev', 'lev', (['aa', 'bb'], {}), '(aa, bb)\n', (856, 864), False, 'from weighted_levenshtein import lev\n')]
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MPEG SA3D box processing classes.
Enables the injection of an SA3D MPEG-4. The SA3D box specification
conforms to that outlined in docs/spatial-audio-rfc.md
"""
import struct
from spatialmedia.mpeg import box
from spatialmedia.mpeg import constants
def load(fh, position=None, end=None):
    """ Loads the SA3D box located at position in an mp4 file.

    Args:
      fh: file handle, input file handle.
      position: int or None, current file position.
      end: int or None, file offset the box must not extend past.

    Returns:
      new_box: box, SA3D box loaded from the file location or None.
    """
    if position is None:
        position = fh.tell()

    fh.seek(position)
    new_box = SA3DBox()
    new_box.position = position
    # Box header: 4-byte big-endian size followed by the 4-byte tag.
    size = struct.unpack(">I", fh.read(4))[0]
    name = fh.read(4)

    if (name != constants.TAG_SA3D):
        print("Error: box is not an SA3D box.")
        return None

    # BUG FIX: `end` defaults to None, and `int > None` raises TypeError on
    # Python 3. Only bounds-check when an explicit bound was supplied.
    if (end is not None and position + size > end):
        print("Error: SA3D box size exceeds bounds.")
        return None

    new_box.content_size = size - new_box.header_size
    new_box.version = struct.unpack(">B", fh.read(1))[0]
    new_box.ambisonic_type = struct.unpack(">B", fh.read(1))[0]
    # Top bit of the type byte flags head-locked stereo; low 7 bits are type.
    new_box.head_locked_stereo = (new_box.ambisonic_type & int('10000000', 2) != 0)
    new_box.ambisonic_type = new_box.ambisonic_type & int('01111111', 2)
    new_box.ambisonic_order = struct.unpack(">I", fh.read(4))[0]
    new_box.ambisonic_channel_ordering = struct.unpack(">B", fh.read(1))[0]
    new_box.ambisonic_normalization = struct.unpack(">B", fh.read(1))[0]
    new_box.num_channels = struct.unpack(">I", fh.read(4))[0]
    for i in range(0, new_box.num_channels):
        new_box.channel_map.append(
            struct.unpack(">I", fh.read(4))[0])
    return new_box
class SA3DBox(box.Box):
    # Enumerations from the SA3D spec mapped to their wire values.
    ambisonic_types = {'periphonic': 0}
    ambisonic_orderings = {'ACN': 0}
    ambisonic_normalizations = {'SN3D': 0}

    def __init__(self):
        """Initialize an empty SA3D box with default field values."""
        box.Box.__init__(self)
        self.name = constants.TAG_SA3D
        self.header_size = 8
        self.version = 0
        self.ambisonic_type = 0
        self.head_locked_stereo = False
        self.ambisonic_order = 0
        self.ambisonic_channel_ordering = 0
        self.ambisonic_normalization = 0
        self.num_channels = 0
        self.channel_map = list()

    @staticmethod
    def create(num_channels, audio_metadata):
        """Build an SA3D box from a metadata dict.

        content_size is accumulated field by field; the trailing comments
        give each field's on-disk width.
        """
        new_box = SA3DBox()
        new_box.header_size = 8
        new_box.name = constants.TAG_SA3D
        new_box.version = 0                     # uint8
        new_box.content_size += 1               # uint8
        new_box.ambisonic_type = SA3DBox.ambisonic_types[
            audio_metadata["ambisonic_type"]]
        new_box.head_locked_stereo = audio_metadata["head_locked_stereo"]
        new_box.content_size += 1               # uint8
        new_box.ambisonic_order = audio_metadata["ambisonic_order"]
        new_box.content_size += 4               # uint32
        new_box.ambisonic_channel_ordering = SA3DBox.ambisonic_orderings[
            audio_metadata["ambisonic_channel_ordering"]]
        new_box.content_size += 1               # uint8
        new_box.ambisonic_normalization = SA3DBox.ambisonic_normalizations[
            audio_metadata["ambisonic_normalization"]]
        new_box.content_size += 1               # uint8
        new_box.num_channels = num_channels
        new_box.content_size += 4               # uint32

        channel_map = audio_metadata["channel_map"]
        for channel_element in channel_map:
            new_box.channel_map.append(channel_element)
            new_box.content_size += 4           # uint32
        return new_box

    def ambisonic_type_name(self):
        """Return the symbolic name for the stored ambisonic type value."""
        return next((key for key,value in SA3DBox.ambisonic_types.items()
                     if value==self.ambisonic_type))

    def ambisonic_channel_ordering_name(self):
        """Return the symbolic name for the stored channel-ordering value."""
        return next((key for key,value in SA3DBox.ambisonic_orderings.items()
                     if value==self.ambisonic_channel_ordering))

    def ambisonic_normalization_name(self):
        """Return the symbolic name for the stored normalization value."""
        return next((key for key,value in SA3DBox.ambisonic_normalizations.items()
                     if value==self.ambisonic_normalization))

    def print_box(self, console):
        """ Prints the contents of this spatial audio (SA3D) box to the
            console.
        """
        ambisonic_type = self.ambisonic_type_name()
        channel_ordering = self.ambisonic_channel_ordering_name()
        ambisonic_normalization = self.ambisonic_normalization_name()
        console("\t\tAmbisonic Type: %s" % ambisonic_type)
        console("\t\tContains Head-Locked Stereo: %r" % self.head_locked_stereo)
        console("\t\tAmbisonic Order: %d" % self.ambisonic_order)
        console("\t\tAmbisonic Channel Ordering: %s" % channel_ordering)
        console("\t\tAmbisonic Normalization: %s" % ambisonic_normalization)
        console("\t\tNumber of Channels: %d" % self.num_channels)
        console("\t\tChannel Map: %s" % str(self.channel_map))

    def get_metadata_string(self):
        """ Outputs a concise single line audio metadata string. """
        metadata = "%s, %s, %s, Order %d, %d Channel(s), Channel Map: %s" \
            % (self.ambisonic_normalization_name(),\
               self.ambisonic_channel_ordering_name(),\
               self.ambisonic_type_name(),\
               self.ambisonic_order,\
               self.num_channels,\
               str(self.channel_map))
        return metadata

    def save(self, in_fh, out_fh, delta):
        """Serialize this box to out_fh in big-endian binary form.

        NOTE(review): `in_fh` and `delta` are unused here — presumably part
        of the common box.Box save interface; confirm against box.Box.
        """
        if (self.header_size == 16):
            # 64-bit box: size==1 sentinel followed by the real 64-bit size.
            out_fh.write(struct.pack(">I", 1))
            out_fh.write(struct.pack(">Q", self.size()))
            out_fh.write(self.name)
        elif(self.header_size == 8):
            out_fh.write(struct.pack(">I", self.size()))
            out_fh.write(self.name)

        # Fold the head-locked-stereo flag back into the type byte's top bit.
        ambisonic_type = (
            self.ambisonic_type | int('10000000', 2) if
            self.head_locked_stereo else self.ambisonic_type & int('01111111', 2))
        out_fh.write(struct.pack(">B", self.version))
        out_fh.write(struct.pack(">B", ambisonic_type))
        out_fh.write(struct.pack(">I", self.ambisonic_order))
        out_fh.write(struct.pack(">B", self.ambisonic_channel_ordering))
        out_fh.write(struct.pack(">B", self.ambisonic_normalization))
        out_fh.write(struct.pack(">I", self.num_channels))
        for i in self.channel_map:
            if (i != None):
                out_fh.write(struct.pack(">I", int(i)))
|
[
"spatialmedia.mpeg.box.Box.__init__",
"struct.pack"
] |
[((2549, 2571), 'spatialmedia.mpeg.box.Box.__init__', 'box.Box.__init__', (['self'], {}), '(self)\n', (2565, 2571), False, 'from spatialmedia.mpeg import box\n'), ((6608, 6639), 'struct.pack', 'struct.pack', (['""">B"""', 'self.version'], {}), "('>B', self.version)\n", (6619, 6639), False, 'import struct\n'), ((6662, 6695), 'struct.pack', 'struct.pack', (['""">B"""', 'ambisonic_type'], {}), "('>B', ambisonic_type)\n", (6673, 6695), False, 'import struct\n'), ((6718, 6757), 'struct.pack', 'struct.pack', (['""">I"""', 'self.ambisonic_order'], {}), "('>I', self.ambisonic_order)\n", (6729, 6757), False, 'import struct\n'), ((6780, 6830), 'struct.pack', 'struct.pack', (['""">B"""', 'self.ambisonic_channel_ordering'], {}), "('>B', self.ambisonic_channel_ordering)\n", (6791, 6830), False, 'import struct\n'), ((6853, 6900), 'struct.pack', 'struct.pack', (['""">B"""', 'self.ambisonic_normalization'], {}), "('>B', self.ambisonic_normalization)\n", (6864, 6900), False, 'import struct\n'), ((6923, 6959), 'struct.pack', 'struct.pack', (['""">I"""', 'self.num_channels'], {}), "('>I', self.num_channels)\n", (6934, 6959), False, 'import struct\n'), ((6175, 6195), 'struct.pack', 'struct.pack', (['""">I"""', '(1)'], {}), "('>I', 1)\n", (6186, 6195), False, 'import struct\n')]
|
from django.core.management.base import BaseCommand
from django.db import connections
from symposion.reviews.models import ProposalResult, promote_proposal
class Command(BaseCommand):

    def handle(self, *args, **options):
        """Promote every accepted proposal, then resync the session id sequence."""
        queryset = ProposalResult.objects.filter(status="accepted").order_by("proposal")
        for result in queryset:
            promote_proposal(result.proposal)
        # Sessions were inserted with explicit ids; bump the sequence so the
        # next auto-assigned id does not collide.
        cursor = connections["default"].cursor()
        cursor.execute("SELECT setval('schedule_session_id_seq', (SELECT max(id) FROM schedule_session))")
|
[
"symposion.reviews.models.promote_proposal",
"symposion.reviews.models.ProposalResult.objects.filter"
] |
[((261, 309), 'symposion.reviews.models.ProposalResult.objects.filter', 'ProposalResult.objects.filter', ([], {'status': '"""accepted"""'}), "(status='accepted')\n", (290, 309), False, 'from symposion.reviews.models import ProposalResult, promote_proposal\n'), ((442, 475), 'symposion.reviews.models.promote_proposal', 'promote_proposal', (['result.proposal'], {}), '(result.proposal)\n', (458, 475), False, 'from symposion.reviews.models import ProposalResult, promote_proposal\n')]
|
from __future__ import print_function, division
import abc
import numpy as np
class StreamProcessor(object):
    """Abstract base for objects that consume a stream item by item."""

    def __call__(self, items):
        """Processed the whole stream of items.

        Args:
            items (Iterable(object)) the stream of items to process.
        """
        for element in items:
            self.put(element)

    @abc.abstractmethod
    def put(self, item):
        """The method for processing one item"""
        raise NotImplementedError('')

    @abc.abstractmethod
    def reset(self):
        """Resets the stream processor"""
        raise NotImplementedError('')
class ReservoirSampling(StreamProcessor):
    """Maintains a UNIFORM SAMPLE of processed items up to any time t."""

    def __init__(self, size, seed=None):
        self.size = size
        self.t = None
        self.reservoir = None
        self.seed = seed
        # Seeds numpy's *global* RNG so runs are reproducible.
        np.random.seed(seed)
        self.reset()

    def put(self, item):
        """Offer one item to the reservoir (classic Algorithm R step)."""
        self.t += 1
        if len(self.reservoir) < self.size:
            self.reservoir.append(item)
            return
        # Reservoir full: keep the newcomer with probability size / t.
        if np.random.random() < self.size / self.t:
            slot = np.random.randint(0, self.size)
            self.reservoir[slot] = item

    def reset(self):
        """Empty the reservoir and restart the item counter."""
        self.reservoir = []
        self.t = 0
|
[
"numpy.random.randint",
"numpy.random.random",
"numpy.random.seed"
] |
[((917, 937), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (931, 937), True, 'import numpy as np\n'), ((1171, 1189), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1187, 1189), True, 'import numpy as np\n'), ((1243, 1274), 'numpy.random.randint', 'np.random.randint', (['(0)', 'self.size'], {}), '(0, self.size)\n', (1260, 1274), True, 'import numpy as np\n')]
|
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import pandas as pd
def plot_hist(df: pd.DataFrame, column: str, color: str) -> None:
    """Show a KDE-overlaid distribution plot for one column of df."""
    plt.figure(figsize=(9, 7))
    sns.displot(x=column, data=df, kde=True, color=color, height=7, aspect=2)
    plt.title(f'Distribution of {column}', fontweight='bold', size=20)
    plt.show()
def plot_dist(df: pd.DataFrame, column: str):
    """Show the distribution of a single column of df.

    BUG FIX: the original passed the whole DataFrame to sns.distplot and
    ignored `column` (it was only used in the title); select the named
    column instead. NOTE: distplot is deprecated in modern seaborn —
    consider sns.histplot/displot when upgrading.
    """
    plt.figure(figsize=(9, 7))
    sns.distplot(df[column]).set_title(f'Distribution of {column}')
    plt.show()
def plot_count(df: pd.DataFrame, column: str) -> None:
    """Show a bar chart of value counts for `column`."""
    plt.figure(figsize=(12, 7))
    sns.countplot(x=column, data=df)
    plt.title(f'Plot count of {column}', fontweight='bold', size=20)
    plt.show()
def plot_bar(df: pd.DataFrame, x_col: str, y_col: str, title: str, xlabel: str, ylabel: str) -> None:
    """Show a fully labelled bar plot of y_col against x_col."""
    plt.figure(figsize=(9, 7))
    sns.barplot(x=x_col, y=y_col, data=df)
    plt.title(title, size=20)
    plt.xlabel(xlabel, fontsize=16)
    plt.ylabel(ylabel, fontsize=16)
    plt.xticks(rotation=75, fontsize=14)
    plt.yticks(fontsize=14)
    plt.show()
def plot_heatmap(df: pd.DataFrame, title: str, cbar=False) -> None:
    """Show an annotated heatmap of df; values are scaled to [0, 1]."""
    plt.figure(figsize=(12, 7))
    sns.heatmap(df, annot=True, fmt='.2f', cmap='viridis',
                vmin=0, vmax=1, linewidths=.7, cbar=cbar)
    plt.title(title, fontweight='bold', size=18)
    plt.show()
def plot_box(df: pd.DataFrame, x_col: str, title: str) -> None:
    """Show a box plot of a single column."""
    plt.figure(figsize=(12, 7))
    sns.boxplot(x=x_col, data=df)
    plt.title(title, size=20)
    plt.xticks(rotation=75, fontsize=14)
    plt.show()
def plot_box_multi(df: pd.DataFrame, x_col: str, y_col: str, title: str) -> None:
    """Show a box plot of y_col grouped by x_col."""
    plt.figure(figsize=(12, 7))
    sns.boxplot(x=x_col, y=y_col, data=df)
    plt.title(title, size=20)
    plt.xticks(rotation=75, fontsize=14)
    plt.yticks(fontsize=14)
    plt.show()
def plot_scatter(df: pd.DataFrame, x_col: str, y_col: str, title: str, hue: str, style: str) -> None:
    """Show a scatter plot of y_col vs x_col, coloured by hue, marked by style."""
    plt.figure(figsize=(10, 8))
    sns.scatterplot(x=x_col, y=y_col, hue=hue, style=style, data=df)
    plt.title(title, size=20)
    plt.xticks(fontsize=14)
    plt.yticks(fontsize=14)
    plt.show()
|
[
"matplotlib.pyplot.title",
"seaborn.displot",
"matplotlib.pyplot.show",
"seaborn.heatmap",
"seaborn.scatterplot",
"matplotlib.pyplot.yticks",
"seaborn.barplot",
"matplotlib.pyplot.figure",
"seaborn.boxplot",
"seaborn.countplot",
"seaborn.distplot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.xlabel"
] |
[((173, 199), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (183, 199), True, 'import matplotlib.pyplot as plt\n'), ((204, 277), 'seaborn.displot', 'sns.displot', ([], {'data': 'df', 'x': 'column', 'color': 'color', 'kde': '(True)', 'height': '(7)', 'aspect': '(2)'}), '(data=df, x=column, color=color, kde=True, height=7, aspect=2)\n', (215, 277), True, 'import seaborn as sns\n'), ((282, 348), 'matplotlib.pyplot.title', 'plt.title', (['f"""Distribution of {column}"""'], {'size': '(20)', 'fontweight': '"""bold"""'}), "(f'Distribution of {column}', size=20, fontweight='bold')\n", (291, 348), True, 'import matplotlib.pyplot as plt\n'), ((353, 363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (361, 363), True, 'import matplotlib.pyplot as plt\n'), ((416, 442), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (426, 442), True, 'import matplotlib.pyplot as plt\n'), ((507, 517), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (515, 517), True, 'import matplotlib.pyplot as plt\n'), ((579, 606), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (589, 606), True, 'import matplotlib.pyplot as plt\n'), ((611, 643), 'seaborn.countplot', 'sns.countplot', ([], {'data': 'df', 'x': 'column'}), '(data=df, x=column)\n', (624, 643), True, 'import seaborn as sns\n'), ((648, 712), 'matplotlib.pyplot.title', 'plt.title', (['f"""Plot count of {column}"""'], {'size': '(20)', 'fontweight': '"""bold"""'}), "(f'Plot count of {column}', size=20, fontweight='bold')\n", (657, 712), True, 'import matplotlib.pyplot as plt\n'), ((717, 727), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (725, 727), True, 'import matplotlib.pyplot as plt\n'), ((836, 862), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (846, 862), True, 'import matplotlib.pyplot as plt\n'), ((867, 905), 'seaborn.barplot', 
'sns.barplot', ([], {'data': 'df', 'x': 'x_col', 'y': 'y_col'}), '(data=df, x=x_col, y=y_col)\n', (878, 905), True, 'import seaborn as sns\n'), ((910, 935), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(20)'}), '(title, size=20)\n', (919, 935), True, 'import matplotlib.pyplot as plt\n'), ((940, 976), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(75)', 'fontsize': '(14)'}), '(rotation=75, fontsize=14)\n', (950, 976), True, 'import matplotlib.pyplot as plt\n'), ((981, 1004), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (991, 1004), True, 'import matplotlib.pyplot as plt\n'), ((1009, 1040), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {'fontsize': '(16)'}), '(xlabel, fontsize=16)\n', (1019, 1040), True, 'import matplotlib.pyplot as plt\n'), ((1045, 1076), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {'fontsize': '(16)'}), '(ylabel, fontsize=16)\n', (1055, 1076), True, 'import matplotlib.pyplot as plt\n'), ((1081, 1091), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1089, 1091), True, 'import matplotlib.pyplot as plt\n'), ((1166, 1193), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1176, 1193), True, 'import matplotlib.pyplot as plt\n'), ((1198, 1299), 'seaborn.heatmap', 'sns.heatmap', (['df'], {'annot': '(True)', 'cmap': '"""viridis"""', 'vmin': '(0)', 'vmax': '(1)', 'fmt': '""".2f"""', 'linewidths': '(0.7)', 'cbar': 'cbar'}), "(df, annot=True, cmap='viridis', vmin=0, vmax=1, fmt='.2f',\n linewidths=0.7, cbar=cbar)\n", (1209, 1299), True, 'import seaborn as sns\n'), ((1315, 1359), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(18)', 'fontweight': '"""bold"""'}), "(title, size=18, fontweight='bold')\n", (1324, 1359), True, 'import matplotlib.pyplot as plt\n'), ((1364, 1374), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1372, 1374), True, 'import matplotlib.pyplot as plt\n'), ((1445, 
1472), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1455, 1472), True, 'import matplotlib.pyplot as plt\n'), ((1477, 1506), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': 'x_col'}), '(data=df, x=x_col)\n', (1488, 1506), True, 'import seaborn as sns\n'), ((1511, 1536), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(20)'}), '(title, size=20)\n', (1520, 1536), True, 'import matplotlib.pyplot as plt\n'), ((1541, 1577), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(75)', 'fontsize': '(14)'}), '(rotation=75, fontsize=14)\n', (1551, 1577), True, 'import matplotlib.pyplot as plt\n'), ((1582, 1592), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1590, 1592), True, 'import matplotlib.pyplot as plt\n'), ((1681, 1708), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 7)'}), '(figsize=(12, 7))\n', (1691, 1708), True, 'import matplotlib.pyplot as plt\n'), ((1713, 1751), 'seaborn.boxplot', 'sns.boxplot', ([], {'data': 'df', 'x': 'x_col', 'y': 'y_col'}), '(data=df, x=x_col, y=y_col)\n', (1724, 1751), True, 'import seaborn as sns\n'), ((1756, 1781), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(20)'}), '(title, size=20)\n', (1765, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1822), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'rotation': '(75)', 'fontsize': '(14)'}), '(rotation=75, fontsize=14)\n', (1796, 1822), True, 'import matplotlib.pyplot as plt\n'), ((1827, 1850), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (1837, 1850), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1863, 1865), True, 'import matplotlib.pyplot as plt\n'), ((1974, 2001), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (1984, 2001), True, 'import matplotlib.pyplot as plt\n'), ((2006, 2070), 
'seaborn.scatterplot', 'sns.scatterplot', ([], {'data': 'df', 'x': 'x_col', 'y': 'y_col', 'hue': 'hue', 'style': 'style'}), '(data=df, x=x_col, y=y_col, hue=hue, style=style)\n', (2021, 2070), True, 'import seaborn as sns\n'), ((2075, 2100), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'size': '(20)'}), '(title, size=20)\n', (2084, 2100), True, 'import matplotlib.pyplot as plt\n'), ((2105, 2128), 'matplotlib.pyplot.xticks', 'plt.xticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2115, 2128), True, 'import matplotlib.pyplot as plt\n'), ((2133, 2156), 'matplotlib.pyplot.yticks', 'plt.yticks', ([], {'fontsize': '(14)'}), '(fontsize=14)\n', (2143, 2156), True, 'import matplotlib.pyplot as plt\n'), ((2161, 2171), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2169, 2171), True, 'import matplotlib.pyplot as plt\n'), ((447, 463), 'seaborn.distplot', 'sns.distplot', (['df'], {}), '(df)\n', (459, 463), True, 'import seaborn as sns\n')]
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
"Module to show how to handle exceptions"
# This line must be at the beginning of the file
from __future__ import print_function
import sys
def GenericException():
    "Generic and simple use of exceptions"
    # Deliberately divide by zero and swallow the error with a bare except.
    try:
        result = 10 / 0
    except:
        print("Exception")
def GenericExceptionMessage():
    "Catch exception and print exception message"
    # Catching the broad Exception base gives access to the message object.
    try:
        result = 10 / 0
    except Exception as err:
        print("Exception:", err)
def GenericExceptionMultiple(value):
    "Catch several exceptions"
    # Demonstrates handler ordering (specific first, generic last) plus the
    # else clause (no exception) and finally clause (always runs).
    try:
        quotient = 10 / value
    except (TypeError, ZeroDivisionError) as err:
        print("Specific Exception:", type(err), err)
        return
    except Exception as err:
        print("generic Exception:", type(err), err)
        return
    else:
        print("Execute if no exception")
    finally:
        print("Always execute at end of try clause")
    print("Return")
def RaiseGenericException(str):
    """Raise a generic Exception carrying `str`, then catch and print it."""
    # NOTE(review): the parameter shadows the builtin `str`; the name is kept
    # because it is part of the public keyword interface.
    try:
        raise Exception(str)
    except Exception as err:
        print("Exception:", err)
def IgnoreException():
    """Swallow an exception entirely with a bare except / pass."""
    try:
        quotient = 1 / 0
    except:
        pass
    print("Still here")
def RethrowException():
    """Catch an exception, re-raise it, and catch it again one level up."""
    try:
        try:
            quotient = 1 / 0
        except:
            print("Caught first exception")
            # Bare raise re-raises the active exception unchanged.
            raise
    except Exception as err:
        print("second exception", err)
    print("Still here")
# Define exceptions raised by this module
class ModuleError(Exception):
    """Base class for exceptions in this module, so that callers can catch
    every exception raised by the module with a single handler."""
    pass
class UserError(ModuleError):
    """Exception example.

    DOC FIX: the original docstring described `expr` and `msg` attributes
    that the class never defines; it actually stores a single value.

    Attributes:
        value -- the payload associated with the error
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        # repr() keeps the original (quoted) rendering of the message.
        return repr("UserError with value '" + self.value + "'")
def RaiseUserException():
    """Raise the module's custom UserError and print it."""
    try:
        raise UserError("My exception")
    except Exception as err:
        print("Exception:", err)
def main():
    "The main function called when the utility is run."
    print('Exceptions test')
    print()
    # Run every demo in order, with a blank line after each one.
    for demo in (
            GenericException,
            GenericExceptionMessage,
            lambda: GenericExceptionMultiple("qq"),
            lambda: GenericExceptionMultiple(0),
            lambda: GenericExceptionMultiple(1),
            lambda: RaiseGenericException("My Exception"),
            IgnoreException,
            RethrowException,
            RaiseUserException,
    ):
        demo()
        print()
    return 0
# This is the standard boilerplate that calls the main() function.
if __name__ == '__main__':
    rc = main()
    # This function will set the result value of this utility
    # (process exit code: 0 on success).
    sys.exit( rc )
|
[
"sys.exit"
] |
[((2632, 2644), 'sys.exit', 'sys.exit', (['rc'], {}), '(rc)\n', (2640, 2644), False, 'import sys\n')]
|
from setuptools import setup
with open("README.md", "r") as readme_file:
readme = readme_file.read()
setup(
name="uitestcore",
version="7.0.2",
description="Package providing common functionality for UI automation test packs",
long_description=readme,
long_description_content_type="text/markdown",
license="MIT",
homepage="https://github.com/nhsuk/ui-test-core/",
packages=["uitestcore", "uitestcore.utilities"],
install_requires=[
"certifi==2021.10.8",
"chardet==4.0.0",
"idna==3.3",
"pyhamcrest==2.0.2",
"python-dateutil==2.8.2",
"requests==2.26.0",
"selenium==3.141.0",
"six==1.16.0",
"urllib3==1.26.7"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
"License :: OSI Approved :: MIT License"
]
)
|
[
"setuptools.setup"
] |
[((107, 788), 'setuptools.setup', 'setup', ([], {'name': '"""uitestcore"""', 'version': '"""7.0.2"""', 'description': '"""Package providing common functionality for UI automation test packs"""', 'long_description': 'readme', 'long_description_content_type': '"""text/markdown"""', 'license': '"""MIT"""', 'homepage': '"""https://github.com/nhsuk/ui-test-core/"""', 'packages': "['uitestcore', 'uitestcore.utilities']", 'install_requires': "['certifi==2021.10.8', 'chardet==4.0.0', 'idna==3.3', 'pyhamcrest==2.0.2',\n 'python-dateutil==2.8.2', 'requests==2.26.0', 'selenium==3.141.0',\n 'six==1.16.0', 'urllib3==1.26.7']", 'classifiers': "['Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License']"}), "(name='uitestcore', version='7.0.2', description=\n 'Package providing common functionality for UI automation test packs',\n long_description=readme, long_description_content_type='text/markdown',\n license='MIT', homepage='https://github.com/nhsuk/ui-test-core/',\n packages=['uitestcore', 'uitestcore.utilities'], install_requires=[\n 'certifi==2021.10.8', 'chardet==4.0.0', 'idna==3.3',\n 'pyhamcrest==2.0.2', 'python-dateutil==2.8.2', 'requests==2.26.0',\n 'selenium==3.141.0', 'six==1.16.0', 'urllib3==1.26.7'], classifiers=[\n 'Programming Language :: Python :: 3',\n 'Operating System :: OS Independent',\n 'License :: OSI Approved :: MIT License'])\n", (112, 788), False, 'from setuptools import setup\n')]
|
import sys
import os
from datetime import datetime
import argparse
import pandas as pd
from Bio import Restriction
from Bio.Seq import Seq
from Bio import SeqIO
def partition(alist, indices):
    """Split a list into consecutive pieces at the given item indices.

    Parameters:
    -----------------------------
    : alist (list): a list to be split
    : indices (list): list of indices on which to divide the input list

    Returns:
    -----------------------------
    : pieces (list): sublists of `alist` cut at each index
    """
    # Build the boundary list [0, i1, ..., ik, None] and slice pairwise.
    bounds = [0] + indices + [None]
    pieces = []
    for start, stop in zip(bounds[:-1], bounds[1:]):
        pieces.append(alist[start:stop])
    return pieces
def split_fastq(infile, N=300):
    """A function to digest the reads by a cut site prior to mapping
    against the reference.

    This is adapted from:
    https://github.com/dekkerlab/MC-3C_scripts

    Parameters:
    -----------------------------
    : infile (str): path to the input fastq file
    : N (int): number of base pairs surrounding the cut-site to define as pairs

    Returns:
    -----------------------------
    : pairs_table (pd.DataFrame): a dataframe with subread pairs and metadata
    """
    handle = open(infile, "r")
    records = list(SeqIO.parse(handle, "fastq"))
    handle.close()

    new_rows = []

    # iterate through each read of the fastq file
    for record in records:

        # digest read on NlaIII sites
        # NOTE(review): restriction_site_len is computed but never used.
        restriction_site_len = len(Restriction.NlaIII.site)
        cut_sites = Restriction.NlaIII.search(record.seq)
        cut_sites = [x-1 for x in cut_sites] # cut site indices are the bp AFTER the CATG enzyme

        if len(cut_sites) > 0:

            # split the read on NlaIII cites
            splits = partition(record, cut_sites)

            # loop through pairs of subreads (i, i+1)
            subread_count = 0
            for idx in range(len(splits) - 1):
                subread_count += 1
                left_read = splits[idx]
                right_read = splits[idx + 1]

                # trim the CATG sequence from the reads
                # (the enzyme leaves CATG at the end of the upstream piece;
                # a middle subread can also end with CATG, hence both checks)
                if left_read.seq.endswith("CATG"):
                    left_read = left_read[:-4]
                if right_read.seq.endswith("CATG"):
                    right_read = right_read[:-4]

                # trim subreads based on parameter N
                # keep the N bp nearest the cut site on each side
                if len(left_read) > N:
                    left_read = left_read[-N:]
                if len(right_read) > N:
                    right_read = right_read[:N]

                # get read metadata from left read - assume consistent between subreads
                metadata = dict(item.split("=") for item in left_read.description.split(" ") if "=" in item)

                # construct new data record
                new_record = {
                    'read_id' : left_read.id,
                    'subread_id' : subread_count,
                    'left_read' : "".join(left_read.seq),
                    'right_read' : "".join(right_read.seq),
                    'subread_max_length' : N,
                }

                # add metadata
                for k, v in metadata.items():
                    new_record[k] = v

                new_rows.append(new_record)

    # build pairs table
    pairs_table = pd.DataFrame(new_rows)
    return pairs_table
if __name__ == '__main__':

    ############################################################################################
    # INPUT ARGUMENT DEFINITIONS
    ############################################################################################
    desc = """A Python3 commandline tool to virtually digest sequences based on NlaIII sites."""
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument("-i",
                        help="Input merged fastq file.")
    parser.add_argument("-n",
                        nargs='?',
                        default=300,
                        help="Number of base pairs surrounding a cut site.")
    parser.add_argument("-o",
                        nargs='?',
                        type=str,
                        default="results/",
                        help="The path to a directory for all output files.")
    parser.add_argument("-overwrite",
                        nargs='?',
                        type=bool,
                        default=True,
                        help="If True, overwrite the directory at `-o`")

    ############################################################################################
    # INPUT ARGUMENT PARSING
    ############################################################################################
    args = parser.parse_args()

    # argument parsing - may need to handle more robustly
    FASTQ = args.i
    N = int(args.n)
    OUTPUT_DIR = args.o
    OVERWRITE = args.overwrite

    # make output dir, handle overwriting flag
    # NOTE(review): exist_ok only tolerates a pre-existing directory; it does
    # not clear its contents, so "overwrite" here means "reuse the directory".
    os.makedirs(OUTPUT_DIR, mode=0o777, exist_ok=OVERWRITE)

    ############################################################################################
    # DATA PROCESSING
    ############################################################################################
    # generate pairs
    pairs_table = split_fastq(FASTQ, N)
    print(pairs_table.columns)

    # get input file basename and generate output filename
    base = os.path.basename(FASTQ)
    basename = os.path.splitext(base)[0]
    new_filename = f"{OUTPUT_DIR}{basename}_PAIRS.csv"

    ############################################################################################
    # OUTPUTS
    ############################################################################################
    pairs_table.to_csv(new_filename, index=False)
    print(f"Done saving: `{new_filename}`")
|
[
"pandas.DataFrame",
"Bio.SeqIO.parse",
"os.makedirs",
"argparse.ArgumentParser",
"os.path.basename",
"Bio.Restriction.NlaIII.search",
"os.path.splitext"
] |
[((3448, 3470), 'pandas.DataFrame', 'pd.DataFrame', (['new_rows'], {}), '(new_rows)\n', (3460, 3470), True, 'import pandas as pd\n'), ((3879, 3920), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'desc'}), '(description=desc)\n', (3902, 3920), False, 'import argparse\n'), ((5104, 5157), 'os.makedirs', 'os.makedirs', (['OUTPUT_DIR'], {'mode': '(511)', 'exist_ok': 'OVERWRITE'}), '(OUTPUT_DIR, mode=511, exist_ok=OVERWRITE)\n', (5115, 5157), False, 'import os\n'), ((5563, 5586), 'os.path.basename', 'os.path.basename', (['FASTQ'], {}), '(FASTQ)\n', (5579, 5586), False, 'import os\n'), ((1220, 1248), 'Bio.SeqIO.parse', 'SeqIO.parse', (['handle', '"""fastq"""'], {}), "(handle, 'fastq')\n", (1231, 1248), False, 'from Bio import SeqIO\n'), ((1497, 1534), 'Bio.Restriction.NlaIII.search', 'Restriction.NlaIII.search', (['record.seq'], {}), '(record.seq)\n', (1522, 1534), False, 'from Bio import Restriction\n'), ((5602, 5624), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (5618, 5624), False, 'import os\n')]
|
import numpy as np
from config import FEEDRATE, X_STEP, Y_STEP, HEIGHT, WIDTH
# TO DO:
# * We assume that the head's nozzles extend along the Y direction.
# (This is apparently the case.)
def array_to_gcode(array):
    """Convert numpy array into a sequence of gcodes, returned as a string.

    Args:
        array: 2-D numpy array of non-negative ink counts; rows are grouped
            into strips of 12 (one row per nozzle along Y).

    Returns:
        str: the accumulated G-CODE program.
    """
    assert isinstance(array, np.ndarray)
    height = array.shape[0]
    #assert height == HEIGHT
    width = array.shape[1]
    #assert width == WIDTH
    # Each strip covers 12 rows - one per nozzle on the print head.
    strip_number = int(np.ceil(height/12.0))

    gcode = ""
    y_pos = 0
    # BUG FIX: `xrange` is Python-2-only and raises NameError on Python 3;
    # `range` iterates identically here on both versions.
    for strip_idx in range(strip_number):
        x_pos = 0
        for column_idx in range(width):
            nozzles_gcode = fire_nozzles(array[12*strip_idx:12*(strip_idx+1),
                                               column_idx])
            if nozzles_gcode:  # Only print and move if there's any non-white
                               # pixels
                gcode += move(x_pos, y_pos)
                gcode += nozzles_gcode
            x_pos += X_STEP
        y_pos += Y_STEP
    return gcode
def move(x_pos, y_pos):
    """Return the G-CODE describing motion to x_pos, y_pos."""
    # G1 linear move at the configured feedrate, then M400 to wait for the
    # motion queue to drain before the next command.
    return ("G1X" + str(x_pos) + "Y" + str(y_pos) + "F" + str(FEEDRATE) + ";\n"
            + "M400;\n")
def fire_nozzles(firing_column):
    """Return the G-CODE describing the printing sequence. If there
    is nothing to be printed, return an empty string.
    """
    if np.all(firing_column == 0):
        return ''
    commands = ''
    # Fire repeated salvos until every cell's ink count reaches zero;
    # salvo_integer decrements the column in place each pass.
    while np.any(firing_column != 0):
        firing_pattern, firing_column = salvo_integer(firing_column)
        commands += "M700 P0 S" + str(firing_pattern) + ";\n"
    return commands
def salvo_integer(firing_column):
    """Compute one firing salvo from a column of pixel intensities.

    Every strictly positive entry contributes a '1' bit and is decremented
    in place; every other entry contributes a '0'. The 12-bit pattern is
    returned as a decimal integer (a component of the firing G-CODE)
    together with the mutated column.
    """
    bits = []
    for idx in range(len(firing_column)):
        if firing_column[idx] > 0:
            bits.append('1')
            firing_column[idx] -= 1
        else:
            bits.append('0')
    # Pad with zeroes up to the 12 nozzles of the head.
    bits.append('0' * (12 - len(bits)))
    return int(''.join(bits), 2), firing_column
|
[
"numpy.any",
"numpy.ceil",
"numpy.all"
] |
[((1442, 1468), 'numpy.all', 'np.all', (['(firing_column == 0)'], {}), '(firing_column == 0)\n', (1448, 1468), True, 'import numpy as np\n'), ((477, 499), 'numpy.ceil', 'np.ceil', (['(height / 12.0)'], {}), '(height / 12.0)\n', (484, 499), True, 'import numpy as np\n'), ((1513, 1539), 'numpy.any', 'np.any', (['(firing_column != 0)'], {}), '(firing_column != 0)\n', (1519, 1539), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Sphinx configuration (conf.py) for the SCICO documentation build.
import os
import re
import sys
from ast import parse
from inspect import getmembers, isfunction
from unittest.mock import MagicMock
from sphinx.ext.napoleon.docstring import GoogleDocstring
# Make the directory containing this file importable so the local
# docutil helper module can be found.
confpath = os.path.dirname(__file__)
sys.path.append(confpath)
from docutil import insert_inheritance_diagram, package_classes
## See
## https://github.com/sphinx-doc/sphinx/issues/2115
## https://michaelgoerz.net/notes/extending-sphinx-napoleon-docstring-sections.html
##
# first, we define new methods for any new sections and add them to the class
def parse_keys_section(self, section):
    """Render a custom ``Keys:`` docstring section as a field list."""
    fields = self._consume_fields()
    return self._format_fields("Keys", fields)
GoogleDocstring._parse_keys_section = parse_keys_section
def parse_attributes_section(self, section):
    """Render the ``Attributes:`` docstring section as a field list."""
    fields = self._consume_fields()
    return self._format_fields("Attributes", fields)
GoogleDocstring._parse_attributes_section = parse_attributes_section
def parse_class_attributes_section(self, section):
    """Render a custom ``Class Attributes:`` docstring section."""
    fields = self._consume_fields()
    return self._format_fields("Class Attributes", fields)
GoogleDocstring._parse_class_attributes_section = parse_class_attributes_section
# we now patch the parse method to guarantee that the the above methods are
# assigned to the _section dict
def patched_parse(self):
    """Register the custom sections, then delegate to the original parser."""
    self._sections.update(
        {
            "keys": self._parse_keys_section,
            "class attributes": self._parse_class_attributes_section,
        }
    )
    self._unpatched_parse()
GoogleDocstring._unpatched_parse = GoogleDocstring._parse
GoogleDocstring._parse = patched_parse
# NOTE(review): these two lines duplicate the identical pair near the top of
# the file; harmless (sys.path merely gains a repeated entry), but redundant.
confpath = os.path.dirname(__file__)
sys.path.append(confpath)
# True when the docs are being built on the Read the Docs service.
on_rtd = os.environ.get("READTHEDOCS") == "True"
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
rootpath = os.path.abspath("../..")
sys.path.insert(0, rootpath)
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "3.3"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    "sphinx.ext.napoleon",
    "sphinx.ext.autodoc",
    "sphinx_autodoc_typehints",
    "sphinx.ext.autosummary",
    "sphinx.ext.doctest",
    "sphinx.ext.intersphinx",
    "sphinx.ext.viewcode",
    "sphinxcontrib.bibtex",
    "sphinx.ext.inheritance_diagram",
    "sphinx.ext.mathjax",
    "sphinx.ext.todo",
    "nbsphinx",
]
bibtex_bibfiles = ["references.bib"]
# Never execute notebooks during the docs build; rendered output is committed.
nbsphinx_execute = "never"
# Raw HTML/CSS injected before every rendered notebook (hides prompts etc.).
nbsphinx_prolog = """
.. raw:: html
<style>
.nbinput .prompt, .nboutput .prompt {
display: none;
}
div.highlight {
background-color: #f9f9f4;
}
p {
margin-bottom: 0.8em;
margin-top: 0.8em;
}
</style>
"""
# See
# https://stackoverflow.com/questions/2701998#62613202
# https://github.com/JamesALeedham/Sphinx-Autosummary-Recursion
autosummary_generate = True
# Copied from scikit-learn sphinx configuration
if os.environ.get("NO_MATHJAX"):
    # Render math as SVG images instead of loading MathJax from a CDN.
    extensions.append("sphinx.ext.imgmath")
    imgmath_image_format = "svg"
else:
    extensions.append("sphinx.ext.mathjax")
    mathjax_path = "https://cdn.mathjax.org/mathjax/latest/" "MathJax.js?config=TeX-AMS_HTML"
# Custom TeX macros made available to MathJax in the rendered HTML; the same
# table is reused below to build the LaTeX preamble.
mathjax_config = {
    "TeX": {
        "Macros": {
            "mb": [r"\mathbf{#1}", 1],
            "mbs": [r"\boldsymbol{#1}", 1],
            "mbb": [r"\mathbb{#1}", 1],
            "norm": [r"\lVert #1 \rVert", 1],
            "abs": [r"\left| #1 \right|", 1],
            "argmin": [r"\mathop{\mathrm{argmin}}"],
            "sign": [r"\mathop{\mathrm{sign}}"],
            "prox": [r"\mathop{\mathrm{prox}}"],
            "loss": [r"\mathop{\mathrm{loss}}"],
            "kp": [r"k_{\|}"],
            "rp": [r"r_{\|}"],
        }
    }
}
# See https://stackoverflow.com/questions/5599254
autoclass_content = "both"
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
source_encoding = "utf-8"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "SCICO"
copyright = "2020-2021, SCICO Developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Parse __version__ straight out of the package source so the docs never
# need to import (and therefore build) the package itself.
with open(os.path.join("../../scico", "__init__.py")) as f:
    version = parse(next(filter(lambda line: line.startswith("__version__"), f))).body[0].value.s
# The full version, including alpha/beta/rc tags.
release = version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# NOTE(review): reassigned later in this file; this value is overridden.
exclude_patterns = ["tmp", "*.tmp.*", "*.tmp"]
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# html_theme = "sphinx_rtd_theme"
html_theme = "faculty-sphinx-theme"
html_theme_options = {
    "includehidden": False,
    "logo_only": True,
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
html_logo = "_static/logo.svg"
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
html_favicon = "_static/scico.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
if on_rtd:
    html_static_path = []
else:
    html_static_path = ["_static"]
# Output file base name for HTML help builder.
htmlhelp_basename = "SCICOdoc"
# Include TODOs
todo_include_todos = True
# -- Options for LaTeX output ---------------------------------------------
# NOTE(review): placeholder only; replaced below once latex_macros is built.
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ("index", "scico.tex", "SCICO Documentation", "The SCICO Developers", "manual"),
]
latex_engine = "xelatex"
# latex_use_xindy = False
# Translate the MathJax macro table into \newcommand definitions so the
# LaTeX build renders the same custom macros as the HTML build.
latex_macros = []
for k, v in mathjax_config["TeX"]["Macros"].items():
    # One-element entries are parameterless macros; two-element entries
    # take a single argument.
    template = r"\newcommand{\%s}{%s}" if len(v) == 1 else r"\newcommand{\%s}[1]{%s}"
    latex_macros.append(template % (k, v[0]))
latex_elements = {"preamble": "\n".join(latex_macros)}
# Intersphinx mapping
intersphinx_mapping = {
    "python": ("https://docs.python.org/3/", None),
    "numpy": ("https://docs.scipy.org/doc/numpy/", None),
    "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
    "matplotlib": ("https://matplotlib.org/stable/", None),
    "jax": ("https://jax.readthedocs.io/en/latest/", None),
    "objax": ("https://objax.readthedocs.io/en/latest/", None),
}
# Added timeout due to periodic scipy.org down time
# intersphinx_timeout = 30
# napoleon_include_init_with_doc = True
napoleon_use_ivar = True
napoleon_use_rtype = False
# Inheritance diagrams are rendered via graphviz as SVG.
graphviz_output_format = "svg"
inheritance_graph_attrs = dict(rankdir="LR", fontsize=9, ratio="compress", bgcolor="transparent")
inheritance_node_attrs = dict(
    shape="box",
    fontsize=9,
    height=0.4,
    margin='"0.08, 0.03"',
    style='"rounded,filled"',
    fillcolor='"#f4f4ffff"',
)
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [("index", "scico", "SCICO Documentation", ["SCICO Developers"], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        "index",
        "SCICO",
        "SCICO Documentation",
        "SCICO Developers",
        "SCICO",
        "Scientific Computational Imaging COde (SCICO)",
        "Miscellaneous",
    ),
]
# Diagnostic output that shows up in the Read the Docs build log.
if on_rtd:
    print("Building on ReadTheDocs")
    # NOTE(review): bare `print` below is a no-op expression (Python 3),
    # likely a leftover from a Python 2 blank-line print.
    print
    print("Current working directory: {}".format(os.path.abspath(os.curdir)))
    import numpy as np
    print("NumPy version: %s" % np.__version__)
    import matplotlib
    # Use the non-interactive Agg backend; there is no display on RTD.
    matplotlib.use("agg")
# Modules that cannot be installed on RTD and must be mocked at import time.
MOCK_MODULES = ["astra", "svmbir"]
class Mock(MagicMock):
    """Module stand-in that yields a fresh MagicMock for any attribute access."""
    @classmethod
    def __getattr__(cls, name):
        return MagicMock()
# Install the mocks so `import astra` / `import svmbir` succeed during autodoc.
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
print("rootpath: %s" % rootpath)
print("confpath: %s" % confpath)
# Sort members by type
autodoc_default_options = {
    "member-order": "bysource",
    "inherited-members": True,
    "ignore-module-all": False,
    "show-inheritance": True,
    "special-members": "__call__",
}
autodoc_docstring_signature = True
autoclass_content = "both"
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# NOTE(review): overrides the earlier exclude_patterns assignment.
exclude_patterns = ["_build", "**tests**", "**spi**"]
# Rewrite module names for certain functions imported into scico.numpy so that they are
# included in the docs for that module. While a bit messy to do so here rather than in a
# function run via app.connect, it is necessary (for some yet to be identified reason)
# to do it here to ensure that the relevant API docs include a table of functions.
import scico.numpy
# Walk every function exposed by scico.numpy and patch up docstrings that
# were inherited from jax.numpy so cross-references resolve correctly.
snp_func = getmembers(scico.numpy, isfunction)
for _, f in snp_func:
    if f.__module__[0:14] == "jax._src.numpy" or f.__module__ == "scico.numpy._create":
        # Rewrite module name so that function is included in docs
        f.__module__ = "scico.numpy"
        # Attempt to fix incorrect cross-reference
        if f.__name__ == "compare_chararrays":
            modname = "numpy.char"
        else:
            modname = "numpy"
        # Point the "LAX-backend implementation" note at the real numpy object.
        f.__doc__ = re.sub(
            r"^LAX-backend implementation of :func:`([\w_]+)`.",
            r"LAX-backend implementation of :obj:`%s.\1`." % modname,
            str(f.__doc__),
            flags=re.M,
        )
        # Improve formatting of jax.numpy warning
        f.__doc__ = re.sub(
            r"^\*\*\* This function is not yet implemented by jax.numpy, and will "
            "raise NotImplementedError \*\*\*",
            "**WARNING**: This function is not yet implemented by jax.numpy, "
            " and will raise :exc:`NotImplementedError`.",
            f.__doc__,
            flags=re.M,
        )
        # Remove cross-reference to numpydoc style references section
        f.__doc__ = re.sub(r" \[(\d+)\]_", "", f.__doc__, flags=re.M)
        # Remove entire numpydoc references section
        f.__doc__ = re.sub(r"References\n----------\n.*\n", "", f.__doc__, flags=re.DOTALL)
# Remove spurious two-space indentation of entire docstring
scico.numpy.vectorize.__doc__ = re.sub("^  ", "", scico.numpy.vectorize.__doc__, flags=re.M)
def class_inherit_diagrams(_):
    """Insert inheritance diagrams for all scico classes that have base classes."""
    import scico
    for klass in package_classes(scico):
        insert_inheritance_diagram(klass)
def setup(app):
    """Sphinx entry point: register stylesheets and build-time hooks."""
    stylesheets = [
        "scico.css",
        "http://netdna.bootstrapcdn.com/font-awesome/4.7.0/" "css/font-awesome.min.css",
    ]
    for css in stylesheets:
        app.add_css_file(css)
    app.connect("builder-inited", class_inherit_diagrams)
|
[
"sys.path.append",
"docutil.package_classes",
"os.path.abspath",
"os.path.join",
"unittest.mock.MagicMock",
"os.path.dirname",
"sys.path.insert",
"os.environ.get",
"matplotlib.use",
"re.sub",
"docutil.insert_inheritance_diagram",
"inspect.getmembers"
] |
[((228, 253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'import os\n'), ((254, 279), 'sys.path.append', 'sys.path.append', (['confpath'], {}), '(confpath)\n', (269, 279), False, 'import sys\n'), ((1539, 1564), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1554, 1564), False, 'import os\n'), ((1565, 1590), 'sys.path.append', 'sys.path.append', (['confpath'], {}), '(confpath)\n', (1580, 1590), False, 'import sys\n'), ((1892, 1916), 'os.path.abspath', 'os.path.abspath', (['"""../.."""'], {}), "('../..')\n", (1907, 1916), False, 'import os\n'), ((1917, 1945), 'sys.path.insert', 'sys.path.insert', (['(0)', 'rootpath'], {}), '(0, rootpath)\n', (1932, 1945), False, 'import sys\n'), ((3078, 3106), 'os.environ.get', 'os.environ.get', (['"""NO_MATHJAX"""'], {}), "('NO_MATHJAX')\n", (3092, 3106), False, 'import os\n'), ((10273, 10308), 'inspect.getmembers', 'getmembers', (['scico.numpy', 'isfunction'], {}), '(scico.numpy, isfunction)\n', (10283, 10308), False, 'from inspect import getmembers, isfunction\n'), ((11707, 11767), 're.sub', 're.sub', (['"""^ """', '""""""', 'scico.numpy.vectorize.__doc__'], {'flags': 're.M'}), "('^ ', '', scico.numpy.vectorize.__doc__, flags=re.M)\n", (11713, 11767), False, 'import re\n'), ((1601, 1630), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""'], {}), "('READTHEDOCS')\n", (1615, 1630), False, 'import os\n'), ((9138, 9159), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (9152, 9159), False, 'import matplotlib\n'), ((11901, 11923), 'docutil.package_classes', 'package_classes', (['scico'], {}), '(scico)\n', (11916, 11923), False, 'from docutil import insert_inheritance_diagram, package_classes\n'), ((4546, 4588), 'os.path.join', 'os.path.join', (['"""../../scico"""', '"""__init__.py"""'], {}), "('../../scico', '__init__.py')\n", (4558, 4588), False, 'import os\n'), ((9286, 9297), 'unittest.mock.MagicMock', 'MagicMock', ([], {}), 
'()\n', (9295, 9297), False, 'from unittest.mock import MagicMock\n'), ((10995, 11264), 're.sub', 're.sub', (['"""^\\\\*\\\\*\\\\* This function is not yet implemented by jax.numpy, and will raise NotImplementedError \\\\*\\\\*\\\\*"""', '"""**WARNING**: This function is not yet implemented by jax.numpy, and will raise :exc:`NotImplementedError`."""', 'f.__doc__'], {'flags': 're.M'}), "(\n '^\\\\*\\\\*\\\\* This function is not yet implemented by jax.numpy, and will raise NotImplementedError \\\\*\\\\*\\\\*'\n ,\n '**WARNING**: This function is not yet implemented by jax.numpy, and will raise :exc:`NotImplementedError`.'\n , f.__doc__, flags=re.M)\n", (11001, 11264), False, 'import re\n'), ((11420, 11471), 're.sub', 're.sub', (['""" \\\\[(\\\\d+)\\\\]_"""', '""""""', 'f.__doc__'], {'flags': 're.M'}), "(' \\\\[(\\\\d+)\\\\]_', '', f.__doc__, flags=re.M)\n", (11426, 11471), False, 'import re\n'), ((11542, 11615), 're.sub', 're.sub', (['"""References\\\\n----------\\\\n.*\\\\n"""', '""""""', 'f.__doc__'], {'flags': 're.DOTALL'}), "('References\\\\n----------\\\\n.*\\\\n', '', f.__doc__, flags=re.DOTALL)\n", (11548, 11615), False, 'import re\n'), ((11955, 11986), 'docutil.insert_inheritance_diagram', 'insert_inheritance_diagram', (['cls'], {}), '(cls)\n', (11981, 11986), False, 'from docutil import insert_inheritance_diagram, package_classes\n'), ((9010, 9036), 'os.path.abspath', 'os.path.abspath', (['os.curdir'], {}), '(os.curdir)\n', (9025, 9036), False, 'import os\n')]
|
#!/usr/bin/env python
"""
I2C ROS 2 Node.
This node subscribes ROS-Messages to send data over I2C
I2C:
http://www.netzmafia.de/skripten/hardware/RasPi/RasPi_I2C.html
and
https://raspberry-projects.com/pi/programming-in-python/i2c-programming-in-python/using-the-i2c-interface-2
"""
import os
import sys
import smbus # System management bus ==> I2C compatible
import time
# ROS 2 Imports
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from systemcore.msg import I2Cwrite8, I2Cwrite16, I2CwriteArray
import threading
# NOTE(review): currently unused — the semaphore-guarded variants of the
# write calls are commented out in I2CNode below.
sem = threading.Semaphore()
"""
Some addresses of the system.
I2C Addresses --> 7 Bit Length
"""
JETSON_I2CADDRESS = 0x00
ARDUINO_I2CADDR = 0x08
AUDIO_MAX9744_I2CADDR = 0x4B  # https://learn.adafruit.com/adafruit-20w-stereo-audio-amplifier-class-d-max9744/digital-control
OLED_DISPLAY_1_I2CADDR = 0x3C  # https://www.adafruit.com/product/938
OLED_DISPLAY_2_I2CADDR = 0x3D
"""
Create smbus instance and open the instance.
The jetson nano has two I2C busses.
I2C Bus 0:
SDA --> Pin 27
SCL --> Pin 28
I2C Bus 1:
SDA --> Pin 3
SCL --> Pin 5
If there are any I2C devices attached, you can scan that bus from the command line
$ i2cdetect -y -r 0
$ i2cdetect -y -r 1
"""
class I2CNode(Node):
    """ROS 2 node that forwards I2C write requests received on ROS topics
    to an SMBus device, falling back to a logging-only simulator (BusSim)
    when the hardware bus cannot be opened."""
    def __init__(self, bus = 1):
        super().__init__('i2cBridge_node')
        self.get_logger().info("Try starting I2C Bridge Node")
        # Close the bus cleanly when the ROS context shuts down.
        rclpy.get_default_context().on_shutdown(self.onShutdown)
        # Open I2C Bus
        try:
            self.bus = smbus.SMBus(bus)
            self.get_logger().info("Successfully opened SMbus({})".format(bus))
        except Exception as e:
            # No hardware bus available (e.g. dev machine): log and simulate.
            self.get_logger().info("{}".format(e))
            self.bus = BusSim(bus, self)
        # Subscribe to I2C Bridge Topics
        self.subI2C8 = self.create_subscription(I2Cwrite8, "system/i2c/write8", self.onWrite8, 10)
        self.subI2C16 = self.create_subscription(I2Cwrite16, "system/i2c/write16", self.onWrite16, 10)
        self.subI2CArr = self.create_subscription(I2CwriteArray, "system/i2c/writeArray", self.onWriteArray, 10)
        ################################
        ### LOG INFO ###################
        self.get_logger().info("Subscribed: system/i2c/write8 | Msg: system/I2Cwrite8")
        self.get_logger().info("Subscribed: system/i2c/write16 | Msg: system/I2Cwrite16")
        self.get_logger().info("Subscribed: system/i2c/writeArray | Msg: system/I2CwriteArray")
        ################################
    def onShutdown(self):
        # Release the (real or simulated) bus handle.
        self.bus.close()
    def onWrite16(self, msg):
        """Write the 16-bit word msg.data to register msg.command at msg.address."""
        info = "I2C write16: addr: {} cmd: {} data: {}".format(str(msg.address), str(msg.command), str(msg.data))
        self.get_logger().info(info)
        try:
            self.bus.write_word_data(msg.address, msg.command, msg.data)
        except Exception as e:
            # Log and drop the message; a failed write must not kill the node.
            self.get_logger().error(str(e))
        # sem.acquire()
        # try:
        #     self.bus.write_word_data(msg.address, msg.command, msg.data)
        # except expression as identifier:
        #     pass
        # finally:
        #     time.sleep(0.005)
        #     sem.release()
    def onWrite8(self, msg):
        """Write the single byte msg.data to register msg.command at msg.address."""
        info = "I2C write8: addr: {} cmd: {} data: {}".format(str(msg.address), str(msg.command), str(msg.data))
        self.get_logger().info(info)
        try:
            self.bus.write_byte_data(msg.address, msg.command, msg.data)
        except Exception as e:
            self.get_logger().error(str(e))
        # sem.acquire()
        # try:
        #     self.bus.write_byte_data(msg.address, msg.command, msg.data)
        # except expression as identifier:
        #     pass
        # finally:
        #     time.sleep(0.005)
        #     sem.release()
        #time.sleep(0.005)
    def onWriteArray(self, msg):
        """Write msg.data as a block to msg.address, retrying once on failure."""
        # self.get_logger().info(type(msg.data))
        data = []
        for d in msg.data:
            data.append(int(d))
        # Up to two attempts: block writes occasionally fail transiently.
        for i in range(2):
            try:
                self.bus.write_block_data(msg.address, msg.command, data)
                info = "I2C writeArray: addr: {} cmd: {} data: {}".format(str(msg.address), str(msg.command), list(msg.data))
                self.get_logger().info(info)
                break
            except Exception as e:
                self.get_logger().error(str(e) + " ... Trying again")
                time.sleep(0.05)
        # sem.acquire()
        # try:
        #     self.bus.write_block_data(msg.address, msg.command, data)
        # except expression as identifier:
        #     pass
        # finally:
        #     time.sleep(0.005)
        #     sem.release()
        #time.sleep(0.005)
def main(args=None):
    """Entry point: create the I2C bridge node and spin it until shutdown."""
    rclpy.init(args=args)
    bridge = I2CNode()
    rclpy.spin(bridge)
    # Close the bus before tearing the node down.
    bridge.onShutdown()
    bridge.destroy_node()
    rclpy.shutdown()
class BusSim(object):
    """Drop-in simulator for smbus.SMBus, used when the real bus fails to open.

    Mirrors the subset of the SMBus API that I2CNode uses (the write_*
    methods and close()) and logs every call through the supplied ROS
    node's logger instead of touching hardware.
    """
    def __init__(self, num, rosNode):
        self.num = num
        self.node = rosNode
        self.log = self.node.get_logger()
        self.log.info("####### ERROR OPENING SMbus #######")
        self.log.info("Starting SMbus Simulator ({})".format(self.num))
    def write_block_data(self, addr, cmd, data):
        self.log.info("Write @ {} CMD: {} Data: {}".format(addr, cmd, data))
    def write_byte_data(self, addr, cmd, data):
        self.log.info("Write @ {} CMD: {} Data: {}".format(addr, cmd, data))
    def write_word_data(self, addr, cmd, data):
        self.log.info("Write @ {} CMD: {} Data: {}".format(addr, cmd, data))
    def close(self):
        # I2CNode.onShutdown() calls bus.close(); previously the simulator
        # lacked this method and raised AttributeError on shutdown.
        self.log.info("Closing SMbus Simulator ({})".format(self.num))
# Run the node directly when executed as a script.
if __name__ == '__main__':
    main()
"""
### Address of the slave device
addr = 0x08
### Example values
cmd = 0x55 # Command or register
byteVal = 0x1A # byte value
wordVal = 0xABCD # 2 byte value
listVal = [5, 10, 15, 50] # value list
### Write operations
bus.write_byte(addr, byteVal)
bus.write_byte_data(addr, cmd, byteVal)
bus.write_word_data(addr, cmd, wordVal)
bus.write_block_data(addr, cmd, listVal)
### Read operations
readByte = bus.read_byte(addr)
readByte = bus.read_byte_data(addr, cmd)
readWord = bus.read_word_data(addr, cmd)
readList = bus.read_block_data(addr, cmd)
"""
""" List of smbus commands
Send only the read / write bit
long write_quick(int addr)
Read a single byte from a device, without specifying a device register.
long read_byte(int addr)
Send a single byte to a device (without command or register)
long write_byte(int addr, char val)
Read a single byte from a device (cmd is the command or register declaration)
long read_byte_data(int addr, char cmd)
Send a single byte to a device (cmd is the command or register declaration)
long write_byte_data(int addr, char cmd, char val)
Read a 16 Bit word from a device (cmd is the command or register declaration)
long read_word_data(int addr, char cmd)
Send a 16 Bit word from to a device (cmd is the command or register declaration)
long write_word_data(int addr, char cmd, int val)
Read a block of data from a device (cmd is the command or register declaration)
long[] read_block_data(int addr, char cmd)
Send a block of data to a device (cmd is the command or register declaration).
The data block should be maximum 31 Byte.
The function adds a length byte before the data bytes.
write_block_data(int addr, char cmd, long vals[])
Process Call transaction
long process_call(int addr, char cmd, int val)
Block Process Call transaction
long[] block_process_call(int addr, char cmd, long vals[])
Read a block of raw data from a device (cmd is the command or register declaration)
long[] read_i2c_block_data(int addr, char cmd)
Send a block of raw data to a device (cmd is the command or register declaration).
write_i2c_block_data(int addr,char cmd, long vals[])
"""
|
[
"rclpy.spin",
"rclpy.init",
"time.sleep",
"rclpy.shutdown",
"threading.Semaphore",
"rclpy.get_default_context",
"smbus.SMBus"
] |
[((561, 582), 'threading.Semaphore', 'threading.Semaphore', ([], {}), '()\n', (580, 582), False, 'import threading\n'), ((4494, 4515), 'rclpy.init', 'rclpy.init', ([], {'args': 'args'}), '(args=args)\n', (4504, 4515), False, 'import rclpy\n'), ((4555, 4571), 'rclpy.spin', 'rclpy.spin', (['node'], {}), '(node)\n', (4565, 4571), False, 'import rclpy\n'), ((4616, 4632), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (4630, 4632), False, 'import rclpy\n'), ((1497, 1513), 'smbus.SMBus', 'smbus.SMBus', (['bus'], {}), '(bus)\n', (1508, 1513), False, 'import smbus\n'), ((1394, 1421), 'rclpy.get_default_context', 'rclpy.get_default_context', ([], {}), '()\n', (1419, 1421), False, 'import rclpy\n'), ((4126, 4142), 'time.sleep', 'time.sleep', (['(0.05)'], {}), '(0.05)\n', (4136, 4142), False, 'import time\n')]
|
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Library imports
# ---------------
import os
import sys
from ctypes import CDLL
# Local imports
# -------------
from pyi_get_datadir import get_data_dir
# Pick the shared-library filename matching the current platform.
if sys.platform.startswith("darwin"):
    name = 'ctypes_dylib.dylib'
elif sys.platform.startswith('win32'):
    name = 'ctypes_dylib.dll'
else:
    name = 'ctypes_dylib.so'
# Test resolving dynamic libraries loaded in Python code at runtime
# by Python module 'ctypes'.
dylib_path = os.path.join(get_data_dir(), 'ctypes_dylib', name)
tct = CDLL(dylib_path)
# The "dummy" function in ctypes_dylib returning value + 12.
assert tct.dummy(42) == (42 + 12)
|
[
"sys.platform.startswith",
"pyi_get_datadir.get_data_dir"
] |
[((602, 634), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (625, 634), False, 'import sys\n'), ((671, 704), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (694, 704), False, 'import sys\n'), ((895, 909), 'pyi_get_datadir.get_data_dir', 'get_data_dir', ([], {}), '()\n', (907, 909), False, 'from pyi_get_datadir import get_data_dir\n')]
|
#!/usr/bin/env python3
import requests
from requests.auth import HTTPBasicAuth
from flask import Flask, request, jsonify
# Nexmo API credentials — fill in before running.
NEXMO_API_KEY = ''
NEXMO_API_SECRET = ''
app = Flask(__name__)
# Page 1: form with a <select> of audit event types ({SELECT_OPTIONS} is
# filled in by root()).
template1 = '''
<html>
<head>
<title>Audit Event Types</title>
</head>
<body>
<form action='/events' method='get'>
<select name='event_type'>
<option value='ALL'>ALL -- All the event types</option>
{SELECT_OPTIONS}
</select>
<input type='submit'>
</form>
</body>
</html>
'''
# Page 2: table of audit events ({TABLE_ROWS} is filled in by events()).
template2 = '''
<html>
<head>
<title>Audit Events Listing</title>
</head>
<body>
<table border='1'>
<tr><th>Audit Event Type</th><th>Date/time of event</th><th>Event source</th><th>Context</th></tr>
{TABLE_ROWS}
</table>
</body>
</html>
'''
@app.route("/")
def root():
    """Render a form listing the audit event types the Nexmo API supports."""
    resp = requests.options(
        'https://api.nexmo.com/beta/audit/events',
        auth=HTTPBasicAuth(NEXMO_API_KEY, NEXMO_API_SECRET),
    )
    event_types = resp.json()['eventTypes']
    select_options = ""
    for evt_t in event_types:
        select_options += (
            "<option value='" + evt_t['type'] + "'>" + evt_t['type']
            + " -- " + evt_t['description'] + "</option>"
        )
    return template1.format(SELECT_OPTIONS=select_options)
@app.route("/events")
def events():
    """List audit events, optionally filtered by event type.

    Reads the optional ``event_type`` query parameter; a missing parameter
    is treated the same as 'ALL'. (Previously the bare ``params['event_type']``
    lookup returned a 400 error when the parameter was absent.)

    Returns:
        str: rendered HTML table, or a plain message when no events exist.
    """
    # .get() with a default avoids BadRequest/KeyError on a missing parameter.
    evt_type = request.args.get('event_type', 'ALL')
    auth = HTTPBasicAuth(NEXMO_API_KEY, NEXMO_API_SECRET)
    if evt_type == 'ALL':
        r = requests.get('https://api.nexmo.com/beta/audit/events', auth=auth)
    else:
        r = requests.get('https://api.nexmo.com/beta/audit/events?event_type=' + evt_type, auth=auth)
    j = r.json()
    if '_embedded' in j:
        events = j['_embedded']['events']
    else:
        return ("No Events Found")
    table_rows = ""
    for evt in events:
        # 'context' is optional in the API response.
        if 'context' in evt:
            event_context = str(evt['context'])
        else:
            event_context = 'None'
        table_rows = table_rows + "<tr><td>" + (evt['event_type'] + "</td><td>" + evt['created_at'] + "</td><td>" + evt['source'] + "</td><td>" + event_context + "</td></tr>")
    html = template2.format(TABLE_ROWS=table_rows)
    return(html)
if __name__ == '__main__':
    # Run the Flask development server on port 9000.
    app.run(port=9000)
|
[
"requests.auth.HTTPBasicAuth",
"flask.Flask"
] |
[((171, 186), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (176, 186), False, 'from flask import Flask, request, jsonify\n'), ((896, 942), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['NEXMO_API_KEY', 'NEXMO_API_SECRET'], {}), '(NEXMO_API_KEY, NEXMO_API_SECRET)\n', (909, 942), False, 'from requests.auth import HTTPBasicAuth\n'), ((1476, 1522), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['NEXMO_API_KEY', 'NEXMO_API_SECRET'], {}), '(NEXMO_API_KEY, NEXMO_API_SECRET)\n', (1489, 1522), False, 'from requests.auth import HTTPBasicAuth\n'), ((1629, 1675), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['NEXMO_API_KEY', 'NEXMO_API_SECRET'], {}), '(NEXMO_API_KEY, NEXMO_API_SECRET)\n', (1642, 1675), False, 'from requests.auth import HTTPBasicAuth\n')]
|
'''
Experiment: OLED display (I2C bus)
Version: v1.0
Date: 2021.4
Author: 01Studio
Platform: DaVinci board
Community: www.01studio.org
'''
from machine import SoftI2C,Pin  # import the SoftI2C and Pin classes from the machine module
from ssd1306 import SSD1306_I2C  # import the SSD1306_I2C driver from the ssd1306 module
i2c = SoftI2C(scl='B2', sda='B0')  # SoftI2C init: scl --> PB2, sda --> PB0
oled = SSD1306_I2C(128, 64, i2c, addr=0x3c)  # OLED init: 128*64 resolution, I2C address 0x3c
oled.text("Hello World!", 0, 0)  # write line 1 into the frame buffer
oled.text("MicroPython", 0, 20)  # write line 2
oled.text("By 01Studio", 0, 50)  # write line 3
oled.show()  # push the frame buffer to the OLED
|
[
"ssd1306.SSD1306_I2C",
"machine.SoftI2C"
] |
[((229, 256), 'machine.SoftI2C', 'SoftI2C', ([], {'scl': '"""B2"""', 'sda': '"""B0"""'}), "(scl='B2', sda='B0')\n", (236, 256), False, 'from machine import SoftI2C, Pin\n'), ((302, 336), 'ssd1306.SSD1306_I2C', 'SSD1306_I2C', (['(128)', '(64)', 'i2c'], {'addr': '(60)'}), '(128, 64, i2c, addr=60)\n', (313, 336), False, 'from ssd1306 import SSD1306_I2C\n')]
|
import copy
from typing import List, Dict
import numpy as np
from prettytable import PrettyTable
from ase import Atoms
from dscribe.descriptors import SineMatrix
from dscribe.descriptors import CoulombMatrix
from dscribe.descriptors import ACSF
from dscribe.descriptors import SOAP
from matminer.featurizers.composition import ElementProperty
from matminer.featurizers.site import ChemicalSRO
from matminer.featurizers.site import OPSiteFingerprint
from matminer.featurizers.site import CrystalNNFingerprint
from pymatgen.io.ase import AseAtomsAdaptor
from pymatgen.core.periodic_table import Element
# matminer featurizer classes that Featurizer knows how to drive.
SUPPORTED_MATMINER_CLASSES = [
    ElementProperty,
    ChemicalSRO,
    OPSiteFingerprint,
    CrystalNNFingerprint,
]
# dscribe descriptor classes that Featurizer knows how to drive.
SUPPORTED_DSCRIBE_CLASSES = [SineMatrix, CoulombMatrix, ACSF, SOAP]
class FeaturizerError(Exception):
    """Raised for invalid Featurizer configuration (unsupported class or preset)."""
    pass
class Featurizer:
    def __init__(
        self,
        featurizer_class=None,  # black
        design_space_structures: List[Atoms] = None,
        species_list: List[str] = None,
        max_size: int = None,
        preset: str = None,
        kwargs: Dict = None,
    ):
        """Configure a featurizer wrapper.

        Defaults are assigned to the private attributes first; each public
        property setter then validates/overrides them. The order matters:
        featurizer_class resets preset/kwargs, and design_space_structures
        (applied last) overrides max_size and species_list when given.
        """
        self._featurizer_class = SineMatrix
        self.featurizer_class = featurizer_class
        self._preset = None
        self.preset = preset
        self._kwargs = None
        self.kwargs = kwargs
        self._max_size = 100
        self.max_size = max_size
        self._species_list = ["Fe", "Ni", "Pt", "Pd", "Cu", "C", "N", "O", "H"]
        self.species_list = species_list
        # overrides max_size and species_list if given
        self._design_space_structures = None
        self.design_space_structures = design_space_structures
def __eq__(self, other: object) -> bool:
if isinstance(other, Featurizer):
for attr in [
"featurizer_class",
"species_list",
"max_size",
"preset",
"kwargs",
]:
if getattr(self, attr) != getattr(other, attr):
return False
return True
return False
def __repr__(self) -> str:
pt = PrettyTable()
pt.field_names = ["", "Featurizer"]
class_name = (
self.featurizer_class.__module__ + "." + self.featurizer_class.__name__
)
pt.add_row(["class", class_name])
pt.add_row(["kwargs", self.kwargs])
pt.add_row(["species list", self.species_list])
pt.add_row(["maximum structure size", self.max_size])
pt.add_row(["preset", self.preset])
pt.add_row(
[
"design space structures provided?",
self.design_space_structures is not None,
]
)
pt.max_width = 70
return str(pt)
def copy(self):
"""
Returns a copy of the featurizer
"""
ds_structs_copy = (
[struct.copy() for struct in self.design_space_structures]
if self.design_space_structures
else None
)
feat = self.__class__(
featurizer_class=self.featurizer_class,
design_space_structures=ds_structs_copy,
species_list=self.species_list.copy(),
max_size=self.max_size,
kwargs=copy.deepcopy(self.kwargs) if self.kwargs else None,
)
return feat
    @property
    def featurizer_class(self):
        # The wrapped matminer/dscribe featurizer class (a class, not an instance).
        return self._featurizer_class
    @featurizer_class.setter
    def featurizer_class(self, featurizer_class):
        # Only classes from the two supported libraries are accepted.
        if (
            featurizer_class in SUPPORTED_MATMINER_CLASSES
            or featurizer_class in SUPPORTED_DSCRIBE_CLASSES
        ):
            self._featurizer_class = featurizer_class
            # Changing the class invalidates any class-specific options.
            self._preset = None
            self._kwargs = None
        else:
            msg = f"Featurization class {featurizer_class} is not currently supported."
            raise FeaturizerError(msg)
    @property
    def preset(self):
        # Name of a built-in parameter preset for the featurizer class, if any.
        return self._preset
    @preset.setter
    def preset(self, preset):
        # Presets are only meaningful for these two matminer featurizers;
        # other classes may only "set" a preset of None.
        if self.featurizer_class in [CrystalNNFingerprint, ElementProperty]:
            self._preset = preset
        elif preset is None:
            self._preset = preset
        else:
            msg = f"Presets are not supported for {self.featurizer_class.__module__}"
            raise FeaturizerError(msg)
@property
def kwargs(self):
return self._kwargs
@kwargs.setter
def kwargs(self, kwargs):
if kwargs is not None:
self._kwargs = kwargs.copy()
    @property
    def design_space_structures(self):
        # Structures used to derive `max_size` and `species_list`; may be None.
        return self._design_space_structures
    @design_space_structures.setter
    def design_space_structures(self, design_space_structures: List[Atoms]):
        if design_space_structures is not None:
            # Store defensive copies so later mutation of the caller's
            # structures cannot affect this featurizer.
            self._design_space_structures = [
                struct.copy() for struct in design_space_structures
            ]
            # analyze new design space
            ds_structs = design_space_structures
            _species_list = []
            for s in ds_structs:
                # get all unique species
                found_species = np.unique(s.get_chemical_symbols()).tolist()
                new_species = [
                    spec for spec in found_species if spec not in _species_list
                ]
                _species_list.extend(new_species)
            # sort species list by Mendeleev number for a reproducible order
            sorted_species_list = sorted(
                _species_list, key=lambda el: Element(el).mendeleev_no
            )
            # NOTE: setting structures overrides any explicitly provided
            # max_size / species_list (see the assignment order in __init__).
            self._max_size = max([len(s) for s in ds_structs])
            self._species_list = sorted_species_list
@property
def max_size(self):
return self._max_size
@max_size.setter
def max_size(self, max_size):
if max_size is not None:
self._max_size = max_size
    @property
    def species_list(self):
        # Chemical species to support, kept sorted by Mendeleev number.
        return self._species_list
    @species_list.setter
    def species_list(self, species_list: List[str]):
        # None keeps the current value (default assigned in __init__).
        if species_list is not None:
            # defensive copy of the caller's list
            _species_list = species_list.copy()
            # sort species list by mendeleev number
            sorted_species_list = sorted(
                _species_list, key=lambda el: Element(el).mendeleev_no
            )
            self._species_list = sorted_species_list
    # TODO: "get_featurization_object" -> "get_featurizer"
    @property
    def featurization_object(self):
        # A freshly instantiated featurizer built from the current settings;
        # note a new object is created on every access.
        return self._get_featurization_object()
    def _get_featurization_object(self):
        # instantiate featurizer object
        # 1) Classes exposing `from_preset` are built from the chosen preset.
        if hasattr(self.featurizer_class, "from_preset") and self.preset is not None:
            return self.featurizer_class.from_preset(self.preset)
        # 2) Matrix featurizers are sized by the maximum structure size;
        #    permutation="none" per the matminer API.
        if self.featurizer_class in [SineMatrix, CoulombMatrix]:
            return self.featurizer_class(
                n_atoms_max=self.max_size, permutation="none", **self.kwargs or {},
            )
        # 3) DScribe descriptors are parameterized by the species list.
        if self.featurizer_class in [SOAP, ACSF]:
            return self.featurizer_class(species=self.species_list, **self.kwargs or {})
        # 4) Everything else takes only the user-supplied kwargs.
        return self.featurizer_class(**self.kwargs or {})
    def featurize_single(self, structure: Atoms):
        """
        Featurize a single structure. Returns a single vector
        Parameters
        ----------
        structure:
            ase.Atoms object of structure to be featurized
        Returns
        -------
        representation:
            Numpy array of feature vector (not flattened)
        """
        feat_class = self.featurizer_class
        featurization_object = self.featurization_object
        # dscribe classes
        if feat_class in [SOAP, ACSF]:
            # atoms with tag <= 0 are treated as the adsorbate sites
            adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
            return featurization_object.create(structure, positions=adsorbate_indices,)
        if feat_class in [SineMatrix, CoulombMatrix]:
            # full-structure matrix representation, flattened to 1-D
            return featurization_object.create(structure).reshape(-1,)
        # matminer classes
        pym_struct = AseAtomsAdaptor().get_structure(structure)
        if feat_class == ElementProperty:
            # composition-level featurization (no per-site information)
            return np.array(featurization_object.featurize(pym_struct.composition))
        representation = np.array([])
        if feat_class in [CrystalNNFingerprint, OPSiteFingerprint]:
            # per-site featurization at each adsorbate site, concatenated
            adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
            for idx in adsorbate_indices:
                feat = featurization_object.featurize(pym_struct, idx)
                representation = np.concatenate((representation, feat))
            return representation
        if feat_class == ChemicalSRO:
            adsorbate_indices = np.where(structure.get_tags() <= 0)[0].tolist()
            formatted_list = [[pym_struct, idx] for idx in adsorbate_indices]
            # ChemicalSRO must be fit on the (structure, site) pairs first
            featurization_object.fit(formatted_list)
            for idx in adsorbate_indices:
                feat = featurization_object.featurize(pym_struct, idx)
                representation = np.concatenate((representation, feat))
            return representation
        # unsupported featurizer class: no representation produced
        return None
def featurize_multiple(self, structures: List[Atoms]):
"""
Featurize multiple structures. Returns a matrix where each
row is the flattened feature vector of each system
Parameters
----------
structures:
List of ase.Atoms structures to be featurized
Returns
-------
X:
Numpy array of shape (number of structures, number of features)
"""
first_vec = self.featurize_single(structures[0]).flatten()
num_features = len(first_vec)
# if adsorbate featurization, assumes only 1 adsorbate in design space
# (otherwise would require padding)
X = np.zeros((len(structures), num_features))
X[0, :] = first_vec.copy()
for i in range(1, len(structures)):
X[i, :] = self.featurize_single(structures[i]).flatten()
return X
|
[
"copy.deepcopy",
"pymatgen.io.ase.AseAtomsAdaptor",
"numpy.array",
"prettytable.PrettyTable",
"pymatgen.core.periodic_table.Element",
"numpy.concatenate"
] |
[((2141, 2154), 'prettytable.PrettyTable', 'PrettyTable', ([], {}), '()\n', (2152, 2154), False, 'from prettytable import PrettyTable\n'), ((8201, 8213), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (8209, 8213), True, 'import numpy as np\n'), ((8007, 8024), 'pymatgen.io.ase.AseAtomsAdaptor', 'AseAtomsAdaptor', ([], {}), '()\n', (8022, 8024), False, 'from pymatgen.io.ase import AseAtomsAdaptor\n'), ((8508, 8546), 'numpy.concatenate', 'np.concatenate', (['(representation, feat)'], {}), '((representation, feat))\n', (8522, 8546), True, 'import numpy as np\n'), ((8976, 9014), 'numpy.concatenate', 'np.concatenate', (['(representation, feat)'], {}), '((representation, feat))\n', (8990, 9014), True, 'import numpy as np\n'), ((3285, 3311), 'copy.deepcopy', 'copy.deepcopy', (['self.kwargs'], {}), '(self.kwargs)\n', (3298, 3311), False, 'import copy\n'), ((5510, 5521), 'pymatgen.core.periodic_table.Element', 'Element', (['el'], {}), '(el)\n', (5517, 5521), False, 'from pymatgen.core.periodic_table import Element\n'), ((6243, 6254), 'pymatgen.core.periodic_table.Element', 'Element', (['el'], {}), '(el)\n', (6250, 6254), False, 'from pymatgen.core.periodic_table import Element\n')]
|
from django.contrib import admin

from omap.assets.models import SimpleAsset, RelationType, AssetRelation

# Make the asset models editable through the Django admin site.
for _model in (SimpleAsset, RelationType, AssetRelation):
    admin.site.register(_model)
|
[
"django.contrib.admin.site.register"
] |
[((107, 139), 'django.contrib.admin.site.register', 'admin.site.register', (['SimpleAsset'], {}), '(SimpleAsset)\n', (126, 139), False, 'from django.contrib import admin\n'), ((140, 173), 'django.contrib.admin.site.register', 'admin.site.register', (['RelationType'], {}), '(RelationType)\n', (159, 173), False, 'from django.contrib import admin\n'), ((174, 208), 'django.contrib.admin.site.register', 'admin.site.register', (['AssetRelation'], {}), '(AssetRelation)\n', (193, 208), False, 'from django.contrib import admin\n')]
|
# _________________________________________________________________________
#
# PyUtilib: A Python utility library.
# Copyright (c) 2008 Sandia Corporation.
# This software is distributed under the BSD License.
# Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
# the U.S. Government retains certain rights in this software.
# _________________________________________________________________________
"""
The pyutilib.component.config package includes utilities to configure
the PyUtilib Component Architecture. This includes facilities for using
configuration files, controlling logging, and specifying component options.
"""
from pyutilib.component.core import PluginGlobals
# Push the "pca" plugin environment before importing the submodules below —
# presumably so the plugins they declare register under this environment;
# the import order is therefore significant.
PluginGlobals.add_env("pca")
from pyutilib.component.config.env_config import EnvironmentConfig
from pyutilib.component.config.options import ExecutableOption, declare_option, Option
from pyutilib.component.config.managed_plugin import ManagedPlugin, ManagedSingletonPlugin
from pyutilib.component.config.configuration import Configuration, ConfigurationError
from pyutilib.component.config.logging_config import LoggingConfig
from pyutilib.component.config.tempfiles import ITempfileManager, TempfileManagerPlugin, TempfileManager
import pyutilib.component.config.plugin_ConfigParser
# Restore the previous plugin environment now that registration is complete.
PluginGlobals.pop_env()
|
[
"pyutilib.component.core.PluginGlobals.pop_env",
"pyutilib.component.core.PluginGlobals.add_env"
] |
[((708, 736), 'pyutilib.component.core.PluginGlobals.add_env', 'PluginGlobals.add_env', (['"""pca"""'], {}), "('pca')\n", (729, 736), False, 'from pyutilib.component.core import PluginGlobals\n'), ((1295, 1318), 'pyutilib.component.core.PluginGlobals.pop_env', 'PluginGlobals.pop_env', ([], {}), '()\n', (1316, 1318), False, 'from pyutilib.component.core import PluginGlobals\n')]
|
#!/usr/bin/env python
from django.utils.translation import ugettext as _
class DatasetLockedError(Exception):
    """Raised when a lock can not be acquired on a dataset."""
class DataUploadNotDeletable(Exception):
    """Raised when a DataUpload can not be deleted."""
class DataSamplingError(Exception):
    """Raised when data can't be sampled from a file, such as when
    unexpected encodings are encountered."""
class DataImportError(Exception):
    """Raised when a DataImport fails synchronously due to an
    unsupported file type, mismatched columns, etc."""
class NotSniffableError(Exception):
    """Raised when a file's dialect could not be inferred automatically."""
class TypeInferenceError(Exception):
    """Raised when a column's type can not be inferred."""
class TypeCoercionError(Exception):
    """
    Exception raised when a value can not be coerced to a given type.

    Attributes:
        value: the value that failed coercion.
        normal_type: the type the value was being coerced to.
    """
    def __init__(self, value, normal_type):
        self.value = value
        self.normal_type = normal_type
        msg = _('Unable to convert "%(value)s" to type %(normal_type)s') \
            % {'value': value, 'normal_type': normal_type}
        # Bug fix: `msg` was previously computed but never used — the exception
        # was initialized with the raw (value, normal_type) pair, so str(e)
        # showed a tuple instead of the translated message. Pass the message
        # through; the original values remain available as attributes.
        super(TypeCoercionError, self).__init__(msg)
|
[
"django.utils.translation.ugettext"
] |
[((1207, 1265), 'django.utils.translation.ugettext', '_', (['"""Unable to convert "%(value)s" to type %(normal_type)s"""'], {}), '(\'Unable to convert "%(value)s" to type %(normal_type)s\')\n', (1208, 1265), True, 'from django.utils.translation import ugettext as _\n')]
|
#!/usr/bin/env python
from __future__ import unicode_literals
import unittest
from securetrading.test import abstract_test
import glob
import os
try:
import pep8
except ImportError:
pep8 = None
class Test_CodeFormat(abstract_test.TestCase):
    def get_files(self, directory):
        # All Python source files directly inside `directory` (non-recursive).
        return glob.glob(os.path.join(directory, "*.py"))
    def test_pep8_conformance(self):
        """Test that we conform to PEP8."""
        if pep8 is not None:
            # Error codes listed here would be excluded from the check.
            ignore_error_codes = []
            base_path = self.get_package_path()
            # Check the package root and its "test" subdirectory.
            test_cases = ["",
                          "test",
                          ]
            for directory in test_cases:
                path = os.path.join(base_path, directory)
                if os.path.exists(path):
                    files = self.get_files(path)
                    results = []
                    # Need to check if pep8 is installed before running
                    for f in sorted(files):
                        pep8_style = pep8.StyleGuide(quiet=True,
                                                    ignore=ignore_error_codes)
                        result = pep8_style.check_files([f])
                        if result.total_errors != 0:
                            results.append("Found code style errors (and warnings)\
                                Run 'pep8 --show-source {0}'.".format(f))
                    # One combined assertion per directory so every offending
                    # file is reported together.
                    self.assertEqual(0, len(results),
                                     "results {0}".format(results))
        else:
            print("PEP8 module is not installed skipping test.")
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"pep8.StyleGuide",
"os.path.join",
"os.path.exists"
] |
[((1600, 1615), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1613, 1615), False, 'import unittest\n'), ((314, 345), 'os.path.join', 'os.path.join', (['directory', '"""*.py"""'], {}), "(directory, '*.py')\n", (326, 345), False, 'import os\n'), ((701, 735), 'os.path.join', 'os.path.join', (['base_path', 'directory'], {}), '(base_path, directory)\n', (713, 735), False, 'import os\n'), ((755, 775), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (769, 775), False, 'import os\n'), ((1013, 1067), 'pep8.StyleGuide', 'pep8.StyleGuide', ([], {'quiet': '(True)', 'ignore': 'ignore_error_codes'}), '(quiet=True, ignore=ignore_error_codes)\n', (1028, 1067), False, 'import pep8\n')]
|
import os
import re
import zmq
from zmq.utils import jsonapi
from tornado.queues import Queue
from tornado.locks import Event
from IPython.core.getipython import get_ipython
from IPython.core.inputtransformer2 import leading_empty_lines
try:
from jupyter_client.jsonutil import json_default
except ImportError:
from jupyter_client.jsonutil import date_default as json_default
from .compiler import (get_file_name, get_tmp_directory, get_tmp_hash_seed)
# This import is required to have the next ones working...
from debugpy.server import api # noqa
from _pydevd_bundle import pydevd_frame_utils
from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager, _FramesTracker
# Required for backwards compatibility: older pyzmq versions expose
# zmq.IDENTITY instead of zmq.ROUTING_ID.
ROUTING_ID = getattr(zmq, 'ROUTING_ID', None) or zmq.IDENTITY
class _FakeCode:
def __init__(self, co_filename, co_name):
self.co_filename = co_filename
self.co_name = co_name
class _FakeFrame:
def __init__(self, f_code, f_globals, f_locals):
self.f_code = f_code
self.f_globals = f_globals
self.f_locals = f_locals
self.f_back = None
class _DummyPyDB:
    # Minimal substitute for a pydevd debugger instance; presumably the
    # suspended-frames machinery only reads `variable_presentation` — confirm
    # against _FramesTracker before extending.
    def __init__(self):
        from _pydevd_bundle.pydevd_api import PyDevdAPI
        self.variable_presentation = PyDevdAPI.VariablePresentation()
class VariableExplorer:
    # Inspects the IPython user namespace through pydevd's suspended-frame
    # variable tracking, by wrapping the namespace in a fake frame.
    def __init__(self):
        self.suspended_frame_manager = SuspendedFramesManager()
        self.py_db = _DummyPyDB()
        self.tracker = _FramesTracker(self.suspended_frame_manager, self.py_db)
        self.frame = None
    def track(self):
        # Wrap the user namespace in a fake frame (globals == locals) so
        # pydevd can treat it like a suspended stack frame.
        var = get_ipython().user_ns
        self.frame = _FakeFrame(_FakeCode('<module>', get_file_name('sys._getframe()')), var, var)
        self.tracker.track('thread1', pydevd_frame_utils.create_frames_list_from_frame(self.frame))
    def untrack_all(self):
        self.tracker.untrack_all()
    def get_children_variables(self, variable_ref = None):
        # Default to the top-level fake frame when no reference is given;
        # id(frame) is the variable reference pydevd uses for a frame.
        var_ref = variable_ref
        if not var_ref:
            var_ref = id(self.frame)
        variables = self.suspended_frame_manager.get_variable(var_ref)
        return [x.get_var_data() for x in variables.get_children_variables()]
class DebugpyMessageQueue:
    # Reassembles DAP messages from raw TCP frames. Events are dispatched
    # immediately via `event_callback`; every other message (i.e. responses)
    # is buffered in an async queue for `get_message`.
    HEADER = 'Content-Length: '
    HEADER_LENGTH = 16
    SEPARATOR = '\r\n\r\n'
    SEPARATOR_LENGTH = 4
    def __init__(self, event_callback, log):
        self.tcp_buffer = ''
        self._reset_tcp_pos()
        self.event_callback = event_callback
        self.message_queue = Queue()
        self.log = log
    def _reset_tcp_pos(self):
        # Parser state: byte offsets into tcp_buffer (-1 means "not found yet").
        self.header_pos = -1
        self.separator_pos = -1
        self.message_size = 0
        self.message_pos = -1
    def _put_message(self, raw_msg):
        # Decode one complete DAP message and route it: events go straight to
        # the callback, responses are queued.
        self.log.debug('QUEUE - _put_message:')
        msg = jsonapi.loads(raw_msg)
        if msg['type'] == 'event':
            self.log.debug('QUEUE - received event:')
            self.log.debug(msg)
            self.event_callback(msg)
        else:
            self.log.debug('QUEUE - put message:')
            self.log.debug(msg)
            self.message_queue.put_nowait(msg)
    def put_tcp_frame(self, frame):
        # Append the raw frame and extract as many complete messages as the
        # buffer now contains (messages may span frames or share one frame).
        self.tcp_buffer += frame
        self.log.debug('QUEUE - received frame')
        while True:
            # Finds header
            if self.header_pos == -1:
                self.header_pos = self.tcp_buffer.find(DebugpyMessageQueue.HEADER)
            if self.header_pos == -1:
                return
            self.log.debug('QUEUE - found header at pos %i', self.header_pos)
            # Finds separator
            if self.separator_pos == -1:
                hint = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.separator_pos = self.tcp_buffer.find(DebugpyMessageQueue.SEPARATOR, hint)
            if self.separator_pos == -1:
                return
            self.log.debug('QUEUE - found separator at pos %i', self.separator_pos)
            if self.message_pos == -1:
                # The bytes between header and separator are the decimal size.
                size_pos = self.header_pos + DebugpyMessageQueue.HEADER_LENGTH
                self.message_pos = self.separator_pos + DebugpyMessageQueue.SEPARATOR_LENGTH
                self.message_size = int(self.tcp_buffer[size_pos:self.separator_pos])
            self.log.debug('QUEUE - found message at pos %i', self.message_pos)
            self.log.debug('QUEUE - message size is %i', self.message_size)
            if len(self.tcp_buffer) - self.message_pos < self.message_size:
                # Body not fully received yet; wait for the next frame.
                return
            self._put_message(self.tcp_buffer[self.message_pos:self.message_pos + self.message_size])
            if len(self.tcp_buffer) - self.message_pos == self.message_size:
                self.log.debug('QUEUE - resetting tcp_buffer')
                self.tcp_buffer = ''
                self._reset_tcp_pos()
                return
            else:
                # Keep the leftover bytes (start of the next message) and loop.
                self.tcp_buffer = self.tcp_buffer[self.message_pos + self.message_size:]
                self.log.debug('QUEUE - slicing tcp_buffer: %s', self.tcp_buffer)
                self._reset_tcp_pos()
    async def get_message(self):
        # Await the next queued (non-event) message.
        return await self.message_queue.get()
class DebugpyClient:
    # Sends DAP requests to debugpy over a zmq stream and receives the
    # framed responses/events through a DebugpyMessageQueue.
    def __init__(self, log, debugpy_stream, event_callback):
        self.log = log
        self.debugpy_stream = debugpy_stream
        self.event_callback = event_callback
        self.message_queue = DebugpyMessageQueue(self._forward_event, self.log)
        self.debugpy_host = '127.0.0.1'
        self.debugpy_port = -1
        self.routing_id = None
        self.wait_for_attach = True
        self.init_event = Event()
        self.init_event_seq = -1
    def _get_endpoint(self):
        # zmq-style endpoint string, e.g. "tcp://127.0.0.1:5678".
        host, port = self.get_host_port()
        return 'tcp://' + host + ':' + str(port)
    def _forward_event(self, msg):
        # Remember the 'initialized' event (and its seq) so the attach
        # handshake in _handle_init_sequence can proceed.
        if msg['event'] == 'initialized':
            self.init_event.set()
            self.init_event_seq = msg['seq']
        self.event_callback(msg)
    def _send_request(self, msg):
        if self.routing_id is None:
            self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)
        content = jsonapi.dumps(
            msg,
            default=json_default,
            ensure_ascii=False,
            allow_nan=False,
        )
        # Frame the JSON body with the DAP Content-Length header.
        content_length = str(len(content))
        buf = (DebugpyMessageQueue.HEADER + content_length + DebugpyMessageQueue.SEPARATOR).encode('ascii')
        buf += content
        self.log.debug("DEBUGPYCLIENT:")
        self.log.debug(self.routing_id)
        self.log.debug(buf)
        self.debugpy_stream.send_multipart((self.routing_id, buf))
    async def _wait_for_response(self):
        # Since events are never pushed to the message_queue
        # we can safely assume the next message in queue
        # will be an answer to the previous request
        return await self.message_queue.get_message()
    async def _handle_init_sequence(self):
        # 1] Waits for initialized event
        await self.init_event.wait()
        # 2] Sends configurationDone request
        configurationDone = {
            'type': 'request',
            'seq': int(self.init_event_seq) + 1,
            'command': 'configurationDone'
        }
        self._send_request(configurationDone)
        # 3] Waits for configurationDone response
        await self._wait_for_response()
        # 4] Waits for attachResponse and returns it
        attach_rep = await self._wait_for_response()
        return attach_rep
    def get_host_port(self):
        # Lazily pick a free TCP port by binding/unbinding a random port once.
        if self.debugpy_port == -1:
            socket = self.debugpy_stream.socket
            socket.bind_to_random_port('tcp://' + self.debugpy_host)
            self.endpoint = socket.getsockopt(zmq.LAST_ENDPOINT).decode('utf-8')
            socket.unbind(self.endpoint)
            index = self.endpoint.rfind(':')
            self.debugpy_port = self.endpoint[index+1:]
        return self.debugpy_host, self.debugpy_port
    def connect_tcp_socket(self):
        self.debugpy_stream.socket.connect(self._get_endpoint())
        self.routing_id = self.debugpy_stream.socket.getsockopt(ROUTING_ID)
    def disconnect_tcp_socket(self):
        # Reset handshake state so a future reconnect re-runs the init sequence.
        self.debugpy_stream.socket.disconnect(self._get_endpoint())
        self.routing_id = None
        self.init_event = Event()
        self.init_event_seq = -1
        self.wait_for_attach = True
    def receive_dap_frame(self, frame):
        self.message_queue.put_tcp_frame(frame)
    async def send_dap_request(self, msg):
        self._send_request(msg)
        # The first 'attach' request triggers the full init handshake.
        if self.wait_for_attach and msg['command'] == 'attach':
            rep = await self._handle_init_sequence()
            self.wait_for_attach = False
            return rep
        else:
            rep = await self._wait_for_response()
            self.log.debug('DEBUGPYCLIENT - returning:')
            self.log.debug(rep)
            return rep
class Debugger:
    # Kernel-side DAP request dispatcher: forwards requests to debugpy via
    # DebugpyClient and implements a few requests locally (dumpCell,
    # debugInfo, variable inspection, ...).
    # Requests that requires that the debugger has started
    started_debug_msg_types = [
        'dumpCell', 'setBreakpoints',
        'source', 'stackTrace',
        'variables', 'attach',
        'configurationDone'
    ]
    # Requests that can be handled even if the debugger is not running
    static_debug_msg_types = [
        'debugInfo', 'inspectVariables', 'richInspectVariables'
    ]
    def __init__(self, log, debugpy_stream, event_callback, shell_socket, session):
        self.log = log
        self.debugpy_client = DebugpyClient(log, debugpy_stream, self._handle_event)
        self.shell_socket = shell_socket
        self.session = session
        self.is_started = False
        self.event_callback = event_callback
        # Map each request command to its handler method of the same name.
        self.started_debug_handlers = {}
        for msg_type in Debugger.started_debug_msg_types:
            self.started_debug_handlers[msg_type] = getattr(self, msg_type)
        self.static_debug_handlers = {}
        for msg_type in Debugger.static_debug_msg_types:
            self.static_debug_handlers[msg_type] = getattr(self, msg_type)
        self.breakpoint_list = {}
        self.stopped_threads = []
        self.debugpy_initialized = False
        self._removed_cleanup = {}
        self.debugpy_host = '127.0.0.1'
        self.debugpy_port = 0
        self.endpoint = None
        self.variable_explorer = VariableExplorer()
    def _handle_event(self, msg):
        # Track which threads are currently stopped at a breakpoint.
        if msg['event'] == 'stopped':
            self.stopped_threads.append(msg['body']['threadId'])
        elif msg['event'] == 'continued':
            try:
                self.stopped_threads.remove(msg['body']['threadId'])
            except Exception:
                # Thread was not recorded as stopped; ignore.
                pass
        self.event_callback(msg)
    async def _forward_message(self, msg):
        # Pass the request through to debugpy unchanged.
        return await self.debugpy_client.send_dap_request(msg)
    def _build_variables_response(self, request, variables):
        # Build a DAP 'variables' response, filtering out internal names.
        var_list = [var for var in variables if self.accept_variable(var['name'])]
        reply = {
            'seq': request['seq'],
            'type': 'response',
            'request_seq': request['seq'],
            'success': True,
            'command': request['command'],
            'body': {
                'variables': var_list
            }
        }
        return reply
    @property
    def tcp_client(self):
        return self.debugpy_client
    def start(self):
        # Start debugpy inside the kernel (via a hidden execute_request) and
        # connect to it. Returns True when debugpy is listening.
        if not self.debugpy_initialized:
            tmp_dir = get_tmp_directory()
            if not os.path.exists(tmp_dir):
                os.makedirs(tmp_dir)
            host, port = self.debugpy_client.get_host_port()
            code = 'import debugpy;'
            code += 'debugpy.listen(("' + host + '",' + port + '))'
            content = {
                'code': code,
                'silent': True
            }
            self.session.send(self.shell_socket, 'execute_request', content,
                              None, (self.shell_socket.getsockopt(ROUTING_ID)))
            ident, msg = self.session.recv(self.shell_socket, mode=0)
            self.debugpy_initialized = msg['content']['status'] == 'ok'
        # Don't remove leading empty lines when debugging so the breakpoints are correctly positioned
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        if leading_empty_lines in cleanup_transforms:
            index = cleanup_transforms.index(leading_empty_lines)
            self._removed_cleanup[index] = cleanup_transforms.pop(index)
        self.debugpy_client.connect_tcp_socket()
        return self.debugpy_initialized
    def stop(self):
        self.debugpy_client.disconnect_tcp_socket()
        # Restore remove cleanup transformers
        cleanup_transforms = get_ipython().input_transformer_manager.cleanup_transforms
        for index in sorted(self._removed_cleanup):
            func = self._removed_cleanup.pop(index)
            cleanup_transforms.insert(index, func)
    async def dumpCell(self, message):
        # Write the cell code to a deterministic temp file so debugpy can
        # map breakpoints onto it; reply with the file path.
        code = message['arguments']['code']
        file_name = get_file_name(code)
        with open(file_name, 'w', encoding='utf-8') as f:
            f.write(code)
        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command'],
            'body': {
                'sourcePath': file_name
            }
        }
        return reply
    async def setBreakpoints(self, message):
        # Remember breakpoints per source file (for debugInfo), then forward.
        source = message["arguments"]["source"]["path"]
        self.breakpoint_list[source] = message["arguments"]["breakpoints"]
        return await self._forward_message(message)
    async def source(self, message):
        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'command': message['command']
        }
        source_path = message["arguments"]["source"]["path"]
        if os.path.isfile(source_path):
            with open(source_path, encoding='utf-8') as f:
                reply['success'] = True
                reply['body'] = {
                    'content': f.read()
                }
        else:
            reply['success'] = False
            reply['message'] = 'source unavailable'
            reply['body'] = {}
        return reply
    async def stackTrace(self, message):
        reply = await self._forward_message(message)
        # The stackFrames array can have the following content:
        # { frames from the notebook}
        # ...
        # { 'id': xxx, 'name': '<module>', ... } <= this is the first frame of the code from the notebook
        # { frames from ipykernel }
        # ...
        # {'id': yyy, 'name': '<module>', ... } <= this is the first frame of ipykernel code
        # or only the frames from the notebook.
        # We want to remove all the frames from ipykernel when they are present.
        try:
            sf_list = reply["body"]["stackFrames"]
            # Find the second-to-last '<module>' frame from the end: it marks
            # the boundary between notebook frames and ipykernel frames.
            module_idx = len(sf_list) - next(
                i
                for i, v in enumerate(reversed(sf_list), 1)
                if v["name"] == "<module>" and i != 1
            )
            reply["body"]["stackFrames"] = reply["body"]["stackFrames"][
                : module_idx + 1
            ]
        except StopIteration:
            # No ipykernel frames present; keep the trace as-is.
            pass
        return reply
    def accept_variable(self, variable_name):
        # Filter out IPython/debugger internals from variable listings.
        forbid_list = [
            '__name__',
            '__doc__',
            '__package__',
            '__loader__',
            '__spec__',
            '__annotations__',
            '__builtins__',
            '__builtin__',
            '__display__',
            'get_ipython',
            'debugpy',
            'exit',
            'quit',
            'In',
            'Out',
            '_oh',
            '_dh',
            '_',
            '__',
            '___'
        ]
        cond = variable_name not in forbid_list
        # Also reject IPython output-history names (_1, _2, ...) ...
        cond = cond and not bool(re.search(r'^_\d', variable_name))
        # ... and input-history names (_i, _i1, _ih, ...).
        cond = cond and variable_name[0:2] != '_i'
        return cond
    async def variables(self, message):
        reply = {}
        if not self.stopped_threads:
            # Not stopped at a breakpoint: inspect the live user namespace.
            variables = self.variable_explorer.get_children_variables(message['arguments']['variablesReference'])
            return self._build_variables_response(message, variables)
        else:
            reply = await self._forward_message(message)
            # TODO : check start and count arguments work as expected in debugpy
            reply['body']['variables'] = \
                [var for var in reply['body']['variables'] if self.accept_variable(var['name'])]
        return reply
    async def attach(self, message):
        # Inject the host/port debugpy is listening on before forwarding.
        host, port = self.debugpy_client.get_host_port()
        message['arguments']['connect'] = {
            'host': host,
            'port': port
        }
        message['arguments']['logToFile'] = True
        # Reverts that option for now since it leads to spurious break of the code
        # in ipykernel source and resuming the execution leads to several errors
        # in the kernel.
        # Set debugOptions for breakpoints in python standard library source.
        # message['arguments']['debugOptions'] = [ 'DebugStdLib' ]
        return await self._forward_message(message)
    async def configurationDone(self, message):
        # Answered locally; the real configurationDone is sent during the
        # attach handshake in DebugpyClient.
        reply = {
            'seq': message['seq'],
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command']
        }
        return reply
    async def debugInfo(self, message):
        # Static snapshot of debugger state for the frontend.
        breakpoint_list = []
        for key, value in self.breakpoint_list.items():
            breakpoint_list.append({
                'source': key,
                'breakpoints': value
            })
        reply = {
            'type': 'response',
            'request_seq': message['seq'],
            'success': True,
            'command': message['command'],
            'body': {
                'isStarted': self.is_started,
                'hashMethod': 'Murmur2',
                'hashSeed': get_tmp_hash_seed(),
                'tmpFilePrefix': get_tmp_directory() + os.sep,
                'tmpFileSuffix': '.py',
                'breakpoints': breakpoint_list,
                'stoppedThreads': self.stopped_threads,
                'richRendering': True,
                'exceptionPaths': ['Python Exceptions']
            }
        }
        return reply
    async def inspectVariables(self, message):
        self.variable_explorer.untrack_all()
        # looks like the implementation of untrack_all in ptvsd
        # destroys objects we need in track. We have no choice but to
        # reinstantiate the object
        self.variable_explorer = VariableExplorer()
        self.variable_explorer.track()
        variables = self.variable_explorer.get_children_variables()
        return self._build_variables_response(message, variables)
    async def richInspectVariables(self, message):
        reply = {
            "type": "response",
            "sequence_seq": message["seq"],
            "success": False,
            "command": message["command"],
        }
        var_name = message["arguments"]["variableName"]
        valid_name = str.isidentifier(var_name)
        if not valid_name:
            reply["body"] = {"data": {}, "metadata": {}}
            # These two pseudo-entries are shown by frontends but have no
            # rich representation; reply successfully with empty data.
            if var_name == "special variables" or var_name == "function variables":
                reply["success"] = True
            return reply
        repr_data = {}
        repr_metadata = {}
        if not self.stopped_threads:
            # The code did not hit a breakpoint, we use the interpreter
            # to get the rich representation of the variable
            result = get_ipython().user_expressions({var_name: var_name})[var_name]
            if result.get("status", "error") == "ok":
                repr_data = result.get("data", {})
                repr_metadata = result.get("metadata", {})
        else:
            # The code has stopped on a breakpoint, we use the setExpression
            # request to get the rich representation of the variable
            code = f"get_ipython().display_formatter.format({var_name})"
            frame_id = message["arguments"]["frameId"]
            seq = message["seq"]
            reply = await self._forward_message(
                {
                    "type": "request",
                    "command": "evaluate",
                    "seq": seq + 1,
                    "arguments": {"expression": code, "frameId": frame_id},
                }
            )
            if reply["success"]:
                # NOTE(review): evaluates the repr returned by debugpy to
                # rebuild the (data, metadata) tuple — input comes from the
                # kernel's own evaluate request, not from an external source.
                repr_data, repr_metadata = eval(reply["body"]["result"], {}, {})
        body = {
            "data": repr_data,
            # Keep only metadata entries that have a matching data mimetype.
            "metadata": {k: v for k, v in repr_metadata.items() if k in repr_data},
        }
        reply["body"] = body
        reply["success"] = True
        return reply
    async def process_request(self, message):
        # Entry point: dispatch one DAP request and return the reply dict.
        reply = {}
        if message['command'] == 'initialize':
            if self.is_started:
                self.log.info('The debugger has already started')
            else:
                self.is_started = self.start()
                if self.is_started:
                    self.log.info('The debugger has started')
                else:
                    reply = {
                        'command': 'initialize',
                        'request_seq': message['seq'],
                        'seq': 3,
                        'success': False,
                        'type': 'response'
                    }
        handler = self.static_debug_handlers.get(message['command'], None)
        if handler is not None:
            reply = await handler(message)
        elif self.is_started:
            handler = self.started_debug_handlers.get(message['command'], None)
            if handler is not None:
                reply = await handler(message)
            else:
                reply = await self._forward_message(message)
        if message['command'] == 'disconnect':
            self.stop()
            self.breakpoint_list = {}
            self.stopped_threads = []
            self.is_started = False
            self.log.info('The debugger has stopped')
        return reply
|
[
"zmq.utils.jsonapi.dumps",
"IPython.core.getipython.get_ipython",
"os.makedirs",
"_pydevd_bundle.pydevd_frame_utils.create_frames_list_from_frame",
"tornado.queues.Queue",
"os.path.exists",
"zmq.utils.jsonapi.loads",
"os.path.isfile",
"_pydevd_bundle.pydevd_suspended_frames._FramesTracker",
"_pydevd_bundle.pydevd_suspended_frames.SuspendedFramesManager",
"tornado.locks.Event",
"_pydevd_bundle.pydevd_api.PyDevdAPI.VariablePresentation",
"re.search"
] |
[((1270, 1302), '_pydevd_bundle.pydevd_api.PyDevdAPI.VariablePresentation', 'PyDevdAPI.VariablePresentation', ([], {}), '()\n', (1300, 1302), False, 'from _pydevd_bundle.pydevd_api import PyDevdAPI\n'), ((1392, 1416), '_pydevd_bundle.pydevd_suspended_frames.SuspendedFramesManager', 'SuspendedFramesManager', ([], {}), '()\n', (1414, 1416), False, 'from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager, _FramesTracker\n'), ((1474, 1530), '_pydevd_bundle.pydevd_suspended_frames._FramesTracker', '_FramesTracker', (['self.suspended_frame_manager', 'self.py_db'], {}), '(self.suspended_frame_manager, self.py_db)\n', (1488, 1530), False, 'from _pydevd_bundle.pydevd_suspended_frames import SuspendedFramesManager, _FramesTracker\n'), ((2494, 2501), 'tornado.queues.Queue', 'Queue', ([], {}), '()\n', (2499, 2501), False, 'from tornado.queues import Queue\n'), ((2777, 2799), 'zmq.utils.jsonapi.loads', 'jsonapi.loads', (['raw_msg'], {}), '(raw_msg)\n', (2790, 2799), False, 'from zmq.utils import jsonapi\n'), ((5565, 5572), 'tornado.locks.Event', 'Event', ([], {}), '()\n', (5570, 5572), False, 'from tornado.locks import Event\n'), ((6086, 6163), 'zmq.utils.jsonapi.dumps', 'jsonapi.dumps', (['msg'], {'default': 'json_default', 'ensure_ascii': '(False)', 'allow_nan': '(False)'}), '(msg, default=json_default, ensure_ascii=False, allow_nan=False)\n', (6099, 6163), False, 'from zmq.utils import jsonapi\n'), ((8237, 8244), 'tornado.locks.Event', 'Event', ([], {}), '()\n', (8242, 8244), False, 'from tornado.locks import Event\n'), ((13781, 13808), 'os.path.isfile', 'os.path.isfile', (['source_path'], {}), '(source_path)\n', (13795, 13808), False, 'import os\n'), ((1593, 1606), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (1604, 1606), False, 'from IPython.core.getipython import get_ipython\n'), ((1752, 1812), '_pydevd_bundle.pydevd_frame_utils.create_frames_list_from_frame', 'pydevd_frame_utils.create_frames_list_from_frame', 
(['self.frame'], {}), '(self.frame)\n', (1800, 1812), False, 'from _pydevd_bundle import pydevd_frame_utils\n'), ((11351, 11374), 'os.path.exists', 'os.path.exists', (['tmp_dir'], {}), '(tmp_dir)\n', (11365, 11374), False, 'import os\n'), ((11392, 11412), 'os.makedirs', 'os.makedirs', (['tmp_dir'], {}), '(tmp_dir)\n', (11403, 11412), False, 'import os\n'), ((12110, 12123), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (12121, 12123), False, 'from IPython.core.getipython import get_ipython\n'), ((12601, 12614), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (12612, 12614), False, 'from IPython.core.getipython import get_ipython\n'), ((15807, 15840), 're.search', 're.search', (['"""^_\\\\d"""', 'variable_name'], {}), "('^_\\\\d', variable_name)\n", (15816, 15840), False, 'import re\n'), ((19629, 19642), 'IPython.core.getipython.get_ipython', 'get_ipython', ([], {}), '()\n', (19640, 19642), False, 'from IPython.core.getipython import get_ipython\n')]
|
# coding=utf-8
# Copyright 2019 The Weak Disentangle Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# python3
"""Affine modules.
"""
import numpy as np
import tensorflow as tf
from collections import OrderedDict
from weak_disentangle.tensorsketch import utils as tsu
from weak_disentangle.tensorsketch.modules.base import build_with_name_scope
from weak_disentangle.tensorsketch.modules.base import Module
class Affine(Module):
  """Abstract base for modules applying an affine transformation to input.

  Affine carries the machinery needed to inject kernel normalizers into any
  subclass. Contract for subclasses: never read self.orig_kernel directly in
  the forward pass or during parameter building; always go through
  `normalized_kernel` so registered normalizers are applied.
  """

  def __init__(self, bias=True, name=None, initializer=None):
    super().__init__(name=name)
    self.use_bias = bias
    self.kernel = None
    self.bias = None
    self.initializer = initializer
    # Insertion-ordered registry of callables applied to the raw kernel.
    self.kernel_normalizers = OrderedDict()

  @property
  def normalized_kernel(self):
    """Kernel after applying every registered normalizer, in order."""
    result = self.kernel
    for normalizer in self.kernel_normalizers.values():
      result = normalizer(result)
    return result

  @build_with_name_scope
  def build_parameters(self, x):
    raise NotImplementedError("Implement parameter building for Affine class")

  def reset_parameters(self):
    """(Re)initialize kernel and bias.

    Uses the custom initializer when one was supplied; otherwise draws from
    Unif(-a, a) with a = sqrt(1 / fan_in).
    """
    if self.initializer is not None:
      self.initializer(self.kernel, self.bias)
      return
    fan_in = tsu.compute_fan(self.kernel)[0]
    bound = np.sqrt(1 / fan_in)
    self.kernel.assign(tf.random.uniform(self.kernel.shape, -bound, bound))
    if self.use_bias:
      self.bias.assign(tf.random.uniform(self.bias.shape, -bound, bound))
class Dense(Affine):
  """Applies a dense (fully connected) affine transformation to input."""

  def __init__(self, out_dims, bias=True, initializer=None, name=None):
    super().__init__(bias=bias, initializer=initializer, name=name)
    self.out_dims = out_dims

  @build_with_name_scope
  def build_parameters(self, x):
    """Create an (in_dims, out_dims) kernel plus optional bias from x."""
    self.in_dims = int(x.shape[-1])
    kernel_shape = (self.in_dims, self.out_dims)
    self.kernel = tf.Variable(tf.random.normal(kernel_shape), trainable=True)
    if self.use_bias:
      self.bias = tf.Variable(tf.random.normal([self.out_dims]), trainable=True)
    self.reset_parameters()

  def forward(self, x):
    """Return x @ normalized_kernel, plus bias when one was built."""
    out = tf.matmul(x, self.normalized_kernel)
    if self.bias is not None:
      out = tf.nn.bias_add(out, self.bias)
    return out

  def extra_repr(self):
    return "({}, bias={})".format(self.out_dims, self.use_bias)
class Conv2d(Affine):
  """Applies a 2d convolutional transformation (and bias) to input."""

  def __init__(self,
               out_channels,
               kernel_size,
               strides,
               padding="same",
               dilation=1,
               bias=True,
               initializer=None,
               name=None):
    super().__init__(bias=bias, initializer=initializer, name=name)
    self.out_channels = out_channels
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.dilation = dilation

  @build_with_name_scope
  def build_parameters(self, x):
    """Create a (k, k, in_channels, out_channels) kernel + optional bias."""
    self.in_channels = int(x.shape[-1])
    kernel_shape = (self.kernel_size, self.kernel_size,
                    self.in_channels, self.out_channels)
    self.kernel = tf.Variable(tf.random.normal(kernel_shape), trainable=True)
    if self.use_bias:
      self.bias = tf.Variable(tf.random.normal([self.out_channels]),
                              trainable=True)
    self.reset_parameters()

  def forward(self, x):
    """Convolve NHWC input with the normalized kernel; add bias if enabled."""
    out = tf.nn.conv2d(
        x, filter=self.normalized_kernel,
        strides=self.strides,
        padding=self.padding.upper(),
        dilations=self.dilation)
    if self.use_bias:
      out = tf.nn.bias_add(out, self.bias)
    return out

  def extra_repr(self):
    fields = (self.out_channels, self.kernel_size, self.strides,
              self.padding, self.use_bias)
    return "({}, {}, {}, {}, bias={})".format(*fields)
class ConvTranspose2d(Affine):
  """Applies a 2d transposed convolutional transformation (and bias)."""

  def __init__(self,
               out_channels,
               kernel_size,
               strides,
               padding="same",
               output_padding=None,
               dilation=1,
               bias=True,
               initializer=None,
               name=None):
    super().__init__(bias=bias, initializer=initializer, name=name)
    self.out_channels = out_channels
    self.kernel_size = kernel_size
    self.strides = strides
    self.padding = padding
    self.output_padding = output_padding
    self.dilation = dilation

  @build_with_name_scope
  def build_parameters(self, x):
    """Create a (k, k, out_channels, in_channels) kernel + optional bias.

    Note the channel order is transposed relative to Conv2d, as required by
    tf.nn.conv2d_transpose.
    """
    self.in_channels = int(x.shape[-1])
    kernel_shape = (self.kernel_size, self.kernel_size,
                    self.out_channels, self.in_channels)
    self.kernel = tf.Variable(tf.random.normal(kernel_shape), trainable=True)
    if self.use_bias:
      self.bias = tf.Variable(tf.random.normal([self.out_channels]),
                              trainable=True)
    self.reset_parameters()

  def forward(self, x):
    """Transposed-convolve NHWC input; spatial output size is computed."""
    n, h, w, _ = x.shape
    out_h = tsu.compute_out_dims(h, self.kernel_size, self.strides,
                                 self.padding, self.output_padding,
                                 self.dilation)
    out_w = tsu.compute_out_dims(w, self.kernel_size, self.strides,
                                 self.padding, self.output_padding,
                                 self.dilation)
    out = tf.nn.conv2d_transpose(
        x, filter=self.normalized_kernel,
        strides=self.strides,
        padding=self.padding.upper(),
        output_shape=(n, out_h, out_w, self.out_channels),
        dilations=self.dilation)
    if self.use_bias:
      out = tf.nn.bias_add(out, self.bias)
    return out

  def extra_repr(self):
    fields = (self.out_channels, self.kernel_size, self.strides,
              self.padding, self.use_bias)
    return "({}, {}, {}, {}, bias={})".format(*fields)
|
[
"tensorflow.random.uniform",
"tensorflow.random.normal",
"weak_disentangle.tensorsketch.utils.compute_fan",
"weak_disentangle.tensorsketch.utils.compute_out_dims",
"tensorflow.matmul",
"collections.OrderedDict",
"tensorflow.nn.bias_add",
"numpy.sqrt"
] |
[((1612, 1625), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1623, 1625), False, 'from collections import OrderedDict\n'), ((2172, 2200), 'weak_disentangle.tensorsketch.utils.compute_fan', 'tsu.compute_fan', (['self.kernel'], {}), '(self.kernel)\n', (2187, 2200), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((2213, 2232), 'numpy.sqrt', 'np.sqrt', (['(1 / fan_in)'], {}), '(1 / fan_in)\n', (2220, 2232), True, 'import numpy as np\n'), ((3044, 3080), 'tensorflow.matmul', 'tf.matmul', (['x', 'self.normalized_kernel'], {}), '(x, self.normalized_kernel)\n', (3053, 3080), True, 'import tensorflow as tf\n'), ((6296, 6406), 'weak_disentangle.tensorsketch.utils.compute_out_dims', 'tsu.compute_out_dims', (['h', 'self.kernel_size', 'self.strides', 'self.padding', 'self.output_padding', 'self.dilation'], {}), '(h, self.kernel_size, self.strides, self.padding, self.\n output_padding, self.dilation)\n', (6316, 6406), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((6527, 6637), 'weak_disentangle.tensorsketch.utils.compute_out_dims', 'tsu.compute_out_dims', (['w', 'self.kernel_size', 'self.strides', 'self.padding', 'self.output_padding', 'self.dilation'], {}), '(w, self.kernel_size, self.strides, self.padding, self.\n output_padding, self.dilation)\n', (6547, 6637), True, 'from weak_disentangle.tensorsketch import utils as tsu\n'), ((2256, 2307), 'tensorflow.random.uniform', 'tf.random.uniform', (['self.kernel.shape', '(-limit)', 'limit'], {}), '(self.kernel.shape, -limit, limit)\n', (2273, 2307), True, 'import tensorflow as tf\n'), ((2783, 2830), 'tensorflow.random.normal', 'tf.random.normal', (['(self.in_dims, self.out_dims)'], {}), '((self.in_dims, self.out_dims))\n', (2799, 2830), True, 'import tensorflow as tf\n'), ((3122, 3150), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (3136, 3150), True, 'import tensorflow as tf\n'), ((3948, 4043), 'tensorflow.random.normal', 
'tf.random.normal', (['(self.kernel_size, self.kernel_size, self.in_channels, self.out_channels)'], {}), '((self.kernel_size, self.kernel_size, self.in_channels,\n self.out_channels))\n', (3964, 4043), True, 'import tensorflow as tf\n'), ((4620, 4648), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (4634, 4648), True, 'import tensorflow as tf\n'), ((5789, 5884), 'tensorflow.random.normal', 'tf.random.normal', (['(self.kernel_size, self.kernel_size, self.out_channels, self.in_channels)'], {}), '((self.kernel_size, self.kernel_size, self.out_channels,\n self.in_channels))\n', (5805, 5884), True, 'import tensorflow as tf\n'), ((7041, 7069), 'tensorflow.nn.bias_add', 'tf.nn.bias_add', (['x', 'self.bias'], {}), '(x, self.bias)\n', (7055, 7069), True, 'import tensorflow as tf\n'), ((2355, 2404), 'tensorflow.random.uniform', 'tf.random.uniform', (['self.bias.shape', '(-limit)', 'limit'], {}), '(self.bias.shape, -limit, limit)\n', (2372, 2404), True, 'import tensorflow as tf\n'), ((2931, 2964), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_dims]'], {}), '([self.out_dims])\n', (2947, 2964), True, 'import tensorflow as tf\n'), ((4284, 4321), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_channels]'], {}), '([self.out_channels])\n', (4300, 4321), True, 'import tensorflow as tf\n'), ((6125, 6162), 'tensorflow.random.normal', 'tf.random.normal', (['[self.out_channels]'], {}), '([self.out_channels])\n', (6141, 6162), True, 'import tensorflow as tf\n')]
|
# -*- coding: utf-8 -*-
"""
sphinx.builders.htmlhelp
~~~~~~~~~~~~~~~~~~~~~~~~
Build HTML help support files.
Parts adapted from Python's Doc/tools/prechm.py.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import os
import codecs
from os import path
from docutils import nodes
from sphinx import addnodes
from sphinx.builders.html import StandaloneHTMLBuilder
from sphinx.util.pycompat import htmlescape
# Project file (*.hhp) template. 'outname' is the file basename (like
# the pythlp in pythlp.hhp); 'version' is the doc version number (like
# the 2.2 in Python 2.2).
# The magical numbers in the long line under [WINDOWS] set most of the
# user-visible features (visible buttons, tabs, etc).
# About 0x10384e: This defines the buttons in the help viewer. The
# following defns are taken from htmlhelp.h. Not all possibilities
# actually work, and not all those that work are available from the Help
# Workshop GUI. In particular, the Zoom/Font button works and is not
# available from the GUI. The ones we're using are marked with 'x':
#
# 0x000002 Hide/Show x
# 0x000004 Back x
# 0x000008 Forward x
# 0x000010 Stop
# 0x000020 Refresh
# 0x000040 Home x
# 0x000080 Forward
# 0x000100 Back
# 0x000200 Notes
# 0x000400 Contents
# 0x000800 Locate x
# 0x001000 Options x
# 0x002000 Print x
# 0x004000 Index
# 0x008000 Search
# 0x010000 History
# 0x020000 Favorites
# 0x040000 Jump 1
# 0x080000 Jump 2
# 0x100000 Zoom/Font x
# 0x200000 TOC Next
# 0x400000 TOC Prev
project_template = '''\
[OPTIONS]
Binary TOC=No
Binary Index=No
Compiled file=%(outname)s.chm
Contents file=%(outname)s.hhc
Default Window=%(outname)s
Default topic=index.html
Display compile progress=No
Full text search stop list file=%(outname)s.stp
Full-text search=Yes
Index file=%(outname)s.hhk
Language=%(lcid)#x
Title=%(title)s
[WINDOWS]
%(outname)s="%(title)s","%(outname)s.hhc","%(outname)s.hhk",\
"index.html","index.html",,,,,0x63520,220,0x10384e,[0,0,1024,768],,,,,,,0
[FILES]
'''
contents_header = '''\
<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
<HTML>
<HEAD>
<meta name="GENERATOR" content="Microsoft® HTML Help Workshop 4.1">
<!-- Sitemap 1.0 -->
</HEAD><BODY>
<OBJECT type="text/site properties">
<param name="Window Styles" value="0x801227">
<param name="ImageType" value="Folder">
</OBJECT>
<UL>
'''
contents_footer = '''\
</UL></BODY></HTML>
'''
object_sitemap = '''\
<OBJECT type="text/sitemap">
<param name="Name" value="%s">
<param name="Local" value="%s">
</OBJECT>
'''
# List of words the full text search facility shouldn't index. This
# becomes file outname.stp. Note that this list must be pretty small!
# Different versions of the MS docs claim the file has a maximum size of
# 256 or 512 bytes (including \r\n at the end of each line).
# Note that "and", "or", "not" and "near" are operators in the search
# language, so no point indexing them even if we wanted to.
stopwords = """
a and are as at
be but by
for
if in into is it
near no not
of on or
such
that the their then there these they this to
was will with
""".split()
# The following list includes only languages supported by Sphinx.
# See http://msdn.microsoft.com/en-us/library/ms930130.aspx for more.
# Maps a language code to (Windows LCID, file encoding); consumed by
# HTMLHelpBuilder.init() to pick self.lcid / self.encoding.
chm_locales = {
    # lang: LCID, encoding
    'ca': (0x403, 'cp1252'),
    'cs': (0x405, 'cp1250'),
    'da': (0x406, 'cp1252'),
    'de': (0x407, 'cp1252'),
    'en': (0x409, 'cp1252'),
    'es': (0x40a, 'cp1252'),
    'et': (0x425, 'cp1257'),
    'fa': (0x429, 'cp1256'),
    'fi': (0x40b, 'cp1252'),
    'fr': (0x40c, 'cp1252'),
    'hr': (0x41a, 'cp1250'),
    'hu': (0x40e, 'cp1250'),
    'it': (0x410, 'cp1252'),
    'ja': (0x411, 'cp932'),
    'ko': (0x412, 'cp949'),
    'lt': (0x427, 'cp1257'),
    'lv': (0x426, 'cp1257'),
    'nl': (0x413, 'cp1252'),
    'no_NB': (0x414, 'cp1252'),
    'pl': (0x415, 'cp1250'),
    'pt_BR': (0x416, 'cp1252'),
    'ru': (0x419, 'cp1251'),
    'sk': (0x41b, 'cp1250'),
    'sl': (0x424, 'cp1250'),
    'sv': (0x41d, 'cp1252'),
    'tr': (0x41f, 'cp1254'),
    'uk_UA': (0x422, 'cp1251'),
    'zh_CN': (0x804, 'cp936'),
    'zh_TW': (0x404, 'cp950'),
}
class HTMLHelpBuilder(StandaloneHTMLBuilder):
    """
    Builder that also outputs Windows HTML help project, contents and
    index files. Adapted from the original Doc/tools/prechm.py.
    """
    name = 'htmlhelp'
    # don't copy the reST source
    copysource = False
    supported_image_types = ['image/png', 'image/gif', 'image/jpeg']
    # don't add links
    add_permalinks = False
    # don't add sidebar etc.
    embedded = True
    # don't generate search index or include search page
    search = False
    # default locale/encoding (English); overridden in init() via chm_locales
    lcid = 0x409
    encoding = 'cp1252'
    def init(self):
        """Initialize the HTML builder and apply the configured locale."""
        StandaloneHTMLBuilder.init(self)
        # the output files for HTML help must be .html only
        self.out_suffix = '.html'
        # determine the correct locale setting
        locale = chm_locales.get(self.config.language)
        if locale is not None:
            self.lcid, self.encoding = locale
    def open_file(self, outdir, basename, mode='w'):
        # open a file with the correct encoding for the selected language
        return codecs.open(path.join(outdir, basename), mode,
                           self.encoding, 'xmlcharrefreplace')
    def handle_finish(self):
        """Write the .hhp/.hhc/.hhk/.stp support files after the HTML build."""
        self.build_hhx(self.outdir, self.config.htmlhelp_basename)
    def build_hhx(self, outdir, outname):
        """Emit the stopword list, project file, TOC and keyword index."""
        self.info('dumping stopword list...')
        f = self.open_file(outdir, outname+'.stp')
        try:
            for word in sorted(stopwords):
                print(word, file=f)
        finally:
            f.close()
        self.info('writing project file...')
        f = self.open_file(outdir, outname+'.hhp')
        try:
            f.write(project_template % {'outname': outname,
                                        'title': self.config.html_title,
                                        'version': self.config.version,
                                        'project': self.config.project,
                                        'lcid': self.lcid})
            if not outdir.endswith(os.sep):
                outdir += os.sep
            olen = len(outdir)
            # List every .html file (plus non-.js static files) relative to
            # outdir, using backslashes as the help compiler expects.
            for root, dirs, files in os.walk(outdir):
                staticdir = root.startswith(path.join(outdir, '_static'))
                for fn in files:
                    if (staticdir and not fn.endswith('.js')) or \
                       fn.endswith('.html'):
                        print(path.join(root, fn)[olen:].replace(os.sep, '\\'),
                              file=f)
        finally:
            f.close()
        self.info('writing TOC file...')
        f = self.open_file(outdir, outname+'.hhc')
        try:
            f.write(contents_header)
            # special books
            f.write('<LI> ' + object_sitemap % (self.config.html_short_title,
                                                'index.html'))
            for indexname, indexcls, content, collapse in self.domain_indices:
                f.write('<LI> ' + object_sitemap % (indexcls.localname,
                                                    '%s.html' % indexname))
            # the TOC
            tocdoc = self.env.get_and_resolve_doctree(
                self.config.master_doc, self, prune_toctrees=False)
            def write_toc(node, ullevel=0):
                """Recursively render the doctree TOC as nested sitemap <UL>s."""
                if isinstance(node, nodes.list_item):
                    f.write('<LI> ')
                    for subnode in node:
                        write_toc(subnode, ullevel)
                elif isinstance(node, nodes.reference):
                    link = node['refuri']
                    title = htmlescape(node.astext()).replace('"', '&quot;')
                    f.write(object_sitemap % (title, link))
                elif isinstance(node, nodes.bullet_list):
                    # the top-level <UL> is already opened by contents_header
                    if ullevel != 0:
                        f.write('<UL>\n')
                    for subnode in node:
                        write_toc(subnode, ullevel+1)
                    if ullevel != 0:
                        f.write('</UL>\n')
                elif isinstance(node, addnodes.compact_paragraph):
                    for subnode in node:
                        write_toc(subnode, ullevel)
            def istoctree(node):
                return isinstance(node, addnodes.compact_paragraph) and \
                       'toctree' in node
            for node in tocdoc.traverse(istoctree):
                write_toc(node)
            f.write(contents_footer)
        finally:
            f.close()
        self.info('writing index file...')
        index = self.env.create_index(self)
        f = self.open_file(outdir, outname+'.hhk')
        try:
            f.write('<UL>\n')
            def write_index(title, refs, subitems):
                """Write one keyword entry (and one level of subentries)."""
                def write_param(name, value):
                    item = '    <param name="%s" value="%s">\n' % \
                        (name, value)
                    f.write(item)
                title = htmlescape(title)
                f.write('<LI> <OBJECT type="text/sitemap">\n')
                write_param('Keyword', title)
                if len(refs) == 0:
                    write_param('See Also', title)
                elif len(refs) == 1:
                    write_param('Local', refs[0][1])
                else:
                    for i, ref in enumerate(refs):
                        # XXX: better title?
                        write_param('Name', '[%d] %s' % (i, ref[1]))
                        write_param('Local', ref[1])
                f.write('</OBJECT>\n')
                if subitems:
                    f.write('<UL> ')
                    for subitem in subitems:
                        write_index(subitem[0], subitem[1], [])
                    f.write('</UL>')
            for (key, group) in index:
                for title, (refs, subitems, key_) in group:
                    write_index(title, refs, subitems)
            f.write('</UL>\n')
        finally:
            f.close()
|
[
"sphinx.builders.html.StandaloneHTMLBuilder.init",
"os.walk",
"os.path.join",
"sphinx.util.pycompat.htmlescape"
] |
[((5069, 5101), 'sphinx.builders.html.StandaloneHTMLBuilder.init', 'StandaloneHTMLBuilder.init', (['self'], {}), '(self)\n', (5095, 5101), False, 'from sphinx.builders.html import StandaloneHTMLBuilder\n'), ((5530, 5557), 'os.path.join', 'path.join', (['outdir', 'basename'], {}), '(outdir, basename)\n', (5539, 5557), False, 'from os import path\n'), ((6588, 6603), 'os.walk', 'os.walk', (['outdir'], {}), '(outdir)\n', (6595, 6603), False, 'import os\n'), ((9352, 9369), 'sphinx.util.pycompat.htmlescape', 'htmlescape', (['title'], {}), '(title)\n', (9362, 9369), False, 'from sphinx.util.pycompat import htmlescape\n'), ((6649, 6677), 'os.path.join', 'path.join', (['outdir', '"""_static"""'], {}), "(outdir, '_static')\n", (6658, 6677), False, 'from os import path\n'), ((6854, 6873), 'os.path.join', 'path.join', (['root', 'fn'], {}), '(root, fn)\n', (6863, 6873), False, 'from os import path\n')]
|
from django.http.response import Http404
from rest_framework.response import Response
from rest_framework import status
from core.abstract.viewsets import AbstractViewSet
from core.comment.models import Comment
from core.comment.serializers import CommentSerializer
from core.auth.permissions import UserPermission
class CommentViewSet(AbstractViewSet):
    """CRUD endpoints for comments, nested under a post route."""
    http_method_names = ('post', 'get', 'put', 'delete')
    permission_classes = (UserPermission,)
    serializer_class = CommentSerializer

    def get_queryset(self):
        """Return the comments visible to the requester.

        Superusers see every comment; other users see only the comments of
        the post identified by the ``post_pk`` URL kwarg.

        Raises:
            Http404: if the route carries no ``post_pk``.
        """
        if self.request.user.is_superuser:
            return Comment.objects.all()
        # .get() instead of [] so a missing kwarg yields None, not KeyError.
        post_pk = self.kwargs.get('post_pk')
        if post_pk is None:
            # Bug fix: the original did `return Http404`, handing the
            # exception *class* back as a "queryset". It must be raised so
            # DRF converts it into a 404 response.
            raise Http404
        return Comment.objects.filter(post__public_id=post_pk)

    def get_object(self):
        """Fetch one comment by its public id and enforce object permissions."""
        obj = Comment.objects.get_object_by_public_id(self.kwargs['pk'])
        self.check_object_permissions(self.request, obj)
        return obj

    def create(self, request, *args, **kwargs):
        """Validate the payload and create a comment; respond 201 on success."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        self.perform_create(serializer)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
[
"core.comment.models.Comment.objects.get_object_by_public_id",
"core.comment.models.Comment.objects.filter",
"rest_framework.response.Response",
"core.comment.models.Comment.objects.all"
] |
[((728, 775), 'core.comment.models.Comment.objects.filter', 'Comment.objects.filter', ([], {'post__public_id': 'post_pk'}), '(post__public_id=post_pk)\n', (750, 775), False, 'from core.comment.models import Comment\n'), ((842, 900), 'core.comment.models.Comment.objects.get_object_by_public_id', 'Comment.objects.get_object_by_public_id', (["self.kwargs['pk']"], {}), "(self.kwargs['pk'])\n", (881, 900), False, 'from core.comment.models import Comment\n'), ((1193, 1250), 'rest_framework.response.Response', 'Response', (['serializer.data'], {'status': 'status.HTTP_201_CREATED'}), '(serializer.data, status=status.HTTP_201_CREATED)\n', (1201, 1250), False, 'from rest_framework.response import Response\n'), ((590, 611), 'core.comment.models.Comment.objects.all', 'Comment.objects.all', ([], {}), '()\n', (609, 611), False, 'from core.comment.models import Comment\n')]
|
"""Provides a Random wrapper that has a seed access method."""
from random import Random
class RandomWithSeed(Random):
    """random.Random subclass that remembers the seed it was given.

    The stdlib Random discards the seed value after mixing it into its
    internal state; this subclass intercepts seed() to record it.
    """

    def __init__(self, x):
        # Random.__init__ delegates to self.seed(x), which records the value.
        super().__init__(x)

    def seed(self, a=None, version=2):
        """Seed the generator and remember *a* for later retrieval."""
        self.the_seed = a
        super().seed(a, version)

    def get_seed(self):
        """Return the seed most recently passed to seed() / __init__()."""
        return self.the_seed
if __name__ == '__main__':
    # Smoke test: the recorded seed must round-trip through get_seed().
    TEST_PRNG = RandomWithSeed(100)
    assert TEST_PRNG.get_seed() == 100
    print("Yup, seed is 100.")
|
[
"random.Random.__init__"
] |
[((243, 267), 'random.Random.__init__', 'Random.__init__', (['self', 'x'], {}), '(self, x)\n', (258, 267), False, 'from random import Random\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Loads tilemaps, slices, returns a dict with x,y coordinates as keys
import sys
from math import ceil
import pygame
import pygame.locals
import argparse
# grab a module instance so the __main__ block can pin script-level state
# (cli_args) onto the module itself
this = sys.modules[__name__]
def cli():
    """Parse command-line options for the tilemap slicer.

    Returns an argparse.Namespace with `map`, `margin` and `scale`.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('-m', '--map', help='Path to tilemap')
    ap.add_argument('-M', '--margin', help='Margin width in px',
                    default=1, type=int)
    ap.add_argument('-s', '--scale', help='Specify image scale in px',
                    default='16x16')
    return ap.parse_args()
class TileCache:
    """Lazily slice tilesets into tables, memoized by (path, width, height)."""

    def __init__(self, width=16, height=None, margin=1):
        self.width = width
        # falsy height falls back to a square tile of `width`
        self.height = height or width
        self.margin = margin
        self._cache = {}

    def __getitem__(self, filename):
        """Return the tile table for *filename*, loading it on first access."""
        key = (filename, self.width, self.height)
        if key not in self._cache:
            self._cache[key] = load_tile_table(filename, self.width,
                                            self.height, self.margin)
        return self._cache[key]
def load_tile_table(filename, w, h, m):
    """Slice *filename* into w x h pixel tiles separated by m px of margin.

    Returns a dict mapping (column, row) -> pygame surface.
    """
    sheet = pygame.image.load(filename).convert_alpha()
    sheet_w, sheet_h = sheet.get_size()
    cols = ceil(sheet_w / (w + m))
    rows = ceil(sheet_h / (h + m))
    print('{}: {} x {}'.format(filename, cols, rows))
    tiles = {}
    for col in range(cols):
        for row in range(rows):
            rect = (col * (w + m), row * (h + m), w, h)
            tiles[col, row] = sheet.subsurface(rect)
    return tiles
def split_dims(dim_str):
    """Parse an 'XxY' dimension string into an (int, int) tuple.

    Raises:
        AssertionError: if the string lacks an 'x' or the parts are not
            integers (kept as AssertionError for caller compatibility).
    """
    # Explicit raise instead of `assert` so validation survives `python -O`.
    if 'x' not in dim_str:
        raise AssertionError('Invalid format, required: XxY, i.e. 16x16')
    x, y = dim_str.split('x')
    try:
        return int(x), int(y)
    except ValueError:
        # Narrowed from a bare `except:`; the original also had an
        # unreachable `sys.exit(1)` after the raise, now removed.
        raise AssertionError('Invalid format, required: XxY, i.e. 16x16')
if __name__ == '__main__':
    # stash parsed args on the module so they are visible as a global
    this.cli_args = cli()
    pygame.init()
    screen = pygame.display.set_mode((1024, 768))
    screen.fill((255, 255, 255))
    # tile size in pixels, parsed from e.g. "16x16"
    column_px, row_px = split_dims(cli_args.scale.lower())
    table = load_tile_table(cli_args.map, column_px, row_px, cli_args.margin)
    # blit every tile at its grid position to preview the sliced sheet
    for x, y in table:
        x_loc = x * column_px
        y_loc = y * row_px
        screen.blit(table[x,y], (x_loc, y_loc))
    pygame.display.flip()
    # Wait for input before exiting
    while pygame.event.wait().type != pygame.locals.QUIT:
        pass
|
[
"pygame.event.wait",
"argparse.ArgumentParser",
"math.ceil",
"pygame.display.set_mode",
"pygame.init",
"pygame.display.flip",
"pygame.image.load",
"sys.exit"
] |
[((283, 308), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (306, 308), False, 'import argparse\n'), ((2092, 2105), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2103, 2105), False, 'import pygame\n'), ((2119, 2155), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(1024, 768)'], {}), '((1024, 768))\n', (2142, 2155), False, 'import pygame\n'), ((2458, 2479), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2477, 2479), False, 'import pygame\n'), ((1412, 1437), 'math.ceil', 'ceil', (['(img_width / (w + m))'], {}), '(img_width / (w + m))\n', (1416, 1437), False, 'from math import ceil\n'), ((1451, 1477), 'math.ceil', 'ceil', (['(img_height / (h + m))'], {}), '(img_height / (h + m))\n', (1455, 1477), False, 'from math import ceil\n'), ((1305, 1332), 'pygame.image.load', 'pygame.image.load', (['filename'], {}), '(filename)\n', (1322, 1332), False, 'import pygame\n'), ((2022, 2033), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2030, 2033), False, 'import sys\n'), ((2527, 2546), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (2544, 2546), False, 'import pygame\n')]
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Creates TOCO options to process a model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import traceback
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing import zip_test_utils
def toco_options(data_types,
                 input_arrays,
                 output_arrays,
                 shapes,
                 extra_toco_options=None):
  """Create TOCO options to process a model.
  Args:
    data_types: input and inference types used by TOCO.
    input_arrays: names of the input tensors
    output_arrays: name of the output tensors
    shapes: shapes of the input tensors
    extra_toco_options: additional toco options
  Returns:
    the options in a string.
  """
  if extra_toco_options is None:
    extra_toco_options = zip_test_utils.ExtraTocoOptions()
  # Colon-separated shape list; empty/None shapes are skipped entirely.
  shape_str = ":".join(",".join(str(dim) for dim in shape)
                       for shape in shapes if shape)
  inference_type = "FLOAT"
  # TODO(ahentz): if we get multi-input quantization to work we need this
  # to change
  if data_types[0] == "QUANTIZED_UINT8":
    inference_type = "QUANTIZED_UINT8"
  flags = [
      " --input_data_types=%s" % ",".join(data_types),
      " --inference_type=%s" % inference_type,
      " --input_format=TENSORFLOW_GRAPHDEF",
      " --output_format=TFLITE",
      " --input_arrays=%s" % ",".join(input_arrays),
      " --output_arrays=%s" % ",".join(output_arrays),
  ]
  if shape_str:
    flags.append(" --input_shapes=%s" % shape_str)
  if extra_toco_options.drop_control_dependency:
    flags.append(" --drop_control_dependency")
  if extra_toco_options.allow_custom_ops:
    flags.append(" --allow_custom_ops")
  if extra_toco_options.rnn_states:
    flags.append(" --rnn_states='" + extra_toco_options.rnn_states + "'")
  if extra_toco_options.split_tflite_lstm_inputs is not None:
    if extra_toco_options.split_tflite_lstm_inputs:
      flags.append(" --split_tflite_lstm_inputs=true")
    else:
      flags.append(" --split_tflite_lstm_inputs=false")
  return "".join(flags)
def toco_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
  """Convert a model's graph def into a tflite model.
  NOTE: this currently shells out to the toco binary, but we would like
  convert to Python API tooling in the future.
  Args:
    options: An Options instance.
    graph_def: A GraphDef object.
    input_tensors: List of input tensor tuples `(name, shape, type)`.
    output_tensors: List of output tensors (names).
    **kwargs: Extra options to be passed.
  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  # Convert ophint ops if presented.
  graph_def = tf.compat.v1.lite.experimental.convert_op_hints_to_stubs(
      graph_def=graph_def)
  graph_def_str = graph_def.SerializeToString()
  extra_toco_options = kwargs.get("extra_toco_options",
                                  zip_test_utils.ExtraTocoOptions())
  test_params = kwargs.get("test_params", {})
  input_arrays = [x[0] for x in input_tensors]
  data_types = [zip_test_utils.TF_TYPE_INFO[x[2]][1] for x in input_tensors]
  # Quantized path: drive the Python converter API with a random-uniform
  # calibration (representative) dataset.
  if test_params.get("fully_quantize", False):
    # Read the input range for the representative dataset from parameters.
    min_value, max_value = test_params.get("input_range", (-1, 1))
    with tempfile.NamedTemporaryFile() as graphdef_file:
      graphdef_file.write(graph_def_str)
      graphdef_file.flush()
      input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)
      converter = tf.compat.v1.lite.TocoConverter.from_frozen_graph(
          graphdef_file.name, input_arrays, output_tensors, input_shapes)
      def representative_dataset(input_tensors):
        """Build one batch of random float32 calibration inputs."""
        calibration_inputs = []
        for _, shape, _ in input_tensors:
          if shape:
            dims = [dim.value for dim in shape.dims]
            calibration_inputs.append(
                np.random.uniform(min_value, max_value,
                                  tuple(dims)).astype(np.float32))
        return calibration_inputs
      def representative_dataset_gen():
        # 100 calibration steps, as expected by the converter.
        for _ in range(100):
          yield representative_dataset(input_tensors)
      converter.target_spec.supported_ops = [
          tf.lite.OpsSet.TFLITE_BUILTINS_INT8
      ]
      converter.representative_dataset = representative_dataset_gen
      if extra_toco_options.inference_input_type:
        converter.inference_input_type = (
            extra_toco_options.inference_input_type)
      if extra_toco_options.inference_output_type:
        converter.inference_output_type = (
            extra_toco_options.inference_output_type)
      try:
        tflite_model = converter.convert()
        return tflite_model, ""
      except Exception as e:
        # Conversion failures are reported via the log, not raised.
        log = "{0}\n{1}".format(str(e), traceback.format_exc())
        return None, log
  else:
    # Float path: build a flag string and shell out to the toco binary.
    opts = toco_options(
        data_types=data_types,
        input_arrays=input_arrays,
        shapes=[x[1] for x in input_tensors],
        output_arrays=output_tensors,
        extra_toco_options=extra_toco_options)
    with tempfile.NamedTemporaryFile() as graphdef_file, \
         tempfile.NamedTemporaryFile() as output_file, \
         tempfile.NamedTemporaryFile("w+") as stdout_file:
      graphdef_file.write(graph_def_str)
      graphdef_file.flush()
      # TODO(aselle): Switch this to subprocess at some point.
      if options.run_with_flex:
        opts += " --enable_select_tf_ops --force_select_tf_ops"
      cmd = ("%s --input_file=%s --output_file=%s %s > %s 2>&1" %
             (options.toco, graphdef_file.name, output_file.name, opts,
              stdout_file.name))
      exit_code = os.system(cmd)
      log = (
          cmd + "exited with code %d" % exit_code + "\n------------------\n" +
          stdout_file.read())
      return (None if exit_code != 0 else output_file.read()), log
|
[
"tensorflow.lite.testing.zip_test_utils.get_input_shapes_map",
"tempfile.NamedTemporaryFile",
"tensorflow.compat.v1.lite.TocoConverter.from_frozen_graph",
"os.system",
"traceback.format_exc",
"tensorflow.compat.v1.lite.experimental.convert_op_hints_to_stubs",
"tensorflow.lite.testing.zip_test_utils.ExtraTocoOptions"
] |
[((3357, 3434), 'tensorflow.compat.v1.lite.experimental.convert_op_hints_to_stubs', 'tf.compat.v1.lite.experimental.convert_op_hints_to_stubs', ([], {'graph_def': 'graph_def'}), '(graph_def=graph_def)\n', (3413, 3434), True, 'import tensorflow as tf\n'), ((1542, 1575), 'tensorflow.lite.testing.zip_test_utils.ExtraTocoOptions', 'zip_test_utils.ExtraTocoOptions', ([], {}), '()\n', (1573, 1575), False, 'from tensorflow.lite.testing import zip_test_utils\n'), ((3581, 3614), 'tensorflow.lite.testing.zip_test_utils.ExtraTocoOptions', 'zip_test_utils.ExtraTocoOptions', ([], {}), '()\n', (3612, 3614), False, 'from tensorflow.lite.testing import zip_test_utils\n'), ((3986, 4015), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (4013, 4015), False, 'import tempfile\n'), ((4125, 4175), 'tensorflow.lite.testing.zip_test_utils.get_input_shapes_map', 'zip_test_utils.get_input_shapes_map', (['input_tensors'], {}), '(input_tensors)\n', (4160, 4175), False, 'from tensorflow.lite.testing import zip_test_utils\n'), ((4194, 4311), 'tensorflow.compat.v1.lite.TocoConverter.from_frozen_graph', 'tf.compat.v1.lite.TocoConverter.from_frozen_graph', (['graphdef_file.name', 'input_arrays', 'output_tensors', 'input_shapes'], {}), '(graphdef_file.name,\n input_arrays, output_tensors, input_shapes)\n', (4243, 4311), True, 'import tensorflow as tf\n'), ((5746, 5775), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5773, 5775), False, 'import tempfile\n'), ((5805, 5834), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {}), '()\n', (5832, 5834), False, 'import tempfile\n'), ((5862, 5895), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', (['"""w+"""'], {}), "('w+')\n", (5889, 5895), False, 'import tempfile\n'), ((6330, 6344), 'os.system', 'os.system', (['cmd'], {}), '(cmd)\n', (6339, 6344), False, 'import os\n'), ((5456, 5478), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (5476, 
5478), False, 'import traceback\n')]
|
import sys, threading
from queue import Queue
# Module-level pointer to the fiber that currently owns the thread of control.
_current_fiber = None

def set_current_fiber(f):
    """Record *f* as the currently running fiber."""
    global _current_fiber
    _current_fiber = f

def current():
    """Return the currently running fiber, lazily creating the main fiber."""
    global _current_fiber
    if _current_fiber is not None:
        return _current_fiber
    _current_fiber = _create_main_fiber()
    return _current_fiber
class Fiber:
    """A cooperatively scheduled unit of execution backed by a daemon thread.

    Each fiber parks its worker thread on a Queue; switch() hands control to
    the target fiber and blocks the caller until control comes back, so only
    one fiber runs at a time.
    """
    def __init__(self, target=None, args=(), kwargs=None):
        """Create a fiber that will run ``target(*args, **kwargs)``.

        BUG FIX: the defaults were the mutable objects ``[]`` and ``{}``;
        they are now an immutable tuple and a ``None`` sentinel so no state
        can be shared between instances (backward compatible for callers).
        """
        if kwargs is None:
            kwargs = {}
        def _run():
            try:
                # Park until the first switch() hands control to this fiber.
                self._q.get()
                self._exc_info = None
                set_current_fiber(self)
                return target(*args, **kwargs)
            except BaseException:
                # Remember the failure so it can be re-raised in the parent.
                self._exc_info = sys.exc_info()
            finally:
                self._ended = True
                parent = self._get_active_parent()
                if self._exc_info:
                    parent._q.put(self._exc_info)
                else:
                    parent._q.put(0)
        self._ended = False
        self._q = Queue()
        self._th = threading.Thread(target=_run, daemon=True)
        self.parent = current() # only the root fiber's parent is None
        self._th.start()
    def _get_active_parent(self):
        """Walk up the parent chain to the nearest fiber that has not ended."""
        parent = self.parent
        while True:
            if parent is not None and not parent._ended:
                break
            parent = parent.parent
        return parent
    @classmethod
    def current(cls):
        """Return the fiber currently in control."""
        return current()
    @property
    def parent(self):
        # The root fiber has no parent; default to None before assignment.
        return self.__dict__.get('parent', None)
    @parent.setter
    def parent(self, value):
        if not isinstance(value, Fiber):
            raise TypeError('parent must be a Fiber')
        self.__dict__['parent'] = value
    def switch(self):
        """Transfer control to this fiber and block until it comes back.

        Re-raises any exception the fiber terminated with.
        """
        if not self._th.is_alive():
            raise Exception('Fiber has ended')
        curr = current()
        self._q.put(0)
        set_current_fiber(self)
        x = curr._q.get()
        if x != 0:
            raise x[1].with_traceback(x[2])
    def is_alive(self):
        """True while the fiber's body has not finished running."""
        return not self._ended
    def __getstate__(self):
        # Fibers wrap live threads and queues and cannot be pickled.
        raise TypeError('cannot serialize Fiber object')
def _create_main_fiber():
    """Build the root Fiber wrapping the thread we are already running on.

    ``__new__`` is used (and attributes are written straight into
    ``__dict__``) so that ``Fiber.__init__`` does not spawn a new worker
    thread and the ``parent`` setter's type check is bypassed for ``None``.
    """
    fiber = Fiber.__new__(Fiber)
    fiber.__dict__.update(
        parent=None,
        _th=threading.current_thread(),
        _q=Queue(),
        _ended=False,
    )
    return fiber
|
[
"threading.Thread",
"threading.current_thread",
"queue.Queue",
"sys.exc_info"
] |
[((2203, 2229), 'threading.current_thread', 'threading.current_thread', ([], {}), '()\n', (2227, 2229), False, 'import sys, threading\n'), ((2262, 2269), 'queue.Queue', 'Queue', ([], {}), '()\n', (2267, 2269), False, 'from queue import Queue\n'), ((922, 929), 'queue.Queue', 'Queue', ([], {}), '()\n', (927, 929), False, 'from queue import Queue\n'), ((949, 991), 'threading.Thread', 'threading.Thread', ([], {'target': '_run', 'daemon': '(True)'}), '(target=_run, daemon=True)\n', (965, 991), False, 'import sys, threading\n'), ((609, 623), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (621, 623), False, 'import sys, threading\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2015, Linaro Limited
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
def get_args(argv=None):
    """Parse the command-line arguments for the signing tool.

    :param argv: optional explicit argument list; when None (the default,
        preserving the old behavior) argparse reads ``sys.argv[1:]``.
        Accepting a list makes the parser unit-testable.
    :return: argparse.Namespace with attributes ``key``, ``inf`` and ``out``
    """
    from argparse import ArgumentParser
    parser = ArgumentParser()
    parser.add_argument('--key', required=True, help='Name of key file')
    parser.add_argument('--in', required=True, dest='inf', \
            help='Name of in file')
    parser.add_argument('--out', required=True, help='Name of out file')
    return parser.parse_args(argv)
def main():
    """Sign the input image with RSASSA-PKCS1-v1_5/SHA-256.

    Output layout: shdr || digest || signature || image, where shdr packs
    (magic, img_type, img_size, algo, digest_len, sig_len) little-endian.

    BUG FIX: file handles were opened and closed manually; ``with`` blocks
    now guarantee they are closed even if an intermediate step raises.
    """
    from Crypto.Signature import PKCS1_v1_5
    from Crypto.Hash import SHA256
    from Crypto.PublicKey import RSA
    import struct
    args = get_args()
    with open(args.key, 'rb') as f:
        key = RSA.importKey(f.read())
    with open(args.inf, 'rb') as f:
        img = f.read()
    signer = PKCS1_v1_5.new(key)
    h = SHA256.new()
    digest_len = h.digest_size
    # Sign the empty hash once just to learn the signature length.
    sig_len = len(signer.sign(h))
    img_size = len(img)
    magic = 0x4f545348 # SHDR_MAGIC
    img_type = 0 # SHDR_TA
    algo = 0x70004830 # TEE_ALG_RSASSA_PKCS1_V1_5_SHA256
    shdr = struct.pack('<IIIIHH', \
  magic, img_type, img_size, algo, digest_len, sig_len)
    # The digest/signature cover the header as well as the image payload.
    h.update(shdr)
    h.update(img)
    sig = signer.sign(h)
    with open(args.out, 'wb') as f:
        f.write(shdr)
        f.write(h.digest())
        f.write(sig)
        f.write(img)
if __name__ == "__main__":
    main()
|
[
"Crypto.Hash.SHA256.new",
"Crypto.Signature.PKCS1_v1_5.new",
"argparse.ArgumentParser",
"struct.pack"
] |
[((1428, 1444), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (1442, 1444), False, 'from argparse import ArgumentParser\n'), ((1987, 2006), 'Crypto.Signature.PKCS1_v1_5.new', 'PKCS1_v1_5.new', (['key'], {}), '(key)\n', (2001, 2006), False, 'from Crypto.Signature import PKCS1_v1_5\n'), ((2012, 2024), 'Crypto.Hash.SHA256.new', 'SHA256.new', ([], {}), '()\n', (2022, 2024), False, 'from Crypto.Hash import SHA256\n'), ((2227, 2303), 'struct.pack', 'struct.pack', (['"""<IIIIHH"""', 'magic', 'img_type', 'img_size', 'algo', 'digest_len', 'sig_len'], {}), "('<IIIIHH', magic, img_type, img_size, algo, digest_len, sig_len)\n", (2238, 2303), False, 'import struct\n')]
|
import unittest
import ray
from ray.rllib.agents.registry import get_agent_class
from ray.rllib.examples.env.multi_agent import MultiAgentCartPole, \
MultiAgentMountainCar
from ray.rllib.utils.test_utils import framework_iterator
from ray.tune import register_env
def check_support_multiagent(alg, config):
    """Smoke-test that algorithm *alg* trains on a simple multi-agent env.

    DDPG-family and SAC runs use the mountain-car env; everything else
    uses cart-pole. One short train/stop cycle is run per framework.
    """
    register_env("multi_agent_mountaincar",
                 lambda _: MultiAgentMountainCar({"num_agents": 2}))
    register_env("multi_agent_cartpole",
                 lambda _: MultiAgentCartPole({"num_agents": 2}))
    config["log_level"] = "ERROR"
    mountaincar_algs = ("DDPG", "APEX_DDPG", "SAC")
    for _ in framework_iterator(config, frameworks=("tf", "torch")):
        env_name = ("multi_agent_mountaincar" if alg in mountaincar_algs
                    else "multi_agent_cartpole")
        trainer = get_agent_class(alg)(config=config, env=env_name)
        try:
            trainer.train()
        finally:
            trainer.stop()
class ModelSupportedSpaces(unittest.TestCase):
    """Multi-agent smoke tests for the built-in RLlib algorithms."""

    def setUp(self):
        ray.init(num_cpus=4, ignore_reinit_error=True)

    def tearDown(self):
        ray.shutdown()

    def test_a3c_multiagent(self):
        config = {"num_workers": 1, "optimizer": {"grads_per_step": 1}}
        check_support_multiagent("A3C", config)

    def test_apex_multiagent(self):
        config = {
            "num_workers": 2,
            "timesteps_per_iteration": 1000,
            "num_gpus": 0,
            "min_iter_time_s": 1,
            "learning_starts": 1000,
            "target_network_update_freq": 100,
        }
        check_support_multiagent("APEX", config)

    def test_apex_ddpg_multiagent(self):
        config = {
            "num_workers": 2,
            "timesteps_per_iteration": 1000,
            "num_gpus": 0,
            "min_iter_time_s": 1,
            "learning_starts": 1000,
            "target_network_update_freq": 100,
            "use_state_preprocessor": True,
        }
        check_support_multiagent("APEX_DDPG", config)

    def test_ddpg_multiagent(self):
        config = {
            "timesteps_per_iteration": 1,
            "use_state_preprocessor": True,
            "learning_starts": 500,
        }
        check_support_multiagent("DDPG", config)

    def test_dqn_multiagent(self):
        check_support_multiagent("DQN", {"timesteps_per_iteration": 1})

    def test_impala_multiagent(self):
        check_support_multiagent("IMPALA", {"num_gpus": 0})

    def test_pg_multiagent(self):
        check_support_multiagent("PG", {"num_workers": 1, "optimizer": {}})

    def test_ppo_multiagent(self):
        config = {
            "num_workers": 1,
            "num_sgd_iter": 1,
            "train_batch_size": 10,
            "rollout_fragment_length": 10,
            "sgd_minibatch_size": 1,
        }
        check_support_multiagent("PPO", config)

    def test_sac_multiagent(self):
        config = {"num_workers": 0, "normalize_actions": False}
        check_support_multiagent("SAC", config)


if __name__ == "__main__":
    import pytest
    import sys
    sys.exit(pytest.main(["-v", __file__]))
|
[
"ray.init",
"ray.rllib.examples.env.multi_agent.MultiAgentCartPole",
"pytest.main",
"ray.rllib.utils.test_utils.framework_iterator",
"ray.shutdown",
"ray.rllib.examples.env.multi_agent.MultiAgentMountainCar",
"ray.rllib.agents.registry.get_agent_class"
] |
[((581, 635), 'ray.rllib.utils.test_utils.framework_iterator', 'framework_iterator', (['config'], {'frameworks': "('tf', 'torch')"}), "(config, frameworks=('tf', 'torch'))\n", (599, 635), False, 'from ray.rllib.utils.test_utils import framework_iterator\n'), ((1030, 1076), 'ray.init', 'ray.init', ([], {'num_cpus': '(4)', 'ignore_reinit_error': '(True)'}), '(num_cpus=4, ignore_reinit_error=True)\n', (1038, 1076), False, 'import ray\n'), ((1110, 1124), 'ray.shutdown', 'ray.shutdown', ([], {}), '()\n', (1122, 1124), False, 'import ray\n'), ((3180, 3209), 'pytest.main', 'pytest.main', (["['-v', __file__]"], {}), "(['-v', __file__])\n", (3191, 3209), False, 'import pytest\n'), ((385, 425), 'ray.rllib.examples.env.multi_agent.MultiAgentMountainCar', 'MultiAgentMountainCar', (["{'num_agents': 2}"], {}), "({'num_agents': 2})\n", (406, 425), False, 'from ray.rllib.examples.env.multi_agent import MultiAgentCartPole, MultiAgentMountainCar\n'), ((495, 532), 'ray.rllib.examples.env.multi_agent.MultiAgentCartPole', 'MultiAgentCartPole', (["{'num_agents': 2}"], {}), "({'num_agents': 2})\n", (513, 532), False, 'from ray.rllib.examples.env.multi_agent import MultiAgentCartPole, MultiAgentMountainCar\n'), ((701, 721), 'ray.rllib.agents.registry.get_agent_class', 'get_agent_class', (['alg'], {}), '(alg)\n', (716, 721), False, 'from ray.rllib.agents.registry import get_agent_class\n'), ((815, 835), 'ray.rllib.agents.registry.get_agent_class', 'get_agent_class', (['alg'], {}), '(alg)\n', (830, 835), False, 'from ray.rllib.agents.registry import get_agent_class\n')]
|
import numpy as np
import pandas as pd
import SimpleITK as sitk
import matplotlib.pyplot as plt
from sklearn import cluster
def show_one(img):
    """
    Draw a single 2D image into a new figure without calling plt.show(),
    so the caller decides when to display it.
    :param img: The 2D image to be shown
    :return: None
    """
    dpi = 40
    margin = 0.05
    pixel_array = sitk.GetArrayFromImage(img)
    spacing = img.GetSpacing()
    # Extent maps pixel indices to physical coordinates (y axis flipped).
    extent = (0, pixel_array.shape[1] * spacing[1],
              pixel_array.shape[0] * spacing[0], 0)
    fig = plt.figure(figsize=(5, 5), dpi=dpi)
    axes = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])
    plt.set_cmap("gray")
    axes.imshow(pixel_array, extent=extent, interpolation=None)
def show_all(img, overlay=None, axis='z'):
    """
    Display every slice of the image along the given axis, then plt.show().

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: The image to be displayed
    :param overlay: Any overlay of labels that one might want displayed. Defaults to none
    :param axis: The axis in which to graph each image. Defaults to z
    :raises Exception: if *axis* is not one of 'x', 'y' or 'z'
    :return: None
    """
    xlen, ylen, zlen = img.GetSize()
    all_images = []
    all_overlays = []
    if axis == 'z':
        all_images = [img[:, :, z] for z in range(zlen)]
        if overlay:
            all_overlays = [overlay[:, :, z] for z in range(zlen)]
    elif axis == 'y':
        all_images = [img[:, y, :] for y in range(ylen)]
        if overlay:
            all_overlays = [overlay[:, y, :] for y in range(ylen)]
    elif axis == 'x':
        all_images = [img[x, :, :] for x in range(xlen)]
        if overlay:
            all_overlays = [overlay[x, :, :] for x in range(xlen)]
    else:
        raise Exception('invalid axis')
    for i, image in enumerate(all_images):
        if overlay:
            show_one(sitk.LabelOverlay(image, all_overlays[i]))
        else:
            show_one(image)
    plt.show()
def make_empty_img_from_img(img, dimensions=3):
    """
    Create a same-sized copy of *img* with every pixel zeroed out.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: The image to find dimensions for
    :param dimensions: The number of dimensions in the image (2 or 3)
    :return: The new, zero-filled image
    """
    xlen, ylen, zlen = img.GetSize()
    # Slicing yields a copy, so the source image is left untouched.
    dupe = img[:, :, :]
    for x in range(xlen):
        for y in range(ylen):
            if dimensions == 3:
                for z in range(zlen):
                    dupe.SetPixel(x, y, z, 0)
            else:
                dupe.SetPixel(x, y, 0)
    return dupe
def read_image(path):
    """
    Load a DICOM series from a directory as one volume.
    :param path: system path towards the directory of .dcm slices
    :return: sitk image with the origin reset to (0, 0, 0)
    """
    series_reader = sitk.ImageSeriesReader()
    filenames = series_reader.GetGDCMSeriesFileNames(path)
    series_reader.SetFileNames(filenames)
    series_reader.LoadPrivateTagsOn()
    volume = series_reader.Execute()
    volume.SetOrigin((0, 0, 0))
    return volume
def retrieve_overlap(img1, img2, lbl1=1, lbl2=1):
    """
    Take in two images of labels and return an image with only the overlap of the labels.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img1: The first image of labels
    :param img2: The second image of labels
    :param lbl1: The label to retrieve from the first image
    :param lbl2: The label to retrieve from the second image
    :return: A new image of labels where overlap exists
    """
    xlen, ylen, zlen = img1.GetSize()
    # Make sure that our images are equal in size to prevent weird invisible bugs
    xlen2, ylen2, zlen2 = img2.GetSize()
    assert xlen == xlen2 and ylen == ylen2 and zlen == zlen2
    # Copy our image as to not alter the original data
    new_image = img1[:, :, :]
    for z in range(zlen):
        for y in range(ylen):
            for x in range(xlen):
                # Set any bit with overlap to 1, else set it to 0
                overlap = (img1.GetPixel(x, y, z) == lbl1
                           and img2.GetPixel(x, y, z) == lbl2)
                new_image.SetPixel(x, y, z, 1 if overlap else 0)
    return new_image
def get_df_from_img(img, dimensions=3):
    """
    Collect the coordinates of every non-zero pixel into a pandas DataFrame.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: The image to be converted (anything with GetSize()/GetPixel())
    :param dimensions: The number of dimensions of the image - only 2D and 3D supported
    :return: A pandas dataframe with columns 'x', 'y' (and 'z' when 3D)
    :raises Exception: for any other dimension count
    """
    if dimensions == 3:
        df_dict = {'x': [], 'y': [], 'z': []}
        xlen, ylen, zlen = img.GetSize()
        for x in range(xlen):
            for y in range(ylen):
                for z in range(zlen):
                    if img.GetPixel(x, y, z):
                        df_dict['x'].append(x)
                        df_dict['y'].append(y)
                        df_dict['z'].append(z)
        return pd.DataFrame.from_dict(df_dict)
    elif dimensions == 2:
        df_dict = {'x': [], 'y': []}
        xlen, ylen = img.GetSize()
        for x in range(xlen):
            for y in range(ylen):
                if img.GetPixel(x, y):
                    df_dict['x'].append(x)
                    df_dict['y'].append(y)
        return pd.DataFrame.from_dict(df_dict)
    else:
        raise Exception('Unsupported number of dimensions')
def update_img_from_df(df, image, keep=0, dimensions=3, colname='label',
                       inside_value=1, outside_value=0):
    """
    Write the labels held in *df* back into *image* pixel by pixel.
    Rows whose *colname* value equals *keep* become *inside_value*; all
    other rows become *outside_value*.
    :param df: The dataframe to read labels from
    :param image: The image to be overwritten
    :param keep: The label in the dataframe to keep
    :param dimensions: The number of dimensions in the image (2 or 3)
    :param colname: The name of the column containing the labels
    :param inside_value: What to update labeled pixels to
    :param outside_value: What to update unlabeled pixels to
    :return: None
    """
    for _, row in df.iterrows():
        # The dimension check stays inside the loop so an empty frame
        # never raises, matching the original behavior.
        if dimensions == 2:
            coords = (row['x'], row['y'])
        elif dimensions == 3:
            coords = (row['x'], row['y'], row['z'])
        else:
            raise Exception('Unsupported number of dimensions')
        value = inside_value if row[colname] == keep else outside_value
        image.SetPixel(*coords, value)
def dbscan_filter(img, eps, use_z=True):
    """
    Keep only the largest DBSCAN cluster of labeled pixels, filtering noise.

    BUG FIX: dict.iterkeys() is Python 2 only and raises AttributeError on
    Python 3; iterate the dict directly instead.

    :param img: label image to filter
    :param eps: DBSCAN neighborhood radius
    :param use_z: whether the z coordinate participates in clustering
    :return: a new image containing only the largest cluster
    """
    df = get_df_from_img(img)
    features = df.drop('z', axis=1) if not use_z else df
    fit = cluster.DBSCAN(eps=eps).fit(features)
    df['label'] = pd.Series(fit.labels_)
    # NOTE(review): counts is computed before noise removal, so the noise
    # label (-1) can win the max below, as in the original — confirm intended.
    counts = df['label'].value_counts().to_dict()
    # Remove all non-clusters
    df = df[df.label != -1]
    largest_cluster = max(counts, key=counts.get)
    img_filtered = make_empty_img_from_img(img)
    update_img_from_df(df, img_filtered, keep=largest_cluster)
    return img_filtered
def kmeans_segment(img, num_segments=2, use_z=True):
    """
    Split the labeled pixels into *num_segments* KMeans clusters.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: label image to segment
    :param num_segments: number of KMeans clusters
    :param use_z: whether the z coordinate participates in clustering
    :return: (list of one image per cluster, list of per-cluster max x)
    """
    df = get_df_from_img(img)
    features = df if use_z else df.drop('z', axis=1)
    fit = cluster.KMeans(n_clusters=num_segments).fit(features)
    df['label'] = pd.Series(fit.labels_)
    all_images = [make_empty_img_from_img(img) for _ in range(num_segments)]
    x_max = [0 for _ in range(num_segments)]
    for _, row in df.iterrows():
        x, y, z, label = (row['x'], row['y'], row['z'], row['label'])
        all_images[label].SetPixel(x, y, z, 1)
        x_max[label] = max((x_max[label], x))
    return all_images, x_max
def count_labels(img):
    """
    Count the non-zero pixels in a 2D image.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: 2D image to scan
    :return: number of pixels with a truthy value
    """
    xlen, ylen = img.GetSize()
    count = 0
    for x in range(xlen):
        for y in range(ylen):
            if img.GetPixel(x, y):
                count += 1
    return count
def filter_by_label_count(img, threshold):
    """
    Find the first/last z-slices that hold at least *threshold* labeled pixels.

    BUG FIX: xrange() is Python 2 only and raises NameError on Python 3;
    replaced with range().

    :param img: 3D label image
    :param threshold: minimum labeled-pixel count for a slice to be kept
    :return: (start, end) slice indices bounding the kept region
    """
    start = 0
    arr = sitk.GetArrayFromImage(img)
    end = len(arr)
    # Advance `start` past leading slices that are too sparse.
    for z in range(end):
        if count_labels(img[:, :, z]) < threshold:
            if z == start:
                start += 1
    # Pull `end` back past trailing sparse slices.
    for z in reversed(range(end)):
        if count_labels(img[:, :, z]) < threshold:
            if z == end - 1:
                end -= 1
    return start, end
def main():
    """
    Run the full mitral-valve segmentation pipeline described in the paper.

    BUG FIXES for Python 3: xrange() -> range() and dict.iterkeys() ->
    iterating the dict directly; both raised errors on Python 3.

    :return: None
    """
    # Directory where the DICOM files are being stored
    input_path = './Inputs/valve'
    # Original image from the filepath
    img_original = read_image(input_path)
    # Image with smoothing applied to reduce noise
    img_smooth = sitk.CurvatureFlow(image1=img_original, timeStep=0.125, numberOfIterations=10)
    # Create labels on our smoothed image for cardiac tissue and tissue with blood
    labels_tissue = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=325, upperThreshold=470, insideValue=1)
    labels_blood = sitk.BinaryThreshold(image1=img_smooth, lowerThreshold=450, upperThreshold=800, insideValue=1, outsideValue=0)
    # IMPORTANT STEP: by finding the "blood" without cardiac tissue and then
    # using binary hole filling with a fairly large radius, we label much of
    # the mitral valve area without labeling too much other cardiac tissue.
    # The overlap of the two label sets singles out the valve tissue.
    labels_tissue_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_tissue, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)
    labels_blood_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_blood, radius=[4] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)
    labels_valve = retrieve_overlap(labels_blood_no_holes, labels_tissue_no_holes)
    labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve, radius=[2] * 3, majorityThreshold=1, backgroundValue=0, foregroundValue=1)
    labels_valve_no_holes = sitk.VotingBinaryHoleFilling(image1=labels_valve_no_holes, radius=[1] * 3, majorityThreshold=0, backgroundValue=1, foregroundValue=0)
    # Fix intensity scaling on our original smoothed image for pretty diagram purposes
    img_smooth = sitk.Cast(sitk.RescaleIntensity(img_smooth), labels_tissue_no_holes.GetPixelID())
    # Use a density-based clustering algorithm to attempt to remove as much noise as possible
    labels_valve_filtered = dbscan_filter(labels_valve_no_holes, eps=2, use_z=False)
    labels_valve_filtered = dbscan_filter(labels_valve_filtered, eps=4)
    # Find likely start and end points of our image by setting a minimum number of labeled pixels
    start, end = filter_by_label_count(labels_valve_filtered, 10)
    img_smooth = img_smooth[:, :, start:end]
    labels_valve_filtered = labels_valve_filtered[:, :, start:end]
    # Remove all values distant from the center of our starting location by taking advantage of kmeans
    df = get_df_from_img(labels_valve_filtered[:, :, 0], dimensions=2)
    x_mid = df['x'].mean()
    y_mid = df['y'].mean()
    df = get_df_from_img(labels_valve_filtered)
    distance_df = df.drop('z', axis=1)
    distance_df['x_dist'] = abs(distance_df['x'] - x_mid)
    distance_df['y_dist'] = abs(distance_df['y'] - y_mid)
    fit = cluster.KMeans(n_clusters=2).fit(distance_df.drop(['x', 'y'], axis=1))
    df['label'] = pd.Series(fit.labels_)
    counts = df['label'].value_counts().to_dict()
    # BUG FIX: dict.iterkeys() is Python 2 only; iterate the dict directly.
    largest_cluster = max(counts, key=counts.get)
    update_img_from_df(df, labels_valve_filtered, keep=largest_cluster)
    # Find likely start and end points of our image by setting a minimum number of labeled pixels
    start, end = filter_by_label_count(labels_valve_filtered, 10)
    img_smooth = img_smooth[:, :, start:end]
    labels_valve_filtered = labels_valve_filtered[:, :, start:end]
    # Use a segmentation-based clustering algorithm to attempt to find each valve
    label_segments, x_max = kmeans_segment(labels_valve_filtered, use_z=False)
    left, right = (label_segments[0], label_segments[1])
    if x_max[0] > x_max[1]:
        left, right = right, left
    # Take the furthest point from the likely start/end points per z-slice
    # in order to get our annulus ring.
    left_points = {'x': [], 'y': [], 'z': []}
    right_points = {'x': [], 'y': [], 'z': []}
    zlen = len(sitk.GetArrayFromImage(left))
    # BUG FIX: xrange() is Python 2 only; replaced with range().
    for z in range(zlen):
        left_df = get_df_from_img(left[:, :, z], dimensions=2)
        if len(left_df['y']) > 0:
            row = left_df.iloc[left_df['y'].idxmin()]
            left_points['x'].append(int(row['x']))
            left_points['y'].append(int(row['y']))
            left_points['z'].append(z)
        right_df = get_df_from_img(right[:, :, z], dimensions=2)
        if len(right_df['x']) > 0:
            row = right_df.iloc[right_df['x'].idxmax()]
            right_points['x'].append(int(row['x']))
            right_points['y'].append(int(row['y']))
            right_points['z'].append(z)
    # These both represent the coordinates of our annulus ring. A simple spline
    # can be used for interpolation between points.
    final_left = pd.DataFrame.from_dict(left_points)
    final_right = pd.DataFrame.from_dict(right_points)
    print('Coordinates for one side of the ring')
    print(final_left)
    print('\n\nCoordinates for the other side of the ring')
    print(final_right)
    final_image = make_empty_img_from_img(left)
    xs = left_points['x'] + right_points['x']
    ys = left_points['y'] + right_points['y']
    zs = left_points['z'] + right_points['z']
    for x, y, z in zip(xs, ys, zs):
        final_image.SetPixel(x, y, z, 1)
    show_all(img_smooth, final_image)
if __name__ == '__main__':
    main()
|
[
"SimpleITK.BinaryThreshold",
"matplotlib.pyplot.show",
"pandas.DataFrame.from_dict",
"sklearn.cluster.KMeans",
"SimpleITK.GetArrayFromImage",
"SimpleITK.RescaleIntensity",
"SimpleITK.LabelOverlay",
"matplotlib.pyplot.figure",
"SimpleITK.CurvatureFlow",
"matplotlib.pyplot.set_cmap",
"pandas.Series",
"SimpleITK.VotingBinaryHoleFilling",
"sklearn.cluster.DBSCAN",
"SimpleITK.ImageSeriesReader"
] |
[((342, 369), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (364, 369), True, 'import SimpleITK as sitk\n'), ((506, 542), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': 'figsize', 'dpi': 'dpi'}), '(figsize=figsize, dpi=dpi)\n', (516, 542), True, 'import matplotlib.pyplot as plt\n'), ((620, 640), 'matplotlib.pyplot.set_cmap', 'plt.set_cmap', (['"""gray"""'], {}), "('gray')\n", (632, 640), True, 'import matplotlib.pyplot as plt\n'), ((1855, 1865), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1863, 1865), True, 'import matplotlib.pyplot as plt\n'), ((2675, 2699), 'SimpleITK.ImageSeriesReader', 'sitk.ImageSeriesReader', ([], {}), '()\n', (2697, 2699), True, 'import SimpleITK as sitk\n'), ((6931, 6948), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (6940, 6948), True, 'import pandas as pd\n'), ((7530, 7547), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (7539, 7547), True, 'import pandas as pd\n'), ((8177, 8204), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (8199, 8204), True, 'import SimpleITK as sitk\n'), ((8974, 9052), 'SimpleITK.CurvatureFlow', 'sitk.CurvatureFlow', ([], {'image1': 'img_original', 'timeStep': '(0.125)', 'numberOfIterations': '(10)'}), '(image1=img_original, timeStep=0.125, numberOfIterations=10)\n', (8992, 9052), True, 'import SimpleITK as sitk\n'), ((9157, 9256), 'SimpleITK.BinaryThreshold', 'sitk.BinaryThreshold', ([], {'image1': 'img_smooth', 'lowerThreshold': '(325)', 'upperThreshold': '(470)', 'insideValue': '(1)'}), '(image1=img_smooth, lowerThreshold=325, upperThreshold=\n 470, insideValue=1)\n', (9177, 9256), True, 'import SimpleITK as sitk\n'), ((9271, 9386), 'SimpleITK.BinaryThreshold', 'sitk.BinaryThreshold', ([], {'image1': 'img_smooth', 'lowerThreshold': '(450)', 'upperThreshold': '(800)', 'insideValue': '(1)', 'outsideValue': '(0)'}), '(image1=img_smooth, lowerThreshold=450, upperThreshold=\n 800, 
insideValue=1, outsideValue=0)\n', (9291, 9386), True, 'import SimpleITK as sitk\n'), ((9845, 9974), 'SimpleITK.VotingBinaryHoleFilling', 'sitk.VotingBinaryHoleFilling', ([], {'image1': 'labels_tissue', 'radius': '([2] * 3)', 'majorityThreshold': '(1)', 'backgroundValue': '(0)', 'foregroundValue': '(1)'}), '(image1=labels_tissue, radius=[2] * 3,\n majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n', (9873, 9974), True, 'import SimpleITK as sitk\n'), ((9999, 10127), 'SimpleITK.VotingBinaryHoleFilling', 'sitk.VotingBinaryHoleFilling', ([], {'image1': 'labels_blood', 'radius': '([4] * 3)', 'majorityThreshold': '(1)', 'backgroundValue': '(0)', 'foregroundValue': '(1)'}), '(image1=labels_blood, radius=[4] * 3,\n majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n', (10027, 10127), True, 'import SimpleITK as sitk\n'), ((10235, 10363), 'SimpleITK.VotingBinaryHoleFilling', 'sitk.VotingBinaryHoleFilling', ([], {'image1': 'labels_valve', 'radius': '([2] * 3)', 'majorityThreshold': '(1)', 'backgroundValue': '(0)', 'foregroundValue': '(1)'}), '(image1=labels_valve, radius=[2] * 3,\n majorityThreshold=1, backgroundValue=0, foregroundValue=1)\n', (10263, 10363), True, 'import SimpleITK as sitk\n'), ((10388, 10525), 'SimpleITK.VotingBinaryHoleFilling', 'sitk.VotingBinaryHoleFilling', ([], {'image1': 'labels_valve_no_holes', 'radius': '([1] * 3)', 'majorityThreshold': '(0)', 'backgroundValue': '(1)', 'foregroundValue': '(0)'}), '(image1=labels_valve_no_holes, radius=[1] * 3,\n majorityThreshold=0, backgroundValue=1, foregroundValue=0)\n', (10416, 10525), True, 'import SimpleITK as sitk\n'), ((11794, 11811), 'pandas.Series', 'pd.Series', (['labels'], {}), '(labels)\n', (11803, 11811), True, 'import pandas as pd\n'), ((13685, 13720), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['left_points'], {}), '(left_points)\n', (13707, 13720), True, 'import pandas as pd\n'), ((13739, 13775), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', 
(['right_points'], {}), '(right_points)\n', (13761, 13775), True, 'import pandas as pd\n'), ((4898, 4929), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df_dict'], {}), '(df_dict)\n', (4920, 4929), True, 'import pandas as pd\n'), ((10637, 10670), 'SimpleITK.RescaleIntensity', 'sitk.RescaleIntensity', (['img_smooth'], {}), '(img_smooth)\n', (10658, 10670), True, 'import SimpleITK as sitk\n'), ((12832, 12860), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['left'], {}), '(left)\n', (12854, 12860), True, 'import SimpleITK as sitk\n'), ((5250, 5281), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['df_dict'], {}), '(df_dict)\n', (5272, 5281), True, 'import pandas as pd\n'), ((6852, 6875), 'sklearn.cluster.DBSCAN', 'cluster.DBSCAN', ([], {'eps': 'eps'}), '(eps=eps)\n', (6866, 6875), False, 'from sklearn import cluster\n'), ((7435, 7474), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': 'num_segments'}), '(n_clusters=num_segments)\n', (7449, 7474), False, 'from sklearn import cluster\n'), ((11680, 11708), 'sklearn.cluster.KMeans', 'cluster.KMeans', ([], {'n_clusters': '(2)'}), '(n_clusters=2)\n', (11694, 11708), False, 'from sklearn import cluster\n'), ((1766, 1807), 'SimpleITK.LabelOverlay', 'sitk.LabelOverlay', (['image', 'all_overlays[i]'], {}), '(image, all_overlays[i])\n', (1783, 1807), True, 'import SimpleITK as sitk\n')]
|
import codecs
import os
import re
from setuptools import setup
# Absolute path of the directory containing this setup script.
here = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
    """Read parts of a file
    Taken from pip's setup.py
    intentionally *not* adding an encoding option to open
    see: https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690

    BUG FIX: the file handle was never closed; a ``with`` block now
    guarantees it is released.
    """
    with codecs.open(os.path.join(here, *parts), 'r') as handle:
        return handle.read()
def find_version(*file_paths):
    """Find version in source file
    Read the version number from a source file.
    Code taken from pip's setup.py
    :raises RuntimeError: if no version assignment is found
    """
    contents = read(*file_paths)
    # The version line must have the form:
    # __version__ = 'ver'   (single or double quotes)
    match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", contents, re.M)
    if match:
        return match.group(1)
    raise RuntimeError("Unable to find version string.")
# Convert README.md to reStructuredText for the long description; fall back
# to the raw Markdown when pypandoc is unavailable or the file is unreadable.
try:
    import pypandoc
    long_description = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    long_description = open('README.md').read()
# Package metadata; the version string is read out of taffmat.py itself.
setup(
    name='taffmat',
    version=find_version('taffmat.py'),
    author='<NAME>',
    author_email='<EMAIL>',
    py_modules=['taffmat'],
    url='http://github.com/questrail/taffmat',
    license='MIT',
    description='Read and write Teac TAFFmat files.',
    long_description=long_description,
    requires=['numpy (>=1.6.0)'],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 5 - Production/Stable',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ]
)
|
[
"os.path.join",
"os.path.dirname",
"re.search",
"pypandoc.convert"
] |
[((88, 113), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (103, 113), False, 'import os\n'), ((685, 758), 're.search', 're.search', (['"""^__version__ = [\'\\\\"]([^\'\\\\"]*)[\'\\\\"]"""', 'version_file', 're.M'], {}), '(\'^__version__ = [\\\'\\\\"]([^\\\'\\\\"]*)[\\\'\\\\"]\', version_file, re.M)\n', (694, 758), False, 'import re\n'), ((951, 987), 'pypandoc.convert', 'pypandoc.convert', (['"""README.md"""', '"""rst"""'], {}), "('README.md', 'rst')\n", (967, 987), False, 'import pypandoc\n'), ((359, 385), 'os.path.join', 'os.path.join', (['here', '*parts'], {}), '(here, *parts)\n', (371, 385), False, 'import os\n')]
|
from distutils.core import setup

# Package metadata for the HDF5Tools distribution; the Python sources live
# under Lib/ and are installed as the HDF5Tools package.
setup(
    name='HDF5Tools',
    author='<NAME>',
    version='1.0',
    description='Utilities for HDF5 manipulation, requires h5dump to be present on system',
    url='http://cdat.sourceforge.net',
    packages=['HDF5Tools'],
    package_dir={'HDF5Tools': 'Lib'},
)
|
[
"distutils.core.setup"
] |
[((34, 284), 'distutils.core.setup', 'setup', ([], {'name': '"""HDF5Tools"""', 'author': '"""<NAME>"""', 'version': '"""1.0"""', 'description': '"""Utilities for HDF5 manipulation, requires h5dump to be present on system"""', 'url': '"""http://cdat.sourceforge.net"""', 'packages': "['HDF5Tools']", 'package_dir': "{'HDF5Tools': 'Lib'}"}), "(name='HDF5Tools', author='<NAME>', version='1.0', description=\n 'Utilities for HDF5 manipulation, requires h5dump to be present on system',\n url='http://cdat.sourceforge.net', packages=['HDF5Tools'], package_dir=\n {'HDF5Tools': 'Lib'})\n", (39, 284), False, 'from distutils.core import setup\n')]
|
#!/usr/bin/env python
"""Apply standard license headers and generate contributor list.
Rather than trying to maintain file-level lists of contributors and copyright
dates, apply a standard license header that points to the LICENSE file for the
licensing details. And then generate the LICENSE file with a complete list of
contributors based on the git commit history.
To adjust contributor names or combine email addresses, see .mailmap.
See https://github.com/edsu/pymarc/issues/147 for context.
"""
# This file is part of pymarc. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution and at
# https://opensource.org/licenses/BSD-2-Clause. pymarc may be copied, modified,
# propagated, or distributed according to the terms contained in the LICENSE
# file.
import pathlib
import shlex
import subprocess
def get_contributors():
"""Get a complete list of contributors from `git log`."""
# dictionary = add each name only once
contribs = {}
gitargs = shlex.split("git log --use-mailmap --format=short")
log = subprocess.run(gitargs, capture_output=True, encoding="utf-8")
for line in log.stdout.split("\n"):
if line[0 : len("Author: ")] == "Author: ":
contribs[line[len("Author: ") :]] = 1
# Return a list of the contributors
return sorted(contribs)
def generate_license(contribs):
"""Generate a BSD-2 license file that lists contributors."""
bsd2 = """Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
with open("LICENSE", "w") as licensef:
licensef.write(bsd2)
licensef.write(
"Copyright for this project is held by its many contributors, including:\n\n"
)
for contrib in contribs:
licensef.write("{}\n".format(contrib))
def apply_headers():
"""Ensure standard license header is in each Python file."""
header = """# This file is part of pymarc. It is subject to the license terms in the
# LICENSE file found in the top-level directory of this distribution and at
# https://opensource.org/licenses/BSD-2-Clause. pymarc may be copied, modified,
# propagated, or distributed according to the terms contained in the LICENSE
# file.
"""
path = pathlib.Path(".")
for pyfile in list(path.glob("**/*.py")):
if str(pyfile) == "docs/source/conf.py" or str(pyfile) == "test/__init__.py":
continue
with open(pyfile, "r") as reader:
contents = reader.read()
if contents.find(header) == -1:
if str(pyfile) == "test/__init__.py":
# Avoid angering black with a blank line at the end
write_header(pyfile, reader, contents, header)
else:
write_header(pyfile, reader, contents, header + "\n")
def write_header(pyfile, reader, contents, header):
"""Rewrite Python source file with the license header."""
reader.close()
utf8_decl = "# -*- coding: utf-8 -*-\n"
with open(pyfile, "w") as writer:
if contents.startswith("# __init__.py"):
sections = contents.split("\n\n", 1)
writer.write(sections[0])
writer.write("\n\n")
writer.write(header)
writer.write(sections[1])
elif contents.startswith(utf8_decl):
sections = contents.split(utf8_decl, 1)
writer.write(utf8_decl)
writer.write("\n")
writer.write(header)
writer.write(sections[1])
else:
writer.write(header)
writer.write(contents)
if __name__ == "__main__":
generate_license(get_contributors())
apply_headers()
|
[
"subprocess.run",
"pathlib.Path",
"shlex.split"
] |
[((1031, 1082), 'shlex.split', 'shlex.split', (['"""git log --use-mailmap --format=short"""'], {}), "('git log --use-mailmap --format=short')\n", (1042, 1082), False, 'import shlex\n'), ((1093, 1155), 'subprocess.run', 'subprocess.run', (['gitargs'], {'capture_output': '(True)', 'encoding': '"""utf-8"""'}), "(gitargs, capture_output=True, encoding='utf-8')\n", (1107, 1155), False, 'import subprocess\n'), ((3436, 3453), 'pathlib.Path', 'pathlib.Path', (['"""."""'], {}), "('.')\n", (3448, 3453), False, 'import pathlib\n')]
|
#!/usr/bin/python3
import json
import logging as l
import os
import random
import threading
import time
import math
import urllib.error
import urllib.request
display = None
try:
import pygame
pygame.init()
display = pygame.display.set_mode((100, 100))
NOGUI = False
except ImportError:
l.warning("Error while importing pygame; no GUI will be displayed.")
NOGUI = True
except pygame.error:
l.warning("Error while initializing display; no GUI will be displayed.")
NOGUI = True
maxnum = 1
class DisplayManager(threading.Thread):
def __init__(self):
super().__init__()
self.spinner_phase_1 = 0.0
self.spinner_phase_2 = 3.141
self.spinner_color = pygame.Color('white')
self.display = pygame.display.set_mode((800, 600))
self.daemon = True
self.image = pygame.Surface((0, 0))
self.image_to_blit = None
self.clock = pygame.time.Clock()
self.num = 0
self.max = 1
self.start()
def run(self):
while 1:
self.image_to_blit = self.image.convert()
self.image_to_blit.fill(pygame.Color(192, 192, 192, 100), special_flags=pygame.BLEND_MIN)
self.display.fill(pygame.Color('black'))
self.display.blit(self.image_to_blit, (
400 - (self.image_to_blit.get_width() // 2), 300 - (self.image_to_blit.get_height() // 2)))
r = pygame.Rect(0, 0, 100, 100)
r.center = self.display.get_rect().center
pygame.draw.arc(self.display, self.spinner_color, r, self.spinner_phase_1, self.spinner_phase_2, 10)
self.spinner_phase_1 += 0.1
self.spinner_phase_2 = self.spinner_phase_1 + abs(
math.sin(time.time()) * math.pi * max(self.num / self.max, 0.1))
pygame.display.flip()
self.clock.tick(30)
def set_error(self) -> None:
self.spinner_color = pygame.Color('red')
def unset_error(self) -> None:
self.gen_color()
def gen_color(self):
self.spinner_color = pygame.Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
def set_val(self, n: int):
self.num = n
self.image = pygame.image.load('{}/img.png'.format(str(n)))
dm = None
if not NOGUI:
dm = DisplayManager()
def download_json(url: str) -> dict:
if not NOGUI:
dm.unset_error()
dm.gen_color()
l.debug("Downloading JSON from {}...".format(url))
r = b''
try:
with urllib.request.urlopen(url, timeout=10) as response:
r = response.read()
except urllib.error.URLError:
l.error("Timeout while trying to connect to {}!".format(url))
try:
r = r.decode('utf-8')
r = json.loads(r)
except UnicodeDecodeError:
l.error("Data received isn't UTF-8 encoded!")
if not NOGUI:
dm.set_error()
except json.decoder.JSONDecodeError:
l.error("Data received isn't JSON!")
if not NOGUI:
dm.set_error()
if not isinstance(r, dict):
raise ValueError
return r
def download(url: str, save_to: str) -> None: # idea swiped from https://stackoverflow.com/a/7244263
if not NOGUI:
dm.unset_error()
dm.gen_color()
l.debug("Downloading file from {} to {}".format(url, save_to))
try:
with urllib.request.urlopen(url, timeout=10) as response, open(save_to, 'wb') as out_file:
data = response.read(1)
while not data == b'':
out_file.write(data)
data = response.read(1)
except urllib.error.URLError:
l.error("Timeout while trying to connect to {}!".format(url))
if not NOGUI:
dm.set_error()
def get_comic(n: int) -> None:
obj = download_json('http://xkcd.com/{}/info.0.json'.format(str(n)))
try:
os.mkdir(str(n))
except FileExistsError:
pass
os.chdir(str(n))
with open('info.json', 'w') as o:
json.dump(obj, o)
download(obj['img'], 'img.png')
os.chdir('..')
print('Downloaded {}/{} ({}% done): {} '.format(str(n), str(maxnum), str(n/maxnum*100), obj['safe_title']))
if not NOGUI:
pygame.display.set_caption(str(obj['num']) + ': ' + obj['safe_title'])
dm.set_val(n)
def download_all() -> None:
obj = download_json('https://xkcd.com/info.0.json')
global maxnum
maxnum = obj['num']
if not NOGUI:
dm.max = obj['num']
for i in range(1, obj['num']):
get_comic(i)
if __name__ == '__main__':
download_all()
|
[
"pygame.draw.arc",
"json.dump",
"logging.error",
"pygame.Surface",
"json.loads",
"random.randint",
"logging.warning",
"pygame.display.set_mode",
"pygame.Color",
"pygame.Rect",
"pygame.init",
"pygame.display.flip",
"time.time",
"pygame.time.Clock",
"os.chdir"
] |
[((202, 215), 'pygame.init', 'pygame.init', ([], {}), '()\n', (213, 215), False, 'import pygame\n'), ((230, 265), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(100, 100)'], {}), '((100, 100))\n', (253, 265), False, 'import pygame\n'), ((4064, 4078), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (4072, 4078), False, 'import os\n'), ((308, 376), 'logging.warning', 'l.warning', (['"""Error while importing pygame; no GUI will be displayed."""'], {}), "('Error while importing pygame; no GUI will be displayed.')\n", (317, 376), True, 'import logging as l\n'), ((419, 491), 'logging.warning', 'l.warning', (['"""Error while initializing display; no GUI will be displayed."""'], {}), "('Error while initializing display; no GUI will be displayed.')\n", (428, 491), True, 'import logging as l\n'), ((715, 736), 'pygame.Color', 'pygame.Color', (['"""white"""'], {}), "('white')\n", (727, 736), False, 'import pygame\n'), ((760, 795), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(800, 600)'], {}), '((800, 600))\n', (783, 795), False, 'import pygame\n'), ((844, 866), 'pygame.Surface', 'pygame.Surface', (['(0, 0)'], {}), '((0, 0))\n', (858, 866), False, 'import pygame\n'), ((922, 941), 'pygame.time.Clock', 'pygame.time.Clock', ([], {}), '()\n', (939, 941), False, 'import pygame\n'), ((1931, 1950), 'pygame.Color', 'pygame.Color', (['"""red"""'], {}), "('red')\n", (1943, 1950), False, 'import pygame\n'), ((2759, 2772), 'json.loads', 'json.loads', (['r'], {}), '(r)\n', (2769, 2772), False, 'import json\n'), ((4006, 4023), 'json.dump', 'json.dump', (['obj', 'o'], {}), '(obj, o)\n', (4015, 4023), False, 'import json\n'), ((1423, 1450), 'pygame.Rect', 'pygame.Rect', (['(0)', '(0)', '(100)', '(100)'], {}), '(0, 0, 100, 100)\n', (1434, 1450), False, 'import pygame\n'), ((1517, 1621), 'pygame.draw.arc', 'pygame.draw.arc', (['self.display', 'self.spinner_color', 'r', 'self.spinner_phase_1', 'self.spinner_phase_2', '(10)'], {}), '(self.display, 
self.spinner_color, r, self.spinner_phase_1,\n self.spinner_phase_2, 10)\n', (1532, 1621), False, 'import pygame\n'), ((1814, 1835), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (1833, 1835), False, 'import pygame\n'), ((2080, 2102), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2094, 2102), False, 'import random\n'), ((2104, 2126), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2118, 2126), False, 'import random\n'), ((2128, 2150), 'random.randint', 'random.randint', (['(0)', '(255)'], {}), '(0, 255)\n', (2142, 2150), False, 'import random\n'), ((2812, 2857), 'logging.error', 'l.error', (['"""Data received isn\'t UTF-8 encoded!"""'], {}), '("Data received isn\'t UTF-8 encoded!")\n', (2819, 2857), True, 'import logging as l\n'), ((2956, 2992), 'logging.error', 'l.error', (['"""Data received isn\'t JSON!"""'], {}), '("Data received isn\'t JSON!")\n', (2963, 2992), True, 'import logging as l\n'), ((1132, 1164), 'pygame.Color', 'pygame.Color', (['(192)', '(192)', '(192)', '(100)'], {}), '(192, 192, 192, 100)\n', (1144, 1164), False, 'import pygame\n'), ((1228, 1249), 'pygame.Color', 'pygame.Color', (['"""black"""'], {}), "('black')\n", (1240, 1249), False, 'import pygame\n'), ((1746, 1757), 'time.time', 'time.time', ([], {}), '()\n', (1755, 1757), False, 'import time\n')]
|
from app.db import db, BaseModelMixin
from flask_jwt_extended import create_access_token
import datetime
class Auth(db.Model, BaseModelMixin):
id = db.Column(db.Integer, primary_key=True)
token = db.Column(db.Text, unique=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
created_at = db.Column(db.DateTime, onupdate=datetime.datetime.now, default=datetime.datetime.now)
def __init__(self, token, user_id):
self.token = token
self.user_id = user_id
def __str__(self):
return f'{self.id}'
def set_token(self, token):
self.token = token
def update_auth(self, data):
return Auth.query.update(preserve_parameter_order=True).where(Auth.id==self.id).values(data)
def get_by_token(token):
return Auth.query.filter_by(token=token).first()
def create_token(self):
access_token = create_access_token(identity=user.username)
|
[
"app.db.db.Column",
"flask_jwt_extended.create_access_token",
"app.db.db.ForeignKey"
] |
[((150, 189), 'app.db.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (159, 189), False, 'from app.db import db, BaseModelMixin\n'), ((199, 230), 'app.db.db.Column', 'db.Column', (['db.Text'], {'unique': '(True)'}), '(db.Text, unique=True)\n', (208, 230), False, 'from app.db import db, BaseModelMixin\n'), ((304, 394), 'app.db.db.Column', 'db.Column', (['db.DateTime'], {'onupdate': 'datetime.datetime.now', 'default': 'datetime.datetime.now'}), '(db.DateTime, onupdate=datetime.datetime.now, default=datetime.\n datetime.now)\n', (313, 394), False, 'from app.db import db, BaseModelMixin\n'), ((264, 288), 'app.db.db.ForeignKey', 'db.ForeignKey', (['"""user.id"""'], {}), "('user.id')\n", (277, 288), False, 'from app.db import db, BaseModelMixin\n'), ((815, 858), 'flask_jwt_extended.create_access_token', 'create_access_token', ([], {'identity': 'user.username'}), '(identity=user.username)\n', (834, 858), False, 'from flask_jwt_extended import create_access_token\n')]
|
import math
from systems.system import System
from scripts.callbacks import *
from scripts.components import *
from scripts import character
from joystick import *
class Movement(System):
def __init__(self, engine):
System.__init__(self, engine)
self.callbacks = {JOY_STICK: self.on_joystick,
JOY_BUTTON_DOWN: self.on_joy_button_down,
UPDATE: self.update}
self.settings = {'friction': True}
self.left_x = 0
self.left_y = 0
self.right_x = 0
self.right_y = 0
def on_joystick(self, stick_id, x, y, dt):
for e in self.engine.entities:
if has_tag(e, PLAYER_TAG):
if stick_id is 0:
if MOVE_SPEED in e:
if ANGLE in e:
rads = (90 - e[ANGLE]) / 57.3
cos = math.cos(rads)
sin = math.sin(rads)
dx = cos * x + sin * y
dy = sin * x - cos * y
else:
dx = x
dy = y
speed = e[MOVE_SPEED] * dt
e[VEL].x += dx * speed
e[VEL].z += dy * speed
elif stick_id is 1:
if ANGLE in e:
e[ANGLE] += x * dt
break
def on_joy_button_down(self, button):
if button is A:
for e in self.engine.entities:
if has_tag(e, PLAYER_TAG):
if has_components(e, (JUMP, VEL, POS)):
if e[POS].y == 0:
e[VEL].y += e[JUMP]
if STATE in e:
e[STATE] = character.JUMPING
def update(self, dt):
for e in self.engine.entities:
if VEL in e:
if DAMPING in e:
d = 1 - (e[DAMPING] * dt)
e[VEL].x *= d
e[VEL].z *= d
if GRAVITY in e:
e[VEL].y -= e[GRAVITY] * dt
if POS in e:
e[POS] += e[VEL]
if e[POS].y <= 0:
e[POS].y = 0
e[VEL].y = 0
if self.settings['friction']:
if FRICTION in e:
f = 1 - (e[FRICTION] * dt)
e[VEL].x *= f
e[VEL].z *= f
if has_components(e, (STATE, ANIMATOR)):
if isinstance(e[ANIMATOR], character.Character):
e[STATE] = character.WALK
|
[
"systems.system.System.__init__",
"math.cos",
"math.sin"
] |
[((232, 261), 'systems.system.System.__init__', 'System.__init__', (['self', 'engine'], {}), '(self, engine)\n', (247, 261), False, 'from systems.system import System\n'), ((907, 921), 'math.cos', 'math.cos', (['rads'], {}), '(rads)\n', (915, 921), False, 'import math\n'), ((956, 970), 'math.sin', 'math.sin', (['rads'], {}), '(rads)\n', (964, 970), False, 'import math\n')]
|
from unittest import TestCase
from picea import Sequence, SequenceReader, SequenceCollection,\
MultipleSequenceAlignment, alphabets
class SequenceTests(TestCase):
def setUp(self):
self.fasta = '>A\nABC\n>B\nDEF'
self.json = (
'[{"header":"A","sequence":"ABC"},'
'{"header":"B","sequence":"DEF"}]'
)
self.rename_func = lambda x: f'{x}.test'
def test_empty_init_sequencereader(self):
self.assertRaises(AssertionError, SequenceReader)
def test_fasta_sequencereader(self):
for _ in SequenceReader(string=self.fasta, filetype='fasta'):
pass
def test_json_sequencereader(self):
for _ in SequenceReader(string=self.json, filetype='json'):
pass
def test_empty_init_sequence(self):
Sequence()
def test_sequence_detect_dna(self):
s = Sequence('test', 'ACGATCGACTCGAACT')
self.assertEqual(s.alphabet, alphabets.DNA)
def test_sequence_detect_aminoacid(self):
s = Sequence('test', 'KUDHLSKJSPOIJKMSLKM')
self.assertEqual(s.alphabet, alphabets.AminoAcid)
def test_empty_init_sequencecollection(self):
SequenceCollection()
def test_empty_init_msa(self):
MultipleSequenceAlignment()
def test_fasta_parsing_sequencecollection(self):
SequenceCollection.from_fasta(string=self.fasta)
def test_fasta_parsing_msa(self):
MultipleSequenceAlignment.from_fasta(string=self.fasta)
def test_fasta_input_output_sequencecollection(self):
seq = SequenceCollection.from_fasta(string=self.fasta)
self.assertEqual(seq.to_fasta(), self.fasta)
def test_fasta_input_output_msa(self):
seq = MultipleSequenceAlignment.from_fasta(string=self.fasta)
self.assertEqual(seq.to_fasta(), self.fasta)
def test_json_parsing_sequencecollection(self):
SequenceCollection.from_json(string=self.json)
def test_json_parsing_msa(self):
MultipleSequenceAlignment.from_json(string=self.json)
def test_trailing_newline_sequencecollection(self):
fasta = f'{self.fasta}\n'
seq = SequenceCollection.from_fasta(string=fasta)
self.assertEqual(seq.to_fasta(), fasta[:-1])
def test_trailing_newline_msa(self):
fasta = f'{self.fasta}\n'
seq = MultipleSequenceAlignment.from_fasta(string=fasta)
self.assertEqual(seq.to_fasta(), fasta[:-1])
def test_sequence_iter_sequencecollection(self):
for _ in SequenceCollection.from_fasta(string=self.fasta):
pass
def test_sequence_iter_msa(self):
for _ in MultipleSequenceAlignment.from_fasta(string=self.fasta):
pass
def test_sequencecollection_pop(self):
seq_col = SequenceCollection.from_fasta(string=self.fasta)
pop_seq = seq_col.pop('A')
self.assertEqual(pop_seq.header, 'A')
self.assertEqual(pop_seq.sequence, 'ABC')
self.assertNotIn('A', seq_col.headers)
self.assertNotIn('ABC', seq_col.sequences)
def test_msa_pop(self):
msa = MultipleSequenceAlignment.from_fasta(string=self.fasta)
pop_seq = msa.pop('A')
self.assertEqual(pop_seq.header, 'A')
self.assertEqual(pop_seq.sequence, 'ABC')
self.assertNotIn('A', msa.headers)
self.assertNotIn('ABC', msa.sequences)
def test_seqcol_batch_rename(self):
seq_col = SequenceCollection.from_fasta(string=self.fasta)
seq_col.batch_rename(self.rename_func)
self.assertEqual(seq_col.headers, ['A.test', 'B.test'])
def test_msa_batch_rename(self):
msa = MultipleSequenceAlignment.from_fasta(string=self.fasta)
msa.batch_rename(self.rename_func)
self.assertEqual(msa.headers, ['A.test', 'B.test'])
def test_iloc(self):
seq_col = SequenceCollection.from_fasta(string=self.fasta)
sub = seq_col.iloc[0]
self.assertEqual(sub.headers, ['A'])
sub_multiple = seq_col.iloc[[0, 1]]
self.assertEqual(sub_multiple.headers, ['A', 'B'])
sub_slice1 = seq_col.iloc[0:2]
self.assertEqual(sub_slice1.headers, ['A', 'B'])
sub_slice2 = seq_col.iloc[1:]
self.assertEqual(sub_slice2.headers, ['B'])
sub_slice3 = seq_col.iloc[:1]
self.assertEqual(sub_slice3.headers, ['A'])
with self.assertRaises(TypeError):
seq_col.iloc['A']
def test_len(self):
seq_col = SequenceCollection.from_fasta(string=self.fasta)
self.assertEqual(len(seq_col), 2)
def test_from_iter(self):
seq_col = SequenceCollection.from_fasta(string=self.fasta)
seq_col2 = SequenceCollection.from_sequence_iter(seq_col)
self.assertEqual(seq_col.headers, seq_col2.headers)
|
[
"picea.Sequence",
"picea.MultipleSequenceAlignment",
"picea.SequenceReader",
"picea.SequenceCollection",
"picea.SequenceCollection.from_json",
"picea.MultipleSequenceAlignment.from_json",
"picea.MultipleSequenceAlignment.from_fasta",
"picea.SequenceCollection.from_sequence_iter",
"picea.SequenceCollection.from_fasta"
] |
[((571, 622), 'picea.SequenceReader', 'SequenceReader', ([], {'string': 'self.fasta', 'filetype': '"""fasta"""'}), "(string=self.fasta, filetype='fasta')\n", (585, 622), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((699, 748), 'picea.SequenceReader', 'SequenceReader', ([], {'string': 'self.json', 'filetype': '"""json"""'}), "(string=self.json, filetype='json')\n", (713, 748), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((816, 826), 'picea.Sequence', 'Sequence', ([], {}), '()\n', (824, 826), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((880, 916), 'picea.Sequence', 'Sequence', (['"""test"""', '"""ACGATCGACTCGAACT"""'], {}), "('test', 'ACGATCGACTCGAACT')\n", (888, 916), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1028, 1067), 'picea.Sequence', 'Sequence', (['"""test"""', '"""KUDHLSKJSPOIJKMSLKM"""'], {}), "('test', 'KUDHLSKJSPOIJKMSLKM')\n", (1036, 1067), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1185, 1205), 'picea.SequenceCollection', 'SequenceCollection', ([], {}), '()\n', (1203, 1205), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1250, 1277), 'picea.MultipleSequenceAlignment', 'MultipleSequenceAlignment', ([], {}), '()\n', (1275, 1277), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1340, 1388), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (1369, 1388), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1436, 1491), 
'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (1472, 1491), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1565, 1613), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (1594, 1613), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1725, 1780), 'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (1761, 1780), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1895, 1941), 'picea.SequenceCollection.from_json', 'SequenceCollection.from_json', ([], {'string': 'self.json'}), '(string=self.json)\n', (1923, 1941), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((1988, 2041), 'picea.MultipleSequenceAlignment.from_json', 'MultipleSequenceAlignment.from_json', ([], {'string': 'self.json'}), '(string=self.json)\n', (2023, 2041), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((2147, 2190), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'fasta'}), '(string=fasta)\n', (2176, 2190), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((2334, 2384), 'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'fasta'}), '(string=fasta)\n', (2370, 2384), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((2509, 2557), 'picea.SequenceCollection.from_fasta', 
'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (2538, 2557), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((2632, 2687), 'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (2668, 2687), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((2768, 2816), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (2797, 2816), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((3089, 3144), 'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (3125, 3144), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((3421, 3469), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (3450, 3469), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((3633, 3688), 'picea.MultipleSequenceAlignment.from_fasta', 'MultipleSequenceAlignment.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (3669, 3688), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((3836, 3884), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (3865, 3884), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((4455, 4503), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 
'self.fasta'}), '(string=self.fasta)\n', (4484, 4503), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((4595, 4643), 'picea.SequenceCollection.from_fasta', 'SequenceCollection.from_fasta', ([], {'string': 'self.fasta'}), '(string=self.fasta)\n', (4624, 4643), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n'), ((4663, 4709), 'picea.SequenceCollection.from_sequence_iter', 'SequenceCollection.from_sequence_iter', (['seq_col'], {}), '(seq_col)\n', (4700, 4709), False, 'from picea import Sequence, SequenceReader, SequenceCollection, MultipleSequenceAlignment, alphabets\n')]
|
from collections import deque
from itertools import count, tee
import time as ttime
from event_model import DocumentNames
from .log import doc_logger
from .utils import (
new_uid,
IllegalMessageSequence,
_rearrange_into_parallel_dicts,
short_uid,
Msg,
)
class RunBundler:
def __init__(self, md, record_interruptions, emit, emit_sync, log, *, loop):
# state stolen from the RE
self.bundling = False # if we are in the middle of bundling readings
self._bundle_name = None # name given to event descriptor
self._run_start_uid = None # The (future) runstart uid
self._objs_read = deque() # objects read in one Event
self._read_cache = deque() # cache of obj.read() in one Event
self._asset_docs_cache = deque() # cache of obj.collect_asset_docs()
self._describe_cache = dict() # cache of all obj.describe() output
self._config_desc_cache = dict() # " obj.describe_configuration()
self._config_values_cache = dict() # " obj.read_configuration() values
self._config_ts_cache = dict() # " obj.read_configuration() timestamps
self._descriptors = dict() # cache of {name: (objs_frozen_set, doc)}
self._sequence_counters = dict() # a seq_num counter per stream
self._teed_sequence_counters = dict() # for if we redo data-points
self._monitor_params = dict() # cache of {obj: (cb, kwargs)}
self.run_is_open = False
self._uncollected = set() # objects after kickoff(), before collect()
# we expect the RE to take care of the composition
self._md = md
# this is state on the RE, mirror it here rather than refer to
# the parent
self.record_interruptions = record_interruptions
# this is RE.emit, but lifted to this context
self.emit = emit
self.emit_sync = emit_sync
self.log = log
self.loop = loop
async def open_run(self, msg):
self.run_is_open = True
self._run_start_uid = new_uid()
self._interruptions_desc_uid = None # uid for a special Event Desc.
self._interruptions_counter = count(1) # seq_num, special Event stream
doc = dict(uid=self._run_start_uid, time=ttime.time(), **self._md)
await self.emit(DocumentNames.start, doc)
doc_logger.debug("[start] document is emitted (run_uid=%r)", self._run_start_uid,
extra={'doc_name': 'start',
'run_uid': self._run_start_uid})
await self.reset_checkpoint_state_coro()
# Emit an Event Descriptor for recording any interruptions as Events.
if self.record_interruptions:
self._interruptions_desc_uid = new_uid()
dk = {"dtype": "string", "shape": [], "source": "RunEngine"}
interruptions_desc = dict(
time=ttime.time(),
uid=self._interruptions_desc_uid,
name="interruptions",
data_keys={"interruption": dk},
run_start=self._run_start_uid,
)
await self.emit(DocumentNames.descriptor, interruptions_desc)
return self._run_start_uid
async def close_run(self, msg):
"""Instruct the RunEngine to write the RunStop document
Expected message object is::
Msg('close_run', None, exit_status=None, reason=None)
if *exit_stats* and *reason* are not provided, use the values
stashed on the RE.
"""
if not self.run_is_open:
raise IllegalMessageSequence(
"A 'close_run' message was received but there is no run "
"open. If this occurred after a pause/resume, add "
"a 'checkpoint' message after the 'close_run' message."
)
self.log.debug("Stopping run %r", self._run_start_uid)
# Clear any uncleared monitoring callbacks.
for obj, (cb, kwargs) in list(self._monitor_params.items()):
obj.clear_sub(cb)
del self._monitor_params[obj]
# Count the number of Events in each stream.
num_events = {}
for bundle_name, counter in self._sequence_counters.items():
if bundle_name is None:
# rare but possible via Msg('create', name='primary')
continue
num_events[bundle_name] = next(counter) - 1
reason = msg.kwargs.get("reason", None)
if reason is None:
reason = ""
exit_status = msg.kwargs.get("exit_status", "success") or "success"
doc = dict(
run_start=self._run_start_uid,
time=ttime.time(),
uid=new_uid(),
exit_status=exit_status,
reason=reason,
num_events=num_events,
)
await self.emit(DocumentNames.stop, doc)
doc_logger.debug("[stop] document is emitted (run_uid=%r)", self._run_start_uid,
extra={'doc_name': 'stop',
'run_uid': self._run_start_uid})
await self.reset_checkpoint_state_coro()
self.run_is_open = False
return doc["run_start"]
async def create(self, msg):
"""Trigger the run engine to start bundling future obj.read() calls for
an Event document
Expected message object is::
Msg('create', None, name='primary')
Msg('create', name='primary')
Note that the `name` kwarg will be the 'name' field of the resulting
descriptor. So descriptor['name'] = msg.kwargs['name'].
Also note that changing the 'name' of the Event will create a new
Descriptor document.
"""
if self.bundling:
raise IllegalMessageSequence(
"A second 'create' message is not "
"allowed until the current event "
"bundle is closed with a 'save' or "
"'drop' message."
)
self._read_cache.clear()
self._asset_docs_cache.clear()
self._objs_read.clear()
self.bundling = True
command, obj, args, kwargs, _ = msg
try:
self._bundle_name = kwargs["name"]
except KeyError:
try:
self._bundle_name, = args
except ValueError:
raise ValueError(
"Msg('create') now requires a stream name, given as "
"Msg('create', name) or Msg('create', name=name)"
) from None
    async def read(self, msg, reading):
        """
        Add a reading to the open event bundle.
        Expected message object is::
            Msg('read', obj)

        Returns the *reading* unchanged; it is only cached (not emitted)
        until the bundle is closed by 'save'.
        """
        # If no bundle is open ('create' not yet seen), the reading is simply
        # passed through untouched.
        if self.bundling:
            obj = msg.obj
            # if the object is not in the _describe_cache, cache it
            if obj not in self._describe_cache:
                # Validate that there is no data key name collision.
                data_keys = obj.describe()
                self._describe_cache[obj] = data_keys
                self._config_desc_cache[obj] = obj.describe_configuration()
                self._cache_config(obj)
            # check that current read collides with nothing else in
            # current event
            cur_keys = set(self._describe_cache[obj].keys())
            for read_obj in self._objs_read:
                # that is, field names
                known_keys = self._describe_cache[read_obj].keys()
                if set(known_keys) & cur_keys:
                    raise ValueError(
                        f"Data keys (field names) from {obj!r} "
                        f"collide with those from {read_obj!r}. "
                        f"The colliding keys are {set(known_keys) & cur_keys}"
                    )
            # add this object to the cache of things we have read
            self._objs_read.append(obj)
            # Stash the results, which will be emitted the next time _save is
            # called --- or never emitted if _drop is called instead.
            self._read_cache.append(reading)
            # Ask the object for any resource or datum documents is has cached
            # and cache them as well. Likewise, these will be emitted if and
            # when _save is called.
            if hasattr(obj, "collect_asset_docs"):
                self._asset_docs_cache.extend(
                    obj.collect_asset_docs(*msg.args, **msg.kwargs)
                )
        return reading
def _cache_config(self, obj):
"Read the object's configuration and cache it."
config_values = {}
config_ts = {}
for key, val in obj.read_configuration().items():
config_values[key] = val["value"]
config_ts[key] = val["timestamp"]
self._config_values_cache[obj] = config_values
self._config_ts_cache[obj] = config_ts
    async def monitor(self, msg):
        """
        Monitor a signal. Emit event documents asynchronously.
        A descriptor document is emitted immediately. Then, a closure is
        defined that emits Event documents associated with that descriptor
        from a separate thread. This process is not related to the main
        bundling process (create/read/save).
        Expected message object is::
            Msg('monitor', obj, **kwargs)
            Msg('monitor', obj, name='event-stream-name', **kwargs)
        where kwargs are passed through to ``obj.subscribe()``
        """
        obj = msg.obj
        if msg.args:
            raise ValueError(
                "The 'monitor' Msg does not accept positional " "arguments."
            )
        kwargs = dict(msg.kwargs)
        name = kwargs.pop("name", short_uid("monitor"))
        if obj in self._monitor_params:
            raise IllegalMessageSequence(
                "A 'monitor' message was sent for {}"
                "which is already monitored".format(obj)
            )
        descriptor_uid = new_uid()
        data_keys = obj.describe()
        # Build the 'configuration' section of the descriptor from the
        # object's own configuration readings.
        config = {obj.name: {"data": {}, "timestamps": {}}}
        config[obj.name]["data_keys"] = obj.describe_configuration()
        for key, val in obj.read_configuration().items():
            config[obj.name]["data"][key] = val["value"]
            config[obj.name]["timestamps"][key] = val["timestamp"]
        object_keys = {obj.name: list(data_keys)}
        hints = {}
        if hasattr(obj, "hints"):
            hints.update({obj.name: obj.hints})
        desc_doc = dict(
            run_start=self._run_start_uid,
            time=ttime.time(),
            data_keys=data_keys,
            uid=descriptor_uid,
            configuration=config,
            hints=hints,
            name=name,
            object_keys=object_keys,
        )
        doc_logger.debug("[descriptor] document is emitted with name %r containing "
                         "data keys %r (run_uid=%r)", name, data_keys.keys(),
                         self._run_start_uid,
                         extra={'doc_name': 'descriptor',
                                'run_uid': self._run_start_uid,
                                'data_keys': data_keys.keys()})
        seq_num_counter = count(1)
        # Closure capturing obj/descriptor_uid/seq_num_counter; invoked by the
        # device's subscription machinery whenever the signal updates.
        def emit_event(*args, **kwargs):
            # Ignore the inputs. Use this call as a signal to call read on the
            # object, a crude way to be sure we get all the info we need.
            data, timestamps = _rearrange_into_parallel_dicts(obj.read())
            doc = dict(
                descriptor=descriptor_uid,
                time=ttime.time(),
                data=data,
                timestamps=timestamps,
                seq_num=next(seq_num_counter),
                uid=new_uid(),
            )
            self.emit_sync(DocumentNames.event, doc)
        # Remember the (callback, kwargs) pair so 'unmonitor' / suspend /
        # restore can detach and re-attach it later.
        self._monitor_params[obj] = emit_event, kwargs
        await self.emit(DocumentNames.descriptor, desc_doc)
        obj.subscribe(emit_event, **kwargs)
def record_interruption(self, content):
"""
Emit an event in the 'interruptions' event stream.
If we are not inside a run or if self.record_interruptions is False,
nothing is done.
"""
if self._interruptions_desc_uid is not None:
# We are inside a run and self.record_interruptions is True.
doc = dict(
descriptor=self._interruptions_desc_uid,
time=ttime.time(),
uid=new_uid(),
seq_num=next(self._interruptions_counter),
data={"interruption": content},
timestamps={"interruption": ttime.time()},
)
self.emit_sync(DocumentNames.event, doc)
def rewind(self):
self._sequence_counters.clear()
self._sequence_counters.update(self._teed_sequence_counters)
# This is needed to 'cancel' an open bundling (e.g. create) if
# the pause happens after a 'checkpoint', after a 'create', but
# before the paired 'save'.
self.bundling = False
async def unmonitor(self, msg):
"""
Stop monitoring; i.e., remove the callback emitting event documents.
Expected message object is::
Msg('unmonitor', obj)
"""
obj = msg.obj
if obj not in self._monitor_params:
raise IllegalMessageSequence(
f"Cannot 'unmonitor' {obj}; it is not " "being monitored."
)
cb, kwargs = self._monitor_params[obj]
obj.clear_sub(cb)
del self._monitor_params[obj]
await self.reset_checkpoint_state_coro()
    async def save(self, msg):
        """Save the event that is currently being bundled
        Create and emit an Event document containing the data read from devices
        in self._objs_read. Emit any Resource and Datum documents cached by
        those devices before emitting the Event document. If this is the first
        Event of its stream then create and emit the Event Descriptor document
        before emitting Resource, Datum, and Event documents.
        Expected message object is::
            Msg('save')
        """
        if not self.bundling:
            raise IllegalMessageSequence(
                "A 'create' message must be sent, to "
                "open an event bundle, before that "
                "bundle can be saved with 'save'."
            )
        # Short-circuit if nothing has been read. (Do not create empty Events.)
        if not self._objs_read:
            self.bundling = False
            self._bundle_name = None
            return
        # The Event Descriptor is uniquely defined by the set of objects
        # read in this Event grouping.
        objs_read = frozenset(self._objs_read)
        # Event Descriptor key
        desc_key = self._bundle_name
        # This is a separate check because it can be reset on resume.
        seq_num_key = desc_key
        if seq_num_key not in self._sequence_counters:
            # First event of this stream: start a fresh counter and tee off a
            # checkpoint copy so rewind can replay sequence numbers.
            counter = count(1)
            counter_copy1, counter_copy2 = tee(counter)
            self._sequence_counters[seq_num_key] = counter_copy1
            self._teed_sequence_counters[seq_num_key] = counter_copy2
        self.bundling = False
        self._bundle_name = None
        d_objs, descriptor_doc = self._descriptors.get(desc_key, (None, None))
        # The same stream name must always be produced by the same set of
        # objects; anything else is a programming error in the plan.
        if d_objs is not None and d_objs != objs_read:
            raise RuntimeError(
                "Mismatched objects read, expected {!s}, "
                "got {!s}".format(d_objs, objs_read)
            )
        if descriptor_doc is None:
            # We do not have an Event Descriptor for this set
            # so one must be created.
            data_keys = {}
            config = {}
            object_keys = {}
            hints = {}
            for obj in objs_read:
                dks = self._describe_cache[obj]
                obj_name = obj.name
                # dks is an OrderedDict. Record that order as a list.
                object_keys[obj.name] = list(dks)
                for field, dk in dks.items():
                    dk["object_name"] = obj_name
                data_keys.update(dks)
                config[obj_name] = {}
                config[obj_name]["data"] = self._config_values_cache[obj]
                config[obj_name]["timestamps"] = self._config_ts_cache[obj]
                config[obj_name]["data_keys"] = self._config_desc_cache[obj]
                if hasattr(obj, "hints"):
                    hints[obj_name] = obj.hints
            descriptor_uid = new_uid()
            descriptor_doc = dict(
                run_start=self._run_start_uid,
                time=ttime.time(),
                data_keys=data_keys,
                uid=descriptor_uid,
                configuration=config,
                name=desc_key,
                hints=hints,
                object_keys=object_keys,
            )
            await self.emit(DocumentNames.descriptor, descriptor_doc)
            # NOTE(review): this log line reports obj_name (the name of the
            # last device iterated above) rather than desc_key (the stream
            # name); compare the analogous log in collect() -- verify intent.
            doc_logger.debug(
                "[descriptor] document emitted with name %r containing "
                "data keys %r (run_uid=%r)",
                obj_name,
                data_keys.keys(),
                self._run_start_uid,
                extra={
                    'doc_name': 'descriptor',
                    'run_uid': self._run_start_uid,
                    'data_keys': data_keys.keys()}
            )
            self._descriptors[desc_key] = (objs_read, descriptor_doc)
        descriptor_uid = descriptor_doc["uid"]
        # Resource and Datum documents
        for resource_or_datum_name, resource_or_datum_doc in self._asset_docs_cache:
            # Add a 'run_start' field to resource documents on their way out
            # since this field could not have been set correctly before this point.
            if resource_or_datum_name == "resource":
                resource_or_datum_doc["run_start"] = self._run_start_uid
            doc_logger.debug(
                "[%s] document emitted %r",
                resource_or_datum_name,
                resource_or_datum_doc,
                extra={
                    "doc_name": resource_or_datum_name,
                    "run_uid": self._run_start_uid,
                    "doc": resource_or_datum_doc
                }
            )
            await self.emit(
                DocumentNames(resource_or_datum_name),
                resource_or_datum_doc
            )
        # Event document
        seq_num = next(self._sequence_counters[seq_num_key])
        event_uid = new_uid()
        # Merge list of readings into single dict.
        readings = {k: v for d in self._read_cache for k, v in d.items()}
        data, timestamps = _rearrange_into_parallel_dicts(readings)
        # Mark all externally-stored data as not filled so that consumers
        # know that the corresponding data are identifiers, not dereferenced
        # data.
        filled = {
            k: False
            for k, v in self._descriptors[desc_key][1]["data_keys"].items()
            if "external" in v
        }
        event_doc = dict(
            descriptor=descriptor_uid,
            time=ttime.time(),
            data=data,
            timestamps=timestamps,
            seq_num=seq_num,
            uid=event_uid,
            filled=filled,
        )
        await self.emit(DocumentNames.event, event_doc)
        doc_logger.debug(
            "[event] document emitted with data keys %r (run_uid=%r)",
            data.keys(),
            self._run_start_uid,
            extra={
                'doc_name': 'event',
                'run_uid': self._run_start_uid,
                'data_keys': data.keys()}
        )
def clear_monitors(self):
for obj, (cb, kwargs) in list(self._monitor_params.items()):
try:
obj.clear_sub(cb)
except Exception:
self.log.exception("Failed to stop monitoring %r.", obj)
else:
del self._monitor_params[obj]
def reset_checkpoint_state(self):
# Keep a safe separate copy of the sequence counters to use if we
# rewind and retake some data points.
for key, counter in list(self._sequence_counters.items()):
counter_copy1, counter_copy2 = tee(counter)
self._sequence_counters[key] = counter_copy1
self._teed_sequence_counters[key] = counter_copy2
    async def reset_checkpoint_state_coro(self):
        # Thin async wrapper so coroutine code can await the (synchronous)
        # checkpoint snapshot.
        self.reset_checkpoint_state()
async def suspend_monitors(self):
for obj, (cb, kwargs) in self._monitor_params.items():
obj.clear_sub(cb)
async def restore_monitors(self):
for obj, (cb, kwargs) in self._monitor_params.items():
obj.subscribe(cb, **kwargs)
    async def clear_checkpoint(self, msg):
        # Drop the teed counter copies: there is no longer a checkpoint to
        # rewind to, so the snapshots are stale.
        self._teed_sequence_counters.clear()
async def drop(self, msg):
"""Drop the event that is currently being bundled
Expected message object is::
Msg('drop')
"""
if not self.bundling:
raise IllegalMessageSequence(
"A 'create' message must be sent, to "
"open an event bundle, before that "
"bundle can be dropped with 'drop'."
)
self.bundling = False
self._bundle_name = None
self.log.debug("Dropped open event bundle")
    async def kickoff(self, msg):
        """Start a flyscan object.
        Expected message object is:
        If `flyer_object` has a `kickoff` function that takes no arguments::
            Msg('kickoff', flyer_object)
            Msg('kickoff', flyer_object, group=<name>)
        If *flyer_object* has a ``kickoff`` function that takes
        ``(start, stop, steps)`` as its function arguments::
            Msg('kickoff', flyer_object, start, stop, step)
            Msg('kickoff', flyer_object, start, stop, step, group=<name>)
        """
        # Track the flyer so backstop_collect() can issue a 'collect' for it
        # if the plan never does so explicitly.
        self._uncollected.add(msg.obj)
    async def complete(self, msg):
        """
        Tell a flyer, 'stop collecting, whenever you are ready'.
        The flyer returns a status object. Some flyers respond to this
        command by stopping collection and returning a finished status
        object immediately. Other flyers finish their given course and
        finish whenever they finish, irrespective of when this command is
        issued.
        Expected message object is::
            Msg('complete', flyer, group=<GROUP>)
        where <GROUP> is a hashable identifier.
        """
        # Intentionally a no-op at this level -- presumably the message
        # dispatcher invokes the flyer's own complete(); verify against the
        # dispatch table.
        ...
async def collect(self, msg):
"""
Collect data cached by a flyer and emit documents.
Expect message object is
Msg('collect', collect_obj)
Msg('collect', flyer_object, stream=True, return_payload=False)
"""
collect_obj = msg.obj
if not self.run_is_open:
# sanity check -- 'kickoff' should catch this and make this
# code path impossible
raise IllegalMessageSequence(
"A 'collect' message was sent but no run is open."
)
self._uncollected.discard(collect_obj)
if hasattr(collect_obj, "collect_asset_docs"):
# Resource and Datum documents
for name, doc in collect_obj.collect_asset_docs():
# Add a 'run_start' field to the resource document on its way out.
if name == "resource":
doc["run_start"] = self._run_start_uid
await self.emit(DocumentNames(name), doc)
collect_obj_config = {}
if hasattr(collect_obj, "read_configuration"):
doc_logger.debug("reading configuration from %s", collect_obj)
collect_obj_config[collect_obj.name] = {
"data": {},
"timestamps": {},
"data_keys": collect_obj.describe_configuration()
}
for config_key, config in collect_obj.read_configuration().items():
collect_obj_config[collect_obj.name]["data"][config_key] = config["value"]
collect_obj_config[collect_obj.name]["timestamps"][config_key] = config["timestamp"]
else:
doc_logger.debug("%s has no read_configuration method", collect_obj)
bulk_data = {}
local_descriptors = {} # hashed on objs_read, not (name, objs_read)
# collect_obj.describe_collect() returns a dictionary like this:
# {name_for_desc1: data_keys_for_desc1,
# name_for_desc2: data_keys_for_desc2, ...}
for stream_name, stream_data_keys in collect_obj.describe_collect().items():
if stream_name not in self._descriptors:
# We do not have an Event Descriptor for this set.
descriptor_uid = new_uid()
hints = {}
if hasattr(collect_obj, "hints"):
hints.update({collect_obj.name: collect_obj.hints})
doc = dict(
run_start=self._run_start_uid,
time=ttime.time(),
data_keys=stream_data_keys,
uid=descriptor_uid,
name=stream_name,
configuration=collect_obj_config,
hints=hints,
object_keys={collect_obj.name: list(stream_data_keys)},
)
await self.emit(DocumentNames.descriptor, doc)
doc_logger.debug("[descriptor] document is emitted with name %r "
"containing data keys %r (run_uid=%r)", stream_name,
stream_data_keys.keys(), self._run_start_uid,
extra={'doc_name': 'descriptor',
'run_uid': self._run_start_uid,
'data_keys': stream_data_keys.keys()})
self._descriptors[stream_name] = (stream_data_keys, doc)
self._sequence_counters[stream_name] = count(1)
else:
objs_read, doc = self._descriptors[stream_name]
if stream_data_keys != objs_read:
raise RuntimeError(
"Mismatched objects read, "
"expected {!s}, "
"got {!s}".format(stream_data_keys, objs_read)
)
descriptor_uid = doc["uid"]
local_descriptors[frozenset(stream_data_keys)] = (stream_name, descriptor_uid)
bulk_data[descriptor_uid] = []
# If stream is True, run 'event' subscription per document.
# If stream is False, run 'bulk_events' subscription once.
stream = msg.kwargs.get("stream", False)
# If True, accumulate all the Events in memory and return them at the
# end, providing the plan access to the Events. If False, do not
# accumulate, and return None.
return_payload = msg.kwargs.get('return_payload', True)
payload = []
for ev in collect_obj.collect():
if return_payload:
payload.append(ev)
objs_read = frozenset(ev["data"])
stream_name, descriptor_uid = local_descriptors[objs_read]
seq_num = next(self._sequence_counters[stream_name])
event_uid = new_uid()
reading = ev["data"]
for key in ev["data"]:
reading[key] = reading[key]
ev["data"] = reading
ev["descriptor"] = descriptor_uid
ev["seq_num"] = seq_num
ev["uid"] = event_uid
if stream:
doc_logger.debug("[event] document is emitted with data keys %r (run_uid=%r)",
ev['data'].keys(), self._run_start_uid,
event_uid,
extra={'doc_name': 'event',
'run_uid': self._run_start_uid,
'data_keys': ev['data'].keys()})
await self.emit(DocumentNames.event, ev)
else:
bulk_data[descriptor_uid].append(ev)
if not stream:
await self.emit(DocumentNames.bulk_events, bulk_data)
doc_logger.debug("[bulk events] document is emitted for descriptors (run_uid=%r)",
self._run_start_uid,
extra={'doc_name': 'bulk_events',
'run_uid': self._run_start_uid})
if return_payload:
return payload
    async def backstop_collect(self):
        # Safety net: issue a 'collect' for every flyer that was kicked off
        # but never explicitly collected. Failures are logged, not raised,
        # so cleanup continues for the remaining objects.
        for obj in list(self._uncollected):
            try:
                await self.collect(Msg("collect", obj))
            except Exception:
                self.log.exception("Failed to collect %r.", obj)
async def configure(self, msg):
"""Configure an object
Expected message object is ::
Msg('configure', object, *args, **kwargs)
which results in this call ::
object.configure(*args, **kwargs)
"""
obj = msg.obj
# Invalidate any event descriptors that include this object.
# New event descriptors, with this new configuration, will
# be created for any future event documents.
for name in list(self._descriptors):
obj_set, _ = self._descriptors[name]
if obj in obj_set:
del self._descriptors[name]
self._cache_config(obj)
|
[
"itertools.count",
"time.time",
"event_model.DocumentNames",
"itertools.tee",
"collections.deque"
] |
[((645, 652), 'collections.deque', 'deque', ([], {}), '()\n', (650, 652), False, 'from collections import deque\n'), ((709, 716), 'collections.deque', 'deque', ([], {}), '()\n', (714, 716), False, 'from collections import deque\n'), ((786, 793), 'collections.deque', 'deque', ([], {}), '()\n', (791, 793), False, 'from collections import deque\n'), ((2167, 2175), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (2172, 2175), False, 'from itertools import count, tee\n'), ((11216, 11224), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (11221, 11224), False, 'from itertools import count, tee\n'), ((15010, 15018), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (15015, 15018), False, 'from itertools import count, tee\n'), ((15062, 15074), 'itertools.tee', 'tee', (['counter'], {}), '(counter)\n', (15065, 15074), False, 'from itertools import count, tee\n'), ((20285, 20297), 'itertools.tee', 'tee', (['counter'], {}), '(counter)\n', (20288, 20297), False, 'from itertools import count, tee\n'), ((2259, 2271), 'time.time', 'ttime.time', ([], {}), '()\n', (2269, 2271), True, 'import time as ttime\n'), ((4680, 4692), 'time.time', 'ttime.time', ([], {}), '()\n', (4690, 4692), True, 'import time as ttime\n'), ((10587, 10599), 'time.time', 'ttime.time', ([], {}), '()\n', (10597, 10599), True, 'import time as ttime\n'), ((19164, 19176), 'time.time', 'ttime.time', ([], {}), '()\n', (19174, 19176), True, 'import time as ttime\n'), ((26066, 26074), 'itertools.count', 'count', (['(1)'], {}), '(1)\n', (26071, 26074), False, 'from itertools import count, tee\n'), ((2895, 2907), 'time.time', 'ttime.time', ([], {}), '()\n', (2905, 2907), True, 'import time as ttime\n'), ((11582, 11594), 'time.time', 'ttime.time', ([], {}), '()\n', (11592, 11594), True, 'import time as ttime\n'), ((12426, 12438), 'time.time', 'ttime.time', ([], {}), '()\n', (12436, 12438), True, 'import time as ttime\n'), ((16672, 16684), 'time.time', 'ttime.time', ([], {}), '()\n', (16682, 16684), True, 'import 
time as ttime\n'), ((18357, 18394), 'event_model.DocumentNames', 'DocumentNames', (['resource_or_datum_name'], {}), '(resource_or_datum_name)\n', (18370, 18394), False, 'from event_model import DocumentNames\n'), ((12622, 12634), 'time.time', 'ttime.time', ([], {}), '()\n', (12632, 12634), True, 'import time as ttime\n'), ((23555, 23574), 'event_model.DocumentNames', 'DocumentNames', (['name'], {}), '(name)\n', (23568, 23574), False, 'from event_model import DocumentNames\n'), ((25090, 25102), 'time.time', 'ttime.time', ([], {}), '()\n', (25100, 25102), True, 'import time as ttime\n')]
|
import streamlit as st
from collections import defaultdict
from kafka import KafkaConsumer
from json import loads
import time
import numpy as np
from datetime import datetime
import matplotlib.pyplot as plt
import plotly.graph_objects as go
import plotly.express as px
import pandas as pd
import PIL
from PIL import Image
import streamlit.components.v1 as components
import os
import tweepy
import logging
import sys
from collections import deque
from geopy.geocoders import Nominatim
import threading
import pickle
# Streamlit layout CSS
st.markdown(
f"""
<style>
.reportview-container .main .block-container{{
max-width: 100vw;
padding-top: 1rem;
padding-right: 1rem;
padding-left: 1rem;
padding-bottom: 1rem;
}}
.reportview-container .main {{
color: black;
background-color: white;
}}
</style>
""",
unsafe_allow_html=True,
)
# Lambdas and Constants
def normalize(x):
    """Shift *x* to (approximately) zero mean and unit std.

    The dtype's machine epsilon guards against division by zero for
    constant traces.
    """
    eps = np.finfo(x.dtype).eps
    return (x - np.mean(x) + eps) / (np.std(x) + eps)
def timestamp_seconds(x):
    """Convert an ISO-8601 timestamp string to POSIX seconds (float)."""
    return datetime.fromisoformat(x).timestamp()
# Rolling per-station caches filled by the Kafka consumer loop below.
wave_dict = defaultdict(list)
pick_dict = defaultdict(list)
event_dict = defaultdict(dict)
# Minimum spacing (s) used to de-duplicate event keys and pace the bot.
EVENT_MIN_GAP = 5
# Waveform windowing: samples per chunk, chunks kept, plot decimation step.
WINDOW_LENGTH = 100
WINDOW_NUMBER = 60
HOP_LENGTH = 10
REFRESH_SEC = 1.0
# Map widget geometry.
MAP_WIDTH = 900
MAP_HEIGHT = 650
MAP_ZOOM = 9
# Tweet only for events at or above this magnitude.
BOT_MAGNITUDE_THRESHOLD = 1.5
GEOLOC_TOUT = 5  # in seconds
I_MADE_A_TWEET = False
# Sample interval in seconds (100 Hz data).
dt = 0.01
prev_event_bundle = None
prev_event_bundle = (0.0, 0.0, 0.0, 0.0)
CONFIG_PKL = "config_hawaii.pkl"
STATION_CSV = "stations_hawaii.csv"
# Region config (e.g. xlim_degree / ylim_degree) pickled by a preprocessing
# step; trusted local file, not untrusted input.
with open(CONFIG_PKL, "rb") as fp:
    CONFIG = pickle.load(fp)
STATIONS = pd.read_csv(STATION_CSV, delimiter="\t")
STATIONS = STATIONS.rename(columns={"station":"id"})
NUM_STATION = len(STATIONS)
consumer = None
# Connection to Kafka: try the in-cluster (k8s) broker first, then fall back
# to a local broker; `consumer` stays None if both attempts fail.
try:
    print('Connecting to k8s kafka')
    BROKER_URL = 'quakeflow-kafka:9092'
    consumer = KafkaConsumer(
        bootstrap_servers=[BROKER_URL],
        auto_offset_reset='earliest',
        enable_auto_commit=True,
        key_deserializer=lambda x: loads(x.decode('utf-8')),
        value_deserializer=lambda x: loads(x.decode('utf-8'))
    )
    print('k8s kafka connection success!')
    consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
except BaseException:
    print('k8s Kafka connection error')
    try:
        print('Connecting to local kafka')
        BROKER_URL = 'localhost:9092'
        consumer = KafkaConsumer(
            bootstrap_servers=[BROKER_URL],
            auto_offset_reset='earliest',
            enable_auto_commit=True,
            key_deserializer=lambda x: loads(x.decode('utf-8')),
            value_deserializer=lambda x: loads(x.decode('utf-8'))
        )
        print('local kafka connection success!')
        consumer.subscribe(['waveform_raw', 'phasenet_picks', 'gmma_events'])
    except BaseException:
        print('local Kafka connection error')
if not consumer:
    print('No kafka server found!')
# Setting up Tweepy
# Twitter credentials come from the environment. SECURITY FIX: the original
# printed the raw secrets to stdout, leaking them into container logs; only
# report whether the full credential set is present.
consumer_key = os.getenv('CONSUMER_KEY')
consumer_secret = os.getenv('CONSUMER_SECRET')
access_token = os.getenv('ACCESS_TOKEN')
access_token_secret = os.getenv('ACCESS_TOKEN_SECRET')
print("Twitter credentials present:",
      all([consumer_key, consumer_secret, access_token, access_token_secret]))
# Root logger, used by create_api() below.
logger = logging.getLogger()
def create_api():
    """Build an authenticated tweepy API client, or None on failure.

    Returns None (without raising) when the CONSUMER_KEY env var is missing
    or credential verification fails, so the dashboard still runs without
    Twitter integration.
    """
    key = os.getenv("CONSUMER_KEY")
    secret = os.getenv("CONSUMER_SECRET")
    token = os.getenv("ACCESS_TOKEN")
    token_secret = os.getenv("ACCESS_TOKEN_SECRET")
    if not key:
        # No credentials configured -- skip Twitter entirely.
        return
    try:
        auth = tweepy.OAuthHandler(key, secret)
        auth.set_access_token(token, token_secret)
        api = tweepy.API(auth, wait_on_rate_limit=True,
                         wait_on_rate_limit_notify=True)
        api.verify_credentials()
    except Exception:
        logger.error("Error creating API", exc_info=True)
        return None
    logger.info("API created")
    return api
# Module-level Twitter client; None when credentials are absent or invalid.
api = create_api()
# Functions
def latlon2address(lat, lon, geolocator):
    """Reverse-geocode (lat, lon) to a street address string.

    Best-effort: any failure (timeout, service error, no result) yields
    None instead of raising.
    """
    try:
        place = geolocator.reverse(f"{lat}, {lon}")
        print(place)
        return place.address
    except BaseException:
        return None
# Shared Nominatim (OpenStreetMap) reverse geocoder; 5 s timeout per lookup.
geolocator = Nominatim(user_agent="https", timeout=5)
def update_figure_layout(figure):
    """Apply the common map styling (USGS imagery tiles, size, axis ranges
    from CONFIG) to a plotly figure and return it (mutated in place)."""
    # Use USGS raster tiles as the basemap instead of a Mapbox style that
    # would require an access token.
    figure.update_layout(
        mapbox_style="white-bg",
        mapbox_layers=[
            {
                "below": 'traces',
                "sourcetype": "raster",
                "sourceattribution": "United States Geological Survey",
                "source": [
                    "https://basemap.nationalmap.gov/arcgis/rest/services/USGSImageryOnly/MapServer/tile/{z}/{y}/{x}"
                ]
            }
        ])
    figure.update_layout(
        showlegend=True,
        width=MAP_WIDTH,
        height=MAP_HEIGHT,
        geo=dict(
            landcolor='rgb(217, 217, 217)',
            lonaxis=dict(
                showgrid=True,
                gridwidth=0.05,
                range=CONFIG["xlim_degree"],  # region bounds from the pickled config
                dtick=5
            ),
            lataxis=dict(
                showgrid=True,
                gridwidth=0.05,
                range=CONFIG["ylim_degree"],
                dtick=5
            )
        ),
    )
    figure.update_layout(margin={"r": 0.5, "t": 0.5, "l": 0, "b": 0})
    return figure
def get_plot_picks(message, t0, tn):
    """Collect pick times and marker colors inside the window [t0, tn].

    Parameters
    ----------
    message : list of pick dicts with 'timestamp' (ISO string) and 'type'
        ('p' or 's').
    t0, tn : window bounds in POSIX seconds.

    Returns
    -------
    (t_picks, colors, t0_idx): pick times relative to t0, matplotlib color
    codes ('b' for P, 'r' for S), and the index of the first in-window pick.
    Iteration stops at the first pick past tn.
    """
    t0_idx = 0
    t_picks = []
    colors = []
    for i, x in enumerate(message):
        if timestamp_seconds(x["timestamp"]) >= t0:
            if t0_idx == 0:
                t0_idx = i
            if timestamp_seconds(x["timestamp"]) <= tn:
                t_picks.append(timestamp_seconds(x["timestamp"]) - t0)
                if x["type"] == "p":
                    colors.append("b")
                elif x["type"] == "s":
                    colors.append("r")
                else:
                    # BUG FIX: the original `raise("Phase type error!")`
                    # raises a str, which is a TypeError in Python 3; raise a
                    # real exception instead.
                    raise ValueError("Phase type error!")
            else:
                return t_picks, colors, t0_idx
    return t_picks, colors, t0_idx
def get_plot_events(message, t0, tn):
    """Gather event times (relative to t0), magnitudes, and locations that
    fall inside the window [t0, tn - 8].

    ``t0_idx`` is kept for signature compatibility and is always 0 (the
    original index tracking is commented out upstream).  Mirroring the
    original control flow, the scan ends at the first event past the
    window's upper bound.
    """
    t0_idx = 0
    t_events, mag_events, loc_events = [], [], []
    for _key, event in message.items():
        seconds = timestamp_seconds(event["time"])
        if seconds < t0:
            continue
        if seconds > tn - 8:
            return t_events, mag_events, loc_events, t0_idx
        t_events.append(seconds - t0)
        mag_events.append(event["magnitude"])
        loc_events.append(event["location"])
    return t_events, mag_events, loc_events, t0_idx
def update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events):
    """Rebuild the fuchsia event scatter-mapbox and return the new figure.

    The old figure's traces are cleared first (if one is passed).  Marker
    area scales as magnitude**4 / 3.5 so strong events dominate visually.
    """
    if figure is not None:
        figure.data = []
    events_df = pd.DataFrame({
        'lat': lat_list,
        'lon': lng_list,
        'z': z_list,
        'mag': mag_events,
        'time': t_events,
        'size': [(m ** 4) / 3.5 for m in mag_events],
    })
    new_figure = px.scatter_mapbox(
        events_df,
        lat="lat",
        lon="lon",
        hover_data=["mag", "time", "lat", "lon"],
        size="size",
        color_discrete_sequence=["fuchsia"],
        zoom=MAP_ZOOM,
        height=300)
    return update_figure_layout(new_figure)
def update_figure_with_cols(figure, col1, col2, lat_list, lng_list, z_list, mag_events, t_events):
    """Redraw the event map inside the first Streamlit column.

    ``col2`` is accepted for signature symmetry but is not used here.
    """
    with col1:
        return update_figure(figure, lat_list, lng_list, z_list, mag_events, t_events)
def tweep_update_with_media(api, mag, lng, lat, z, event_time, geolocator):
    """Render a map of the event to twitter_fig.png and tweet it.

    Runs in a background thread (see tweepy_status_update); any Twitter
    failure is swallowed so the dashboard keeps running.
    """
    temp_time = time.time()
    # get figure using update_figure
    figure = update_figure(None, [lat], [lng], [z], [mag], [event_time])
    figure.write_image("twitter_fig.png")
    print("Time taken to render: %f" % (time.time() - temp_time))
    address = latlon2address(lat, lng, geolocator)
    if address is not None:
        caption = f"Magnitude {mag} earthquake occurred at address {address} at time {event_time}"
    else:
        caption = "Magnitude %f earthquake happened at longitude %f degrees, latitude %f degrees at depth %f km at time %s" % (
            mag, lng, lat, z, event_time)
    try:
        api.update_with_media("twitter_fig.png", caption)
        print('Update Twitter with media success!', flush=True)
        global I_MADE_A_TWEET
        I_MADE_A_TWEET = True  # Demo purpose, don't want to use up all the Twitter API Quota
        print("Time taken to from start to end to fully upload to twitter: %f" % (time.time() - temp_time))
    except BaseException:
        # Best-effort: failed tweets are silently ignored.
        pass
def tweepy_status_update(event_dict):
    """Tweet the most recent event if it is new and strong enough.

    Only fires when the (lng, lat, z, mag) bundle differs from the last one
    seen, the magnitude exceeds BOT_MAGNITUDE_THRESHOLD, the Twitter client
    exists, and no tweet has been sent yet this session (demo quota guard).
    The media upload runs in a daemon-less background thread so the Kafka
    loop is not blocked.
    """
    if(len(event_dict) > 0):
        event = list(event_dict.values())[-1]
        print("tweepy_status_update (event): ", event)
        event_time = event['time']
        # NOTE(review): lng_from_x / lat_from_y are not defined or imported
        # anywhere in this file -- presumably coordinate-unprojection helpers;
        # verify they exist at runtime or this raises NameError.
        lng = lng_from_x(event['location'][0])
        lat = lat_from_y(event['location'][1])
        z = event['location'][2]
        mag = event['magnitude']
        bundle = (lng, lat, z, mag)
        global prev_event_bundle
        if(bundle != prev_event_bundle):
            print("----------New Event----------")
            prev_event_bundle = bundle
            if mag > BOT_MAGNITUDE_THRESHOLD and api is not None and not I_MADE_A_TWEET:
                print("time is %s, current time is %f" % (event_time, time.time()))
                print("Try to update status on twitter............")
                print("Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" % (mag, lng, lat, z, event_time))
                upload_thread = threading.Thread(
                    target=tweep_update_with_media, name="Uploader", args=(
                        api, mag, lng, lat, z, event_time, geolocator, ))
                upload_thread.start()
                temp_time = time.time()
                # Pure text upload, will be fast
                # api.update_status(
                #     "Magnitude %f earthquake happened at longitude %f, latitude %f at depth %f at time %s" %
                #     (mag, lng, lat, z, event_time))
                print("Time taken for fast alert: %f" % (time.time() - temp_time))  # It took: 0.161690 seconds
def extract_df_from_event_dict(event_dict):
    """Flatten the event dict into a catalog DataFrame, newest event first."""
    events = list(event_dict.values())
    events.reverse()
    lats, lons, depths, mags, times = [], [], [], [], []
    for ev in events:
        # location is (x, y, depth_km); x/y are unprojected to degrees.
        lons.append(lng_from_x(ev['location'][0]))
        lats.append(lat_from_y(ev['location'][1]))
        depths.append(ev['location'][2])
        mags.append(ev['magnitude'])
        times.append(ev['time'])
    return pd.DataFrame({'Magnitude': mags, 'Time': times, 'Latitude (deg)': lats,
                         'Longitude (deg)': lons, 'Depth (km)': depths})
# Page header
image_data = np.asarray(Image.open('quakeflow logo design 2.jpg'))
st.image(image_data, caption=None, width=None, use_column_width=None, clamp=False, channels='RGB', output_format='auto')
st.balloons()
# Streamlit layout
col1, col2 = st.beta_columns([1, 1])
# Initial plotting: left column gets an empty event map; placeholders are
# kept so the consumer loop below can update them in place.
with col1:
    experimental_df = pd.DataFrame({'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []})
    event_df = pd.DataFrame({'Magnitude': [], 'Time': [], 'Latitude (deg)': [], 'Longitude (deg)': [], 'Depth (km)': []})
    experimental = px.scatter_mapbox(
        experimental_df,
        lat="lat",
        lon="lon",
        hover_data=[
            "mag",
            "time",
            "lat",
            "lon"],
        color_discrete_sequence=["fuchsia"],
        zoom=MAP_ZOOM,
        height=300)
    experimental = update_figure_layout(experimental)
    map_figure_experimental = st.plotly_chart(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)
# Waveform panel: one horizontal trace per station, stacked vertically at
# integer offsets, with scatter artists reserved for P/S pick markers.
fig, (ax1) = plt.subplots(1, 1, figsize=(8, 5.8))
x = np.arange(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH) * (dt * HOP_LENGTH)
ax1.set_ylim(-1, NUM_STATION)
ax1.set_xlim(np.around(x[0]), np.around(x[-1]))
lines = []
for i in range(NUM_STATION):
    line, = ax1.plot(x, np.zeros(len(x)) + i, linewidth=0.5)
    lines.append(line)
scatters = []
for i in range(NUM_STATION):
    scatter = ax1.scatter([-1], [-1], s=300, c="white", marker="|")
    scatters.append(scatter)
# Off-screen proxy artists so the legend shows P/S colors.
ax1.scatter([-1], [-1], s=200, c="blue", marker="|", label="P-wave")
ax1.scatter([-1], [-1], s=200, c="red", marker="|", label="S-wave")
ax1.legend(loc="upper left")
ax1.title.set_text("Streaming Seismic Waveforms and Detected P/S Phases")
with col2:
    ui_plot = st.pyplot(plt)
    catalog_df_visual = st.empty()
prev_time = time.time()
prev_time_bot = time.time()
# Handle messages from Kafka: ingest waveforms/picks/events into the rolling
# caches, and redraw the UI at most once per REFRESH_SEC.
for i, message in enumerate(consumer):
    if message.topic == "waveform_raw":
        key = message.key.strip('"')
        timestamp = message.value['timestamp']
        # print(timestamp)
        vec = message.value['vec']
        wave_dict[key].append([message.value['timestamp'], message.value['vec']])
        # Keep only the most recent WINDOW_NUMBER chunks per station.
        wave_dict[key] = wave_dict[key][-WINDOW_NUMBER:]
    elif message.topic == "phasenet_picks":
        # print("phasenet!")
        key = message.key
        pick = message.value
        pick_dict[key].append(pick)
    elif message.topic == "gmma_events":
        # print("gmma!")
        # Quantize the event time onto an EVENT_MIN_GAP grid so
        # near-duplicate detections collapse into one entry.
        key = np.round(timestamp_seconds(message.key) / EVENT_MIN_GAP) * EVENT_MIN_GAP
        event = message.value
        # event_list.extend(event)
        # event_dict[key].append(event)
        event_dict[key] = event
    else:
        print(message.topic)
        # BUG FIX: the original `raise("Topic Error!")` raises a str, which
        # is itself a TypeError in Python 3; raise a real exception.
        raise ValueError("Topic Error!")
    # Tweepy timer
    if time.time() - prev_time_bot > EVENT_MIN_GAP:
        tweepy_status_update(event_dict)
        prev_time_bot = time.time()
    if time.time() - prev_time > REFRESH_SEC:
        prev_time = time.time()
        keys = sorted(wave_dict.keys())
        print("refreshing...")
        min_t = prev_time
        max_t = 0
        # print("len(pick_dict): ", len(pick_dict))
        for j, k in enumerate(keys):
            tmp_vec = []
            tmp_t = []
            # Left-pad with zeros so every trace spans the full window.
            for _ in range(WINDOW_NUMBER - len(wave_dict[k])):
                tmp_vec.extend([[0] * 3] * WINDOW_LENGTH)
            for v in wave_dict[k]:
                tmp_vec.extend(v[1])
                tmp_t.append(v[0])
            lines[j].set_ydata(normalize(np.array(tmp_vec)[::HOP_LENGTH, -1]) / 5 + j)
            # print(pick_dict.keys())
            # print(k, len(k))
            if k in pick_dict:
                t0 = timestamp_seconds(max(tmp_t)) - WINDOW_LENGTH * (WINDOW_NUMBER - 1) * dt
                tn = timestamp_seconds(max(tmp_t)) + WINDOW_LENGTH * dt
                if tn > max_t:
                    max_t = tn
                if t0 < min_t:
                    min_t = t0
                t_picks, colors, t0_idx = get_plot_picks(pick_dict[k], t0, tn)
                scatters[j].set_offsets(np.c_[t_picks, np.ones_like(t_picks) * j])
                scatters[j].set_color(colors)
        if len(event_dict) > 0:
            t_events, mag_events, loc_events, t0_idx = get_plot_events(event_dict, min_t, max_t)
            if len(t_events) > 0:
                loc_events = np.array(loc_events)
                # organize data into the correct form
                # NOTE(review): loc_events_organize is not defined or
                # imported in this file; verify it exists at runtime.
                lng_list, lat_list, z_list = loc_events_organize(loc_events)
                # update figure
                experimental = update_figure_with_cols(experimental, col1, col2, lat_list, lng_list, z_list, mag_events, t_events)
                event_df = extract_df_from_event_dict(event_dict)
        if len(keys) > 0:
            print("plotting...")
            with col2:
                ui_plot.pyplot(plt)
                catalog_df_visual.dataframe(event_df)
            with col1:
                map_figure_experimental.plotly_chart(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)
    if message.topic == "waveform_raw":
        # Throttle waveform ingestion so the UI redraws can keep up.
        time.sleep(REFRESH_SEC / NUM_STATION / 20)
|
[
"streamlit.balloons",
"streamlit.image",
"pandas.read_csv",
"plotly.express.scatter_mapbox",
"collections.defaultdict",
"numpy.around",
"pickle.load",
"numpy.arange",
"numpy.mean",
"pandas.DataFrame",
"datetime.datetime.fromisoformat",
"numpy.std",
"numpy.finfo",
"streamlit.beta_columns",
"matplotlib.pyplot.subplots",
"threading.Thread",
"streamlit.plotly_chart",
"tweepy.API",
"numpy.ones_like",
"time.sleep",
"streamlit.pyplot",
"streamlit.empty",
"tweepy.OAuthHandler",
"os.getenv",
"streamlit.markdown",
"geopy.geocoders.Nominatim",
"time.time",
"PIL.Image.open",
"numpy.array",
"logging.getLogger"
] |
[((541, 908), 'streamlit.markdown', 'st.markdown', (['f"""\n<style>\n .reportview-container .main .block-container{{\n max-width: 100vw;\n padding-top: 1rem;\n padding-right: 1rem;\n padding-left: 1rem;\n padding-bottom: 1rem;\n }}\n .reportview-container .main {{\n color: black;\n background-color: white;\n }}\n</style>\n"""'], {'unsafe_allow_html': '(True)'}), '(\n f"""\n<style>\n .reportview-container .main .block-container{{\n max-width: 100vw;\n padding-top: 1rem;\n padding-right: 1rem;\n padding-left: 1rem;\n padding-bottom: 1rem;\n }}\n .reportview-container .main {{\n color: black;\n background-color: white;\n }}\n</style>\n"""\n , unsafe_allow_html=True)\n', (552, 908), True, 'import streamlit as st\n'), ((1123, 1140), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1134, 1140), False, 'from collections import defaultdict\n'), ((1153, 1170), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (1164, 1170), False, 'from collections import defaultdict\n'), ((1184, 1201), 'collections.defaultdict', 'defaultdict', (['dict'], {}), '(dict)\n', (1195, 1201), False, 'from collections import defaultdict\n'), ((1643, 1683), 'pandas.read_csv', 'pd.read_csv', (['STATION_CSV'], {'delimiter': '"""\t"""'}), "(STATION_CSV, delimiter='\\t')\n", (1654, 1683), True, 'import pandas as pd\n'), ((3014, 3039), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (3023, 3039), False, 'import os\n'), ((3058, 3086), 'os.getenv', 'os.getenv', (['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (3067, 3086), False, 'import os\n'), ((3102, 3127), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN"""'], {}), "('ACCESS_TOKEN')\n", (3111, 3127), False, 'import os\n'), ((3150, 3182), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN_SECRET"""'], {}), "('ACCESS_TOKEN_SECRET')\n", (3159, 3182), False, 'import os\n'), ((3283, 3302), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3300, 3302), False, 'import 
logging\n'), ((4260, 4300), 'geopy.geocoders.Nominatim', 'Nominatim', ([], {'user_agent': '"""https"""', 'timeout': '(5)'}), "(user_agent='https', timeout=5)\n", (4269, 4300), False, 'from geopy.geocoders import Nominatim\n'), ((11093, 11218), 'streamlit.image', 'st.image', (['image_data'], {'caption': 'None', 'width': 'None', 'use_column_width': 'None', 'clamp': '(False)', 'channels': '"""RGB"""', 'output_format': '"""auto"""'}), "(image_data, caption=None, width=None, use_column_width=None, clamp\n =False, channels='RGB', output_format='auto')\n", (11101, 11218), True, 'import streamlit as st\n'), ((11214, 11227), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (11225, 11227), True, 'import streamlit as st\n'), ((11261, 11284), 'streamlit.beta_columns', 'st.beta_columns', (['[1, 1]'], {}), '([1, 1])\n', (11276, 11284), True, 'import streamlit as st\n'), ((11993, 12029), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(8, 5.8)'}), '(1, 1, figsize=(8, 5.8))\n', (12005, 12029), True, 'import matplotlib.pyplot as plt\n'), ((12781, 12792), 'time.time', 'time.time', ([], {}), '()\n', (12790, 12792), False, 'import time\n'), ((12809, 12820), 'time.time', 'time.time', ([], {}), '()\n', (12818, 12820), False, 'import time\n'), ((1616, 1631), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (1627, 1631), False, 'import pickle\n'), ((3341, 3366), 'os.getenv', 'os.getenv', (['"""CONSUMER_KEY"""'], {}), "('CONSUMER_KEY')\n", (3350, 3366), False, 'import os\n'), ((3389, 3417), 'os.getenv', 'os.getenv', (['"""CONSUMER_SECRET"""'], {}), "('CONSUMER_SECRET')\n", (3398, 3417), False, 'import os\n'), ((3437, 3462), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN"""'], {}), "('ACCESS_TOKEN')\n", (3446, 3462), False, 'import os\n'), ((3489, 3521), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN_SECRET"""'], {}), "('ACCESS_TOKEN_SECRET')\n", (3498, 3521), False, 'import os\n'), ((6818, 6986), 'pandas.DataFrame', 'pd.DataFrame', (["{'lat': lat_list, 
'lon': lng_list, 'z': z_list, 'mag': mag_events, 'time':\n t_events, 'size': [(mag_event ** 4 / 3.5) for mag_event in mag_events]}"], {}), "({'lat': lat_list, 'lon': lng_list, 'z': z_list, 'mag':\n mag_events, 'time': t_events, 'size': [(mag_event ** 4 / 3.5) for\n mag_event in mag_events]})\n", (6830, 6986), True, 'import pandas as pd\n'), ((7020, 7197), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['figure_df'], {'lat': '"""lat"""', 'lon': '"""lon"""', 'hover_data': "['mag', 'time', 'lat', 'lon']", 'size': '"""size"""', 'color_discrete_sequence': "['fuchsia']", 'zoom': 'MAP_ZOOM', 'height': '(300)'}), "(figure_df, lat='lat', lon='lon', hover_data=['mag',\n 'time', 'lat', 'lon'], size='size', color_discrete_sequence=['fuchsia'],\n zoom=MAP_ZOOM, height=300)\n", (7037, 7197), True, 'import plotly.express as px\n'), ((7681, 7692), 'time.time', 'time.time', ([], {}), '()\n', (7690, 7692), False, 'import time\n'), ((10805, 10958), 'pandas.DataFrame', 'pd.DataFrame', (["{'Magnitude': mag_values, 'Time': time_values, 'Latitude (deg)': lat_values,\n 'Longitude (deg)': lon_values, 'Depth (km)': z_values}"], {}), "({'Magnitude': mag_values, 'Time': time_values,\n 'Latitude (deg)': lat_values, 'Longitude (deg)': lon_values,\n 'Depth (km)': z_values})\n", (10817, 10958), True, 'import pandas as pd\n'), ((11050, 11091), 'PIL.Image.open', 'Image.open', (['"""quakeflow logo design 2.jpg"""'], {}), "('quakeflow logo design 2.jpg')\n", (11060, 11091), False, 'from PIL import Image\n'), ((11338, 11423), 'pandas.DataFrame', 'pd.DataFrame', (["{'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []}"], {}), "({'lat': [], 'lon': [], 'z': [], 'mag': [], 'time': [], 'size': []}\n )\n", (11350, 11423), True, 'import pandas as pd\n'), ((11434, 11544), 'pandas.DataFrame', 'pd.DataFrame', (["{'Magnitude': [], 'Time': [], 'Latitude (deg)': [], 'Longitude (deg)': [],\n 'Depth (km)': []}"], {}), "({'Magnitude': [], 'Time': [], 'Latitude (deg)': [],\n 'Longitude (deg)': [], 
'Depth (km)': []})\n", (11446, 11544), True, 'import pandas as pd\n'), ((11560, 11731), 'plotly.express.scatter_mapbox', 'px.scatter_mapbox', (['experimental_df'], {'lat': '"""lat"""', 'lon': '"""lon"""', 'hover_data': "['mag', 'time', 'lat', 'lon']", 'color_discrete_sequence': "['fuchsia']", 'zoom': 'MAP_ZOOM', 'height': '(300)'}), "(experimental_df, lat='lat', lon='lon', hover_data=['mag',\n 'time', 'lat', 'lon'], color_discrete_sequence=['fuchsia'], zoom=\n MAP_ZOOM, height=300)\n", (11577, 11731), True, 'import plotly.express as px\n'), ((11913, 11978), 'streamlit.plotly_chart', 'st.plotly_chart', (['experimental'], {'width': 'MAP_WIDTH', 'height': 'MAP_HEIGHT'}), '(experimental, width=MAP_WIDTH, height=MAP_HEIGHT)\n', (11928, 11978), True, 'import streamlit as st\n'), ((12034, 12088), 'numpy.arange', 'np.arange', (['(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH)'], {}), '(WINDOW_LENGTH * WINDOW_NUMBER // HOP_LENGTH)\n', (12043, 12088), True, 'import numpy as np\n'), ((12152, 12167), 'numpy.around', 'np.around', (['x[0]'], {}), '(x[0])\n', (12161, 12167), True, 'import numpy as np\n'), ((12169, 12185), 'numpy.around', 'np.around', (['x[-1]'], {}), '(x[-1])\n', (12178, 12185), True, 'import numpy as np\n'), ((12718, 12732), 'streamlit.pyplot', 'st.pyplot', (['plt'], {}), '(plt)\n', (12727, 12732), True, 'import streamlit as st\n'), ((12757, 12767), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (12765, 12767), True, 'import streamlit as st\n'), ((3587, 3637), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (['consumer_key', 'consumer_secret'], {}), '(consumer_key, consumer_secret)\n', (3606, 3637), False, 'import tweepy\n'), ((3717, 3790), 'tweepy.API', 'tweepy.API', (['auth'], {'wait_on_rate_limit': '(True)', 'wait_on_rate_limit_notify': '(True)'}), '(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n', (3727, 3790), False, 'import tweepy\n'), ((13877, 13888), 'time.time', 'time.time', ([], {}), '()\n', (13886, 13888), False, 'import time\n'), 
((13956, 13967), 'time.time', 'time.time', ([], {}), '()\n', (13965, 13967), False, 'import time\n'), ((16062, 16104), 'time.sleep', 'time.sleep', (['(REFRESH_SEC / NUM_STATION / 20)'], {}), '(REFRESH_SEC / NUM_STATION / 20)\n', (16072, 16104), False, 'import time\n'), ((1004, 1013), 'numpy.std', 'np.std', (['x'], {}), '(x)\n', (1010, 1013), True, 'import numpy as np\n'), ((1072, 1097), 'datetime.datetime.fromisoformat', 'datetime.fromisoformat', (['x'], {}), '(x)\n', (1094, 1097), False, 'from datetime import datetime\n'), ((13767, 13778), 'time.time', 'time.time', ([], {}), '()\n', (13776, 13778), False, 'import time\n'), ((13897, 13908), 'time.time', 'time.time', ([], {}), '()\n', (13906, 13908), False, 'import time\n'), ((965, 975), 'numpy.mean', 'np.mean', (['x'], {}), '(x)\n', (972, 975), True, 'import numpy as np\n'), ((978, 995), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (986, 995), True, 'import numpy as np\n'), ((1016, 1033), 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), '(x.dtype)\n', (1024, 1033), True, 'import numpy as np\n'), ((7885, 7896), 'time.time', 'time.time', ([], {}), '()\n', (7894, 7896), False, 'import time\n'), ((9658, 9781), 'threading.Thread', 'threading.Thread', ([], {'target': 'tweep_update_with_media', 'name': '"""Uploader"""', 'args': '(api, mag, lng, lat, z, event_time, geolocator)'}), "(target=tweep_update_with_media, name='Uploader', args=(api,\n mag, lng, lat, z, event_time, geolocator))\n", (9674, 9781), False, 'import threading\n'), ((9893, 9904), 'time.time', 'time.time', ([], {}), '()\n', (9902, 9904), False, 'import time\n'), ((15331, 15351), 'numpy.array', 'np.array', (['loc_events'], {}), '(loc_events)\n', (15339, 15351), True, 'import numpy as np\n'), ((8609, 8620), 'time.time', 'time.time', ([], {}), '()\n', (8618, 8620), False, 'import time\n'), ((9399, 9410), 'time.time', 'time.time', ([], {}), '()\n', (9408, 9410), False, 'import time\n'), ((10213, 10224), 'time.time', 'time.time', ([], {}), '()\n', 
(10222, 10224), False, 'import time\n'), ((14492, 14509), 'numpy.array', 'np.array', (['tmp_vec'], {}), '(tmp_vec)\n', (14500, 14509), True, 'import numpy as np\n'), ((15064, 15085), 'numpy.ones_like', 'np.ones_like', (['t_picks'], {}), '(t_picks)\n', (15076, 15085), True, 'import numpy as np\n')]
|
from threading import currentThread
from typing import Callable, Any
from twisted.internet.threads import deferToThread
from mdstudio.deferred.chainable import Chainable
from mdstudio.util.exception import MDStudioException
def make_deferred(method):
    # type: (Callable) -> Callable[Any, Chainable]
    """
    Wrap *method* so that each call executes in the twisted thread pool and
    returns a :class:`Chainable` around the resulting deferred.

    Calls are only permitted from the main (reactor) thread; anything else
    raises :class:`MDStudioException`.

    :param method: callable to run in a worker thread
    :return:       wrapper returning a Chainable
    """
    def wrapper(*args, **kwargs):
        thread_name = currentThread().getName()
        if thread_name != 'MainThread':
            raise MDStudioException('Not on the main thread')
        deferred = deferToThread(method, *args, **kwargs)
        return Chainable(deferred)

    return wrapper
|
[
"mdstudio.util.exception.MDStudioException",
"twisted.internet.threads.deferToThread",
"threading.currentThread"
] |
[((633, 676), 'mdstudio.util.exception.MDStudioException', 'MDStudioException', (['"""Not on the main thread"""'], {}), "('Not on the main thread')\n", (650, 676), False, 'from mdstudio.util.exception import MDStudioException\n'), ((702, 740), 'twisted.internet.threads.deferToThread', 'deferToThread', (['method', '*args'], {}), '(method, *args, **kwargs)\n', (715, 740), False, 'from twisted.internet.threads import deferToThread\n'), ((572, 587), 'threading.currentThread', 'currentThread', ([], {}), '()\n', (585, 587), False, 'from threading import currentThread\n')]
|
# Source: https://github.com/joeduffy/aws-tags-example
import pulumi
from taggable import is_taggable
# register_auto_tags registers a global stack transformation that merges a set
# of tags with whatever was also explicitly added to the resource definition.
def register_auto_tags(auto_tags):
    """Install a global stack transformation that merges *auto_tags* into every resource."""
    def apply_tags(args):
        return auto_tag(args, auto_tags)

    pulumi.runtime.register_stack_transformation(apply_tags)
# auto_tag applies the given tags to the resource properties if applicable.
def auto_tag(args, auto_tags):
    """Merge *auto_tags* into the resource's ``tags`` property when the type is taggable.

    :param args:      pulumi ResourceTransformationArgs for the resource
    :param auto_tags: dict of tags to merge in (wins on key collisions)
    :return:          ResourceTransformationResult with the (possibly updated) props
    """
    if is_taggable(args.type_):
        # Robustness fix: use .get() so resources declared without an explicit
        # "tags" property don't raise KeyError; treat None the same as absent.
        existing = args.props.get("tags") or {}
        args.props["tags"] = {**existing, **auto_tags}
    return pulumi.ResourceTransformationResult(args.props, args.opts)
|
[
"pulumi.ResourceTransformationResult",
"taggable.is_taggable"
] |
[((501, 524), 'taggable.is_taggable', 'is_taggable', (['args.type_'], {}), '(args.type_)\n', (512, 524), False, 'from taggable import is_taggable\n'), ((614, 672), 'pulumi.ResourceTransformationResult', 'pulumi.ResourceTransformationResult', (['args.props', 'args.opts'], {}), '(args.props, args.opts)\n', (649, 672), False, 'import pulumi\n')]
|
#!/usr/bin/env python
from http.server import BaseHTTPRequestHandler,HTTPServer
from os import curdir,sep
PORT_NUMBER = 8080
class myHandler(BaseHTTPRequestHandler):
    """Serve the same PNG image in response to every GET request."""

    def do_GET(self):
        # Reply 200 with the logo image regardless of the requested path.
        self.send_response(200)
        self.send_header('Content-type', 'image/png')
        self.end_headers()
        image_path = curdir + sep + 'logo.png'
        with open(image_path, 'rb') as image_file:
            payload = image_file.read()
        self.wfile.write(payload)
try:
    # Listen on all interfaces on PORT_NUMBER and serve requests forever.
    server = HTTPServer(('', PORT_NUMBER), myHandler)
    print('Started httpserver on port ' + str(PORT_NUMBER))
    server.serve_forever()
except KeyboardInterrupt:
    # Ctrl-C: release the listening socket cleanly before exiting.
    server.socket.close()
|
[
"http.server.HTTPServer"
] |
[((495, 535), 'http.server.HTTPServer', 'HTTPServer', (["('', PORT_NUMBER)", 'myHandler'], {}), "(('', PORT_NUMBER), myHandler)\n", (505, 535), False, 'from http.server import BaseHTTPRequestHandler, HTTPServer\n')]
|
import tempfile
import time
from typing import Any, Dict, List, Optional
from asyncssh.process import SSHCompletedProcess
from tenacity import retry, stop_after_attempt, wait_random
from labfunctions import defaults
from labfunctions.cluster import ssh
from labfunctions.cluster.utils import ssh_from_settings
from labfunctions.conf.jtemplates import render_to_file
from labfunctions.types import ServerSettings
from labfunctions.types.agent import AgentRequest
from labfunctions.utils import execute_cmd_no_block, get_version, run_sync
def _prepare_agent_cmd(
ip_address: str,
machine_id: str,
cluster: str,
qnames: str,
workers_n=1,
):
"""
It will run nb agent command.
Name is not provided, so the agent will choose their name.
"""
cmd = f"nb agent -i {ip_address} -C {cluster} -q {qnames} -w {workers_n} -m {machine_id}"
return cmd
def _prepare_docker_cmd(
ip_address: str,
machine_id: str,
qnames: str,
cluster: str,
env_file: str,
docker_image: str,
docker_version="latest",
workers_n=1,
):
nb_agent_cmd = _prepare_agent_cmd(
ip_address, machine_id, cluster, qnames, workers_n
)
cmd = (
f"docker run -d -v /var/run/docker.sock:/var/run/docker.sock "
f"-e LF_SERVER=true --env-file={env_file} "
f"{docker_image}:{docker_version} "
f"{nb_agent_cmd}"
)
return cmd
# Retry the whole deployment up to 3 times, waiting 1-3 s between attempts.
@retry(stop=stop_after_attempt(3), wait=wait_random(min=1, max=3))
def agent(req: AgentRequest, data_settings: Dict[str, Any]) -> SSHCompletedProcess:
    """
    Deploy an agent into a server, it has two steps:
    render and copy a .env.docker file into the remote server
    and pull and start the agent's docker instance
    """
    with tempfile.TemporaryDirectory() as tmpdir:
        # Render the agent environment file locally, then scp it into the
        # agent home directory on the remote host.
        env_file = f"{tmpdir}/.env.docker"
        render_to_file(defaults.AGENT_ENV_TPL, env_file, data=data_settings)
        run_sync(
            ssh.scp_from_local,
            remote_addr=req.machine_ip,
            remote_dir=req.agent_homedir,
            local_file=env_file,
            keys=[req.private_key_path],
        )
    # Path of the env file as seen from the remote machine.
    agent_env_file = f"{req.agent_homedir}/.env.docker"
    addr = req.advertise_addr or req.machine_ip
    cmd = _prepare_docker_cmd(
        addr,
        machine_id=req.machine_id,
        qnames=",".join(req.qnames),
        cluster=req.cluster,
        env_file=agent_env_file,
        docker_image=req.docker_image,
        docker_version=req.docker_version,
        workers_n=req.worker_procs,
    )
    # Launch the dockerized agent over SSH and return the command result.
    result = run_sync(ssh.run_cmd, req.machine_ip, cmd, keys=[req.private_key_path])
    return result
def agent_local(req: AgentRequest, data_settings: Dict[str, Any], use_docker=False):
    """Start an agent on the local machine, optionally inside docker.

    Renders the agent env file into the agent home dir, builds either the
    docker or the bare ``nb agent`` command, and launches it without blocking.

    :return: dict with the ``pid`` of the spawned process
    """
    addr = req.advertise_addr or req.machine_ip
    env_file = f"{req.agent_homedir}/.env.docker"
    render_to_file(defaults.AGENT_ENV_TPL, env_file, data=data_settings)
    queues = ",".join(req.qnames)
    if use_docker:
        cmd = _prepare_docker_cmd(
            addr,
            machine_id=req.machine_id,
            qnames=queues,
            cluster=req.cluster,
            env_file=env_file,
            docker_image=req.docker_image,
            docker_version=req.docker_version,
            workers_n=req.worker_procs,
        )
    else:
        cmd = _prepare_agent_cmd(
            addr,
            machine_id=req.machine_id,
            cluster=req.cluster,
            qnames=queues,
            workers_n=req.worker_procs,
        )
    result = execute_cmd_no_block(cmd, check=False)
    return {"pid": result.pid}
def agent_from_settings(
    ip,
    machine_id,
    cluster,
    settings: ServerSettings,
    qnames: List[str],
    worker_procs=1,
    docker_version=None,
) -> AgentRequest:
    """Build an :class:`AgentRequest` from server settings.

    The SSH key path comes from *settings*; when *docker_version* is falsy
    the current package version is used instead.
    """
    ssh_key = ssh_from_settings(settings)
    resolved_version = docker_version or get_version()
    return AgentRequest(
        machine_ip=ip,
        machine_id=machine_id,
        private_key_path=ssh_key.private_path,
        cluster=cluster,
        qnames=qnames,
        agent_homedir=settings.AGENT_HOMEDIR,
        docker_version=resolved_version,
        worker_procs=worker_procs,
    )
|
[
"tempfile.TemporaryDirectory",
"tenacity.stop_after_attempt",
"labfunctions.types.agent.AgentRequest",
"labfunctions.utils.run_sync",
"labfunctions.conf.jtemplates.render_to_file",
"labfunctions.cluster.utils.ssh_from_settings",
"tenacity.wait_random",
"labfunctions.utils.get_version",
"labfunctions.utils.execute_cmd_no_block"
] |
[((2556, 2627), 'labfunctions.utils.run_sync', 'run_sync', (['ssh.run_cmd', 'req.machine_ip', 'cmd'], {'keys': '[req.private_key_path]'}), '(ssh.run_cmd, req.machine_ip, cmd, keys=[req.private_key_path])\n', (2564, 2627), False, 'from labfunctions.utils import execute_cmd_no_block, get_version, run_sync\n'), ((2836, 2904), 'labfunctions.conf.jtemplates.render_to_file', 'render_to_file', (['defaults.AGENT_ENV_TPL', 'env_file'], {'data': 'data_settings'}), '(defaults.AGENT_ENV_TPL, env_file, data=data_settings)\n', (2850, 2904), False, 'from labfunctions.conf.jtemplates import render_to_file\n'), ((3501, 3539), 'labfunctions.utils.execute_cmd_no_block', 'execute_cmd_no_block', (['cmd'], {'check': '(False)'}), '(cmd, check=False)\n', (3521, 3539), False, 'from labfunctions.utils import execute_cmd_no_block, get_version, run_sync\n'), ((3762, 3789), 'labfunctions.cluster.utils.ssh_from_settings', 'ssh_from_settings', (['settings'], {}), '(settings)\n', (3779, 3789), False, 'from labfunctions.cluster.utils import ssh_from_settings\n'), ((3847, 4063), 'labfunctions.types.agent.AgentRequest', 'AgentRequest', ([], {'machine_ip': 'ip', 'machine_id': 'machine_id', 'private_key_path': 'key.private_path', 'cluster': 'cluster', 'qnames': 'qnames', 'agent_homedir': 'settings.AGENT_HOMEDIR', 'docker_version': 'version', 'worker_procs': 'worker_procs'}), '(machine_ip=ip, machine_id=machine_id, private_key_path=key.\n private_path, cluster=cluster, qnames=qnames, agent_homedir=settings.\n AGENT_HOMEDIR, docker_version=version, worker_procs=worker_procs)\n', (3859, 4063), False, 'from labfunctions.types.agent import AgentRequest\n'), ((1758, 1787), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1785, 1787), False, 'import tempfile\n'), ((1850, 1918), 'labfunctions.conf.jtemplates.render_to_file', 'render_to_file', (['defaults.AGENT_ENV_TPL', 'env_file'], {'data': 'data_settings'}), '(defaults.AGENT_ENV_TPL, env_file, data=data_settings)\n', (1864, 
1918), False, 'from labfunctions.conf.jtemplates import render_to_file\n'), ((1927, 2068), 'labfunctions.utils.run_sync', 'run_sync', (['ssh.scp_from_local'], {'remote_addr': 'req.machine_ip', 'remote_dir': 'req.agent_homedir', 'local_file': 'env_file', 'keys': '[req.private_key_path]'}), '(ssh.scp_from_local, remote_addr=req.machine_ip, remote_dir=req.\n agent_homedir, local_file=env_file, keys=[req.private_key_path])\n', (1935, 2068), False, 'from labfunctions.utils import execute_cmd_no_block, get_version, run_sync\n'), ((1427, 1448), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(3)'], {}), '(3)\n', (1445, 1448), False, 'from tenacity import retry, stop_after_attempt, wait_random\n'), ((1455, 1480), 'tenacity.wait_random', 'wait_random', ([], {'min': '(1)', 'max': '(3)'}), '(min=1, max=3)\n', (1466, 1480), False, 'from tenacity import retry, stop_after_attempt, wait_random\n'), ((3822, 3835), 'labfunctions.utils.get_version', 'get_version', ([], {}), '()\n', (3833, 3835), False, 'from labfunctions.utils import execute_cmd_no_block, get_version, run_sync\n')]
|
from django.core.management.base import BaseCommand, CommandError
from email_user.models import EmailUser
from service_info_cms.utils import create_essential_pages
class Command(BaseCommand):
    help = """
    Create CMS pages which are required for basic functionality of the site.
    """

    def handle(self, *args, **options):
        """Create the essential CMS pages, published by the given user (e-mail)."""
        if len(args) != 1:
            raise CommandError('A single argument is required: user id for page publisher (e-mail)')
        (publisher_email,) = args
        publisher = EmailUser.objects.get(email=publisher_email)
        create_essential_pages(publisher)
|
[
"django.core.management.base.CommandError",
"service_info_cms.utils.create_essential_pages",
"email_user.models.EmailUser.objects.get"
] |
[((492, 528), 'email_user.models.EmailUser.objects.get', 'EmailUser.objects.get', ([], {'email': 'args[0]'}), '(email=args[0])\n', (513, 528), False, 'from email_user.models import EmailUser\n'), ((537, 570), 'service_info_cms.utils.create_essential_pages', 'create_essential_pages', (['publisher'], {}), '(publisher)\n', (559, 570), False, 'from service_info_cms.utils import create_essential_pages\n'), ((389, 476), 'django.core.management.base.CommandError', 'CommandError', (['"""A single argument is required: user id for page publisher (e-mail)"""'], {}), "(\n 'A single argument is required: user id for page publisher (e-mail)')\n", (401, 476), False, 'from django.core.management.base import BaseCommand, CommandError\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Ringo"
'''
Aberration策略 (难度:初级)
参考: https://www.shinnytech.com/blog/aberration/
注: 该示例策略仅用于功能示范, 实盘时请根据自己的策略/经验进行修改
'''
from tqsdk import TqApi, TargetPosTask
from tqsdk.ta import BOLL
# Set the contract code to trade
SYMBOL = "DCE.i2001"

api = TqApi()
# Live quote, daily K-lines (period 60*60*24 seconds) and position for SYMBOL
quote = api.get_quote(SYMBOL)
klines = api.get_kline_serial(SYMBOL, 60*60*24)
position = api.get_position(SYMBOL)
# Task that adjusts the account towards a target net position for SYMBOL
target_pos = TargetPosTask(api, SYMBOL)
# 使用BOLL指标计算中轨、上轨和下轨,其中26为周期N ,2为参数p
def boll_line(klines):
    """Return the latest (mid, top, bottom) Bollinger band values.

    Uses the BOLL indicator with period N=26 and parameter p=2, taking the
    most recent value of each band.
    """
    indicator = BOLL(klines, 26, 2)
    mid = indicator["mid"].iloc[-1]
    top = indicator["top"].iloc[-1]
    bottom = indicator["bottom"].iloc[-1]
    print("策略运行,中轨:%.2f,上轨为:%.2f,下轨为:%.2f" % (mid, top, bottom))
    return mid, top, bottom
midline, topline, bottomline = boll_line(klines)

while True:
    api.wait_update()
    # Recompute the BOLL bands whenever a new daily K-line is generated.
    if api.is_changing(klines.iloc[-1], "datetime"):
        midline, topline, bottomline = boll_line(klines)
    # Re-evaluate the position whenever the last price changes.
    if api.is_changing(quote, "last_price"):
        # Entry conditions (flat: no long and no short position).
        if position.pos_long == 0 and position.pos_short == 0:
            # Price crosses above the upper band: open a long position.
            if quote.last_price > topline:
                print("最新价大于上轨,K线上穿上轨,开多仓")
                target_pos.set_target_volume(20)
            # Price crosses below the lower band: open a short position.
            elif quote.last_price < bottomline:
                # BUG FIX: the original logged the "open long" message in this
                # short-entry branch (copy-paste error).
                print("最新价小于下轨,K线下穿下轨,开空仓")
                target_pos.set_target_volume(-20)
            else:
                print("当前最新价%.2f,未穿上轨或下轨,不开仓" % quote.last_price)
        # Holding long: exit when the price falls back below the mid band.
        elif position.pos_long > 0:
            if quote.last_price < midline:
                print("最新价低于中线,多头清仓离场")
                target_pos.set_target_volume(0)
            else:
                print("当前多仓,未穿越中线,仓位无变化")
        # Holding short: exit when the price rises back above the mid band.
        elif position.pos_short > 0:
            if quote.last_price > midline:
                print("最新价高于中线,空头清仓离场")
                target_pos.set_target_volume(0)
            else:
                print("当前空仓,未穿越中线,仓位无变化")
|
[
"tqsdk.ta.BOLL",
"tqsdk.TargetPosTask",
"tqsdk.TqApi"
] |
[((285, 292), 'tqsdk.TqApi', 'TqApi', ([], {}), '()\n', (290, 292), False, 'from tqsdk import TqApi, TargetPosTask\n'), ((420, 446), 'tqsdk.TargetPosTask', 'TargetPosTask', (['api', 'SYMBOL'], {}), '(api, SYMBOL)\n', (433, 446), False, 'from tqsdk import TqApi, TargetPosTask\n'), ((522, 541), 'tqsdk.ta.BOLL', 'BOLL', (['klines', '(26)', '(2)'], {}), '(klines, 26, 2)\n', (526, 541), False, 'from tqsdk.ta import BOLL\n')]
|