hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f5323c0ecb93023af8ca06a7ed9f607268d682 | 7,924 | py | Python | assignment2/q1_classifier.py | junteudjio/stanford_NLP_CS224n_assigment2 | 69c579b211293cdafe5ba07551448b42a28f34c5 | [
"MIT"
] | null | null | null | assignment2/q1_classifier.py | junteudjio/stanford_NLP_CS224n_assigment2 | 69c579b211293cdafe5ba07551448b42a28f34c5 | [
"MIT"
] | null | null | null | assignment2/q1_classifier.py | junteudjio/stanford_NLP_CS224n_assigment2 | 69c579b211293cdafe5ba07551448b42a28f34c5 | [
"MIT"
] | null | null | null | import time
import numpy as np
import tensorflow as tf
from q1_softmax import softmax
from q1_softmax import cross_entropy_loss
from model import Model
from utils.general_utils import get_minibatches
class Config(object):
    """Holds model hyperparams and data information.

    The config class is used to store various hyperparameters and dataset
    information parameters. Model objects are passed a Config() object at
    instantiation.
    """
    n_samples = 1024   # number of synthetic training examples
    n_features = 100   # dimensionality of each input example
    n_classes = 5      # number of output classes
    batch_size = 64    # examples per minibatch
    n_epochs = 50      # full passes over the training data
    lr = 1e-4          # learning rate for gradient descent
class SoftmaxModel(Model):
"""Implements a Softmax classifier with cross-entropy loss."""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
and will be fed data during training.
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(batch_size, n_features), type tf.float32
labels_placeholder: Labels placeholder tensor of shape
(batch_size, n_classes), type tf.int32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
"""
### YOUR CODE HERE
self.input_placeholder = tf.placeholder(dtype=tf.float32, shape=(self.config.batch_size, self.config.n_features))
self.labels_placeholder = tf.placeholder(dtype=tf.int32, shape=(self.config.batch_size, self.config.n_classes))
### END YOUR CODE
def create_feed_dict(self, inputs_batch, labels_batch=None):
"""Creates the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
If label_batch is None, then no labels are added to feed_dict.
Hint: The keys for the feed_dict should be the placeholder
tensors created in add_placeholders.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
### YOUR CODE HERE
feed_dict = {
self.input_placeholder: inputs_batch,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
### END YOUR CODE
return feed_dict
def add_prediction_op(self):
"""Adds the core transformation for this model which transforms a batch of input
data into a batch of predictions. In this case, the transformation is a linear layer plus a
softmax transformation:
y = softmax(Wx + b)
Hint: Make sure to create tf.Variables as needed.
Hint: For this simple use-case, it's sufficient to initialize both weights W
and biases b with zeros.
Args:
input_data: A tensor of shape (batch_size, n_features).
Returns:
pred: A tensor of shape (batch_size, n_classes)
"""
### YOUR CODE HERE
W = tf.Variable(tf.zeros(shape=(self.config.n_features, self.config.n_classes)), dtype=tf.float32)
b = tf.Variable(tf.zeros(shape=(1, self.config.n_classes)), dtype=tf.float32)
pred = softmax(tf.matmul(self.input_placeholder, W) + b)
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: Use the cross_entropy_loss function we defined. This should be a very
short function.
Args:
pred: A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
loss = cross_entropy_loss(self.labels_placeholder, pred)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.GradientDescentOptimizer to get an optimizer object.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
optimizer = tf.train.GradientDescentOptimizer(learning_rate=Config.lr)
train_op = optimizer.minimize(loss)
### END YOUR CODE
return train_op
def run_epoch(self, sess, inputs, labels):
"""Runs an epoch of training.
Args:
sess: tf.Session() object
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
average_loss: scalar. Average minibatch loss of model on epoch.
"""
n_minibatches, total_loss = 0, 0
for input_batch, labels_batch in get_minibatches([inputs, labels], self.config.batch_size):
n_minibatches += 1
total_loss += self.train_on_batch(sess, input_batch, labels_batch)
return total_loss / n_minibatches
def fit(self, sess, inputs, labels):
"""Fit model on provided data.
Args:
sess: tf.Session()
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
losses: list of loss per epoch
"""
losses = []
for epoch in range(self.config.n_epochs):
start_time = time.time()
average_loss = self.run_epoch(sess, inputs, labels)
duration = time.time() - start_time
print 'Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(epoch, average_loss, duration)
losses.append(average_loss)
return losses
def __init__(self, config):
"""Initializes the model.
Args:
config: A model configuration object of type Config
"""
self.config = config
self.build()
def test_softmax_model():
"""Train softmax model for a number of steps."""
config = Config()
# Generate random data to train the model on
np.random.seed(1234)
inputs = np.random.rand(config.n_samples, config.n_features)
labels = np.zeros((config.n_samples, config.n_classes), dtype=np.int32)
labels[:, 0] = 1
# Tell TensorFlow that the model will be built into the default Graph.
# (not required but good practice)
with tf.Graph().as_default():
# Build the model and add the variable initializer Op
model = SoftmaxModel(config)
init = tf.global_variables_initializer()
# If you are using an old version of TensorFlow, you may have to use
# this initializer instead.
# init = tf.initialize_all_variables()
# Create a session for running Ops in the Graph
with tf.Session() as sess:
# Run the Op to initialize the variables.
sess.run(init)
# Fit the model
losses = model.fit(sess, inputs, labels)
# If Ops are implemented correctly, the average loss should fall close to zero
# rapidly.
assert losses[-1] < .5
print "Basic (non-exhaustive) classifier tests pass"
# Allow running this module directly as a quick smoke test.
if __name__ == "__main__":
    test_softmax_model()
| 35.533632 | 121 | 0.627839 | import time
import numpy as np
import tensorflow as tf
from q1_softmax import softmax
from q1_softmax import cross_entropy_loss
from model import Model
from utils.general_utils import get_minibatches
class Config(object):
"""Holds model hyperparams and data information.
The config class is used to store various hyperparameters and dataset
information parameters. Model objects are passed a Config() object at
instantiation.
"""
n_samples = 1024
n_features = 100
n_classes = 5
batch_size = 64
n_epochs = 50
lr = 1e-4
class SoftmaxModel(Model):
"""Implements a Softmax classifier with cross-entropy loss."""
def add_placeholders(self):
"""Generates placeholder variables to represent the input tensors.
These placeholders are used as inputs by the rest of the model building
and will be fed data during training.
Adds following nodes to the computational graph
input_placeholder: Input placeholder tensor of shape
(batch_size, n_features), type tf.float32
labels_placeholder: Labels placeholder tensor of shape
(batch_size, n_classes), type tf.int32
Add these placeholders to self as the instance variables
self.input_placeholder
self.labels_placeholder
"""
tf.placeholder(dtype=tf.float32, shape=(self.config.batch_size, self.config.n_features))
self.labels_placeholder = tf.placeholder(dtype=tf.int32, shape=(self.config.batch_size, self.config.n_classes))
, inputs_batch, labels_batch=None):
"""Creates the feed_dict for training the given step.
A feed_dict takes the form of:
feed_dict = {
<placeholder>: <tensor of values to be passed for placeholder>,
....
}
If label_batch is None, then no labels are added to feed_dict.
Hint: The keys for the feed_dict should be the placeholder
tensors created in add_placeholders.
Args:
inputs_batch: A batch of input data.
labels_batch: A batch of label data.
Returns:
feed_dict: The feed dictionary mapping from placeholders to values.
"""
self.input_placeholder: inputs_batch,
}
if labels_batch is not None:
feed_dict[self.labels_placeholder] = labels_batch
def add_prediction_op(self):
"""Adds the core transformation for this model which transforms a batch of input
data into a batch of predictions. In this case, the transformation is a linear layer plus a
softmax transformation:
y = softmax(Wx + b)
Hint: Make sure to create tf.Variables as needed.
Hint: For this simple use-case, it's sufficient to initialize both weights W
and biases b with zeros.
Args:
input_data: A tensor of shape (batch_size, n_features).
Returns:
pred: A tensor of shape (batch_size, n_classes)
"""
### YOUR CODE HERE
W = tf.Variable(tf.zeros(shape=(self.config.n_features, self.config.n_classes)), dtype=tf.float32)
b = tf.Variable(tf.zeros(shape=(1, self.config.n_classes)), dtype=tf.float32)
pred = softmax(tf.matmul(self.input_placeholder, W) + b)
### END YOUR CODE
return pred
def add_loss_op(self, pred):
"""Adds cross_entropy_loss ops to the computational graph.
Hint: Use the cross_entropy_loss function we defined. This should be a very
short function.
Args:
pred: A tensor of shape (batch_size, n_classes)
Returns:
loss: A 0-d tensor (scalar)
"""
### YOUR CODE HERE
loss = cross_entropy_loss(self.labels_placeholder, pred)
### END YOUR CODE
return loss
def add_training_op(self, loss):
"""Sets up the training Ops.
Creates an optimizer and applies the gradients to all trainable variables.
The Op returned by this function is what must be passed to the
`sess.run()` call to cause the model to train. See
https://www.tensorflow.org/versions/r0.7/api_docs/python/train.html#Optimizer
for more information.
Hint: Use tf.train.GradientDescentOptimizer to get an optimizer object.
Calling optimizer.minimize() will return a train_op object.
Args:
loss: Loss tensor, from cross_entropy_loss.
Returns:
train_op: The Op for training.
"""
### YOUR CODE HERE
optimizer = tf.train.GradientDescentOptimizer(learning_rate=Config.lr)
train_op = optimizer.minimize(loss)
### END YOUR CODE
return train_op
def run_epoch(self, sess, inputs, labels):
"""Runs an epoch of training.
Args:
sess: tf.Session() object
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
average_loss: scalar. Average minibatch loss of model on epoch.
"""
n_minibatches, total_loss = 0, 0
for input_batch, labels_batch in get_minibatches([inputs, labels], self.config.batch_size):
n_minibatches += 1
total_loss += self.train_on_batch(sess, input_batch, labels_batch)
return total_loss / n_minibatches
def fit(self, sess, inputs, labels):
"""Fit model on provided data.
Args:
sess: tf.Session()
inputs: np.ndarray of shape (n_samples, n_features)
labels: np.ndarray of shape (n_samples, n_classes)
Returns:
losses: list of loss per epoch
"""
losses = []
for epoch in range(self.config.n_epochs):
start_time = time.time()
average_loss = self.run_epoch(sess, inputs, labels)
duration = time.time() - start_time
print 'Epoch {:}: loss = {:.2f} ({:.3f} sec)'.format(epoch, average_loss, duration)
losses.append(average_loss)
return losses
def __init__(self, config):
"""Initializes the model.
Args:
config: A model configuration object of type Config
"""
self.config = config
self.build()
def test_softmax_model():
"""Train softmax model for a number of steps."""
config = Config()
# Generate random data to train the model on
np.random.seed(1234)
inputs = np.random.rand(config.n_samples, config.n_features)
labels = np.zeros((config.n_samples, config.n_classes), dtype=np.int32)
labels[:, 0] = 1
# Tell TensorFlow that the model will be built into the default Graph.
# (not required but good practice)
with tf.Graph().as_default():
# Build the model and add the variable initializer Op
model = SoftmaxModel(config)
init = tf.global_variables_initializer()
# If you are using an old version of TensorFlow, you may have to use
# this initializer instead.
# init = tf.initialize_all_variables()
# Create a session for running Ops in the Graph
with tf.Session() as sess:
# Run the Op to initialize the variables.
sess.run(init)
# Fit the model
losses = model.fit(sess, inputs, labels)
# If Ops are implemented correctly, the average loss should fall close to zero
# rapidly.
assert losses[-1] < .5
print "Basic (non-exhaustive) classifier tests pass"
if __name__ == "__main__":
test_softmax_model()
| false | true |
f7f5338453578b70b526478111e0bcf82e38c627 | 2,215 | py | Python | datasets/columns.py | math-sasso/datasets | 07a5a87ba8f40e49c8ff5d3590b163461bf7779f | [
"Apache-2.0"
] | null | null | null | datasets/columns.py | math-sasso/datasets | 07a5a87ba8f40e49c8ff5d3590b163461bf7779f | [
"Apache-2.0"
] | null | null | null | datasets/columns.py | math-sasso/datasets | 07a5a87ba8f40e49c8ff5d3590b163461bf7779f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
from typing import Dict, List
from platiagro import load_dataset, save_dataset, stat_dataset
from platiagro.featuretypes import validate_featuretypes
from werkzeug.exceptions import BadRequest, NotFound
def list_columns(dataset: str) -> List[Dict[str, str]]:
    """Lists all columns from a dataset.

    Args:
        dataset (str): the dataset name.
    Returns:
        A list of columns names and featuretypes.
    Raises:
        NotFound: when the dataset does not exist.
    """
    # Only the metadata lookup can raise FileNotFoundError; keep the try
    # body to that single call.
    try:
        metadata = stat_dataset(dataset)
    except FileNotFoundError:
        raise NotFound("The specified dataset does not exist")
    names = metadata.get("columns", [])
    types = metadata.get("featuretypes", [])
    return [{"name": name, "featuretype": ftype} for name, ftype in zip(names, types)]
def update_column(dataset: str, column: str, featuretype: str) -> Dict[str, str]:
    """Updates a column from a dataset.

    Args:
        dataset (str): the dataset name.
        column (str): the column name.
        featuretype (str): the feature type (Numerical, Categorical, or DateTime).
    Returns:
        The column info.
    Raises:
        NotFound: when the dataset or column does not exist.
        BadRequest: when the featuretype is invalid.
    """
    try:
        metadata = stat_dataset(dataset)
        if "columns" not in metadata or "featuretypes" not in metadata:
            raise NotFound("The specified column does not exist")
        column_names = metadata["columns"]
        if column not in column_names:
            raise NotFound("The specified column does not exist")
        # Overwrite the feature type at the column's position, then
        # re-validate the full list before persisting anything.
        position = column_names.index(column)
        metadata["featuretypes"][position] = featuretype
        validate_featuretypes(metadata["featuretypes"])
        dataframe = load_dataset(dataset)
        # uses PlatIAgro SDK to persist the dataset with updated metadata
        save_dataset(dataset, dataframe, metadata=metadata)
    except FileNotFoundError:
        raise NotFound("The specified dataset does not exist")
    except ValueError as e:
        raise BadRequest(str(e))
    return {"name": column, "featuretype": featuretype}
| 29.533333 | 100 | 0.654628 |
from typing import Dict, List
from platiagro import load_dataset, save_dataset, stat_dataset
from platiagro.featuretypes import validate_featuretypes
from werkzeug.exceptions import BadRequest, NotFound
def list_columns(dataset: str) -> List[Dict[str, str]]:
try:
metadata = stat_dataset(dataset)
columns = metadata.get("columns", [])
featuretypes = metadata.get("featuretypes", [])
columns = [{"name": col, "featuretype": ftype} for col, ftype in zip(columns, featuretypes)]
return columns
except FileNotFoundError:
raise NotFound("The specified dataset does not exist")
def update_column(dataset: str, column: str, featuretype: str) -> Dict[str, str]:
try:
metadata = stat_dataset(dataset)
if "columns" not in metadata or "featuretypes" not in metadata:
raise NotFound("The specified column does not exist")
columns = metadata["columns"]
if column not in columns:
raise NotFound("The specified column does not exist")
index = columns.index(column)
metadata["featuretypes"][index] = featuretype
validate_featuretypes(metadata["featuretypes"])
df = load_dataset(dataset)
save_dataset(dataset, df, metadata=metadata)
except FileNotFoundError:
raise NotFound("The specified dataset does not exist")
except ValueError as e:
raise BadRequest(str(e))
return {"name": column, "featuretype": featuretype}
| true | true |
f7f5338f6024957a5790b9d27c48e6729fecf1a0 | 1,361 | py | Python | src/schemathesis/service/handler.py | gluhar2006/schemathesis | 3cb6b0b4f5d93242da1f2e79575b6b7b3b7a63d1 | [
"MIT"
] | 659 | 2020-09-03T13:27:50.000Z | 2022-03-31T17:07:16.000Z | src/schemathesis/service/handler.py | gluhar2006/schemathesis | 3cb6b0b4f5d93242da1f2e79575b6b7b3b7a63d1 | [
"MIT"
] | 570 | 2020-09-03T15:57:43.000Z | 2022-03-31T17:13:52.000Z | src/schemathesis/service/handler.py | gluhar2006/schemathesis | 3cb6b0b4f5d93242da1f2e79575b6b7b3b7a63d1 | [
"MIT"
] | 66 | 2020-09-05T07:09:03.000Z | 2022-03-17T08:17:55.000Z | import threading
from queue import Queue
import attr
from ..cli.context import ExecutionContext
from ..cli.handlers import EventHandler
from ..runner import events
from . import worker
from .constants import DEFAULT_URL, STOP_MARKER, WORKER_JOIN_TIMEOUT
@attr.s(slots=True)  # pragma: no mutate
class ServiceReporter(EventHandler):
    """Send events to the worker that communicates with Schemathesis.io."""

    # Queue on which the background worker publishes its results.
    out_queue: Queue = attr.ib()  # pragma: no mutate
    # Token used to authenticate against the Schemathesis.io service.
    token: str = attr.ib()  # pragma: no mutate
    # Base URL of the Schemathesis.io service.
    url: str = attr.ib(default=DEFAULT_URL)  # pragma: no mutate
    # Queue through which run events are handed over to the worker.
    in_queue: Queue = attr.ib(factory=Queue)  # pragma: no mutate
    # Background thread running ``worker.start``; created in __attrs_post_init__.
    worker: threading.Thread = attr.ib(init=False)  # pragma: no mutate

    def __attrs_post_init__(self) -> None:
        # A worker thread, that does all the work concurrently
        self.worker = threading.Thread(
            target=worker.start,
            kwargs={"token": self.token, "url": self.url, "in_queue": self.in_queue, "out_queue": self.out_queue},
        )
        self.worker.start()

    def handle_event(self, context: ExecutionContext, event: events.ExecutionEvent) -> None:
        # Forward every execution event to the worker; Queue.put is thread-safe.
        self.in_queue.put(event)

    def shutdown(self) -> None:
        self._stop_worker()

    def _stop_worker(self) -> None:
        # The sentinel tells the worker loop to finish; join with a timeout so
        # a wedged worker cannot block shutdown indefinitely (the thread may
        # still be alive if the timeout elapses).
        self.in_queue.put(STOP_MARKER)
        self.worker.join(WORKER_JOIN_TIMEOUT)
| 34.025 | 114 | 0.689199 | import threading
from queue import Queue
import attr
from ..cli.context import ExecutionContext
from ..cli.handlers import EventHandler
from ..runner import events
from . import worker
from .constants import DEFAULT_URL, STOP_MARKER, WORKER_JOIN_TIMEOUT
@attr.s(slots=True)
class ServiceReporter(EventHandler):
out_queue: Queue = attr.ib()
token: str = attr.ib()
url: str = attr.ib(default=DEFAULT_URL)
in_queue: Queue = attr.ib(factory=Queue)
worker: threading.Thread = attr.ib(init=False)
def __attrs_post_init__(self) -> None:
self.worker = threading.Thread(
target=worker.start,
kwargs={"token": self.token, "url": self.url, "in_queue": self.in_queue, "out_queue": self.out_queue},
)
self.worker.start()
def handle_event(self, context: ExecutionContext, event: events.ExecutionEvent) -> None:
self.in_queue.put(event)
def shutdown(self) -> None:
self._stop_worker()
def _stop_worker(self) -> None:
self.in_queue.put(STOP_MARKER)
self.worker.join(WORKER_JOIN_TIMEOUT)
| true | true |
f7f533b85a402cb7b0d8f610041f458d8977cf7f | 3,734 | py | Python | src/the_tale/the_tale/game/bills/tests/test_place_description.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | 1 | 2020-04-02T11:51:20.000Z | 2020-04-02T11:51:20.000Z | src/the_tale/the_tale/game/bills/tests/test_place_description.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/game/bills/tests/test_place_description.py | devapromix/the-tale | 2a10efd3270734f8cf482b4cfbc5353ef8f0494c | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
class PlaceDescriptionTests(helpers.BaseTestPrototypes):
    """Tests for the place-description bill.

    NOTE(review): names such as ``helpers``, ``places_storage``, ``bills``,
    ``prototypes``, ``relations``, ``mock`` and ``datetime`` are injected by
    ``smart_imports.all()`` at module import time.
    """

    def setUp(self):
        """Create two places and a bill proposing a new description for the first."""
        super(PlaceDescriptionTests, self).setUp()

        self.place = places_storage.places.all()[0]
        self.place.description = 'old description'
        places_logic.save_place(self.place)

        self.place_2 = places_storage.places.all()[1]

        # "PlaceDescripton" (sic) is the project's actual class name.
        self.bill_data = bills.place_description.PlaceDescripton(place_id=self.place.id, description='new description')
        self.bill = prototypes.BillPrototype.create(self.account1, 'bill-1-caption', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')

    def test_create(self):
        """The bill data stores the target place and the proposed description."""
        self.assertEqual(self.bill.data.place_id, self.place.id)
        self.assertEqual(self.bill.data.description, 'new description')

    def test_actors(self):
        """The only actor of the bill is the affected place (identity check)."""
        self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.place)])

    def test_update(self):
        """Updating the bill via the user form changes both place and description."""
        form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
                                                         'place': self.place_2.id,
                                                         'chronicle_on_accepted': 'chronicle-on-accepted',
                                                         'new_description': 'new new description'})
        self.assertTrue(form.is_valid())
        self.bill.update(form)

        # Reload from storage to make sure the change was persisted.
        self.bill = prototypes.BillPrototype.get_by_id(self.bill.id)

        self.assertEqual(self.bill.data.place_id, self.place_2.id)
        self.assertEqual(self.bill.data.description, 'new new description')

    def test_long_description_error(self):
        """A description above the maximum length fails form validation."""
        form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
                                                         'place': self.place_2.id,
                                                         'new_description': '!' * (places_conf.settings.MAX_DESCRIPTION_LENGTH + 1)})
        self.assertFalse(form.is_valid())

    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_apply(self):
        """An accepted bill replaces the place description."""
        # 1 vote against, 2 for (including the author) -> above the 0.6 threshold.
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)

        data = self.bill.user_form_initials
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)

        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)

        self.assertTrue(self.bill.apply())

        bill = prototypes.BillPrototype.get_by_id(self.bill.id)
        self.assertTrue(bill.state.is_ACCEPTED)

        self.assertNotEqual(self.place.description, 'old description')
        self.assertEqual(self.place.description, 'new description')

    @mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
    @mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
    def test_has_meaning__duplicate_description(self):
        """The bill loses its meaning once the place already has the proposed description."""
        prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
        prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)

        data = self.bill.user_form_initials
        data['approved'] = True
        form = self.bill.data.get_moderator_form_update(data)

        self.assertTrue(form.is_valid())
        self.bill.update_by_moderator(form)

        # Set the place description to the bill's proposal before applying.
        self.place.description = 'new description'
        places_logic.save_place(self.place)

        self.assertFalse(self.bill.has_meaning())
| 43.418605 | 147 | 0.663364 |
import smart_imports
smart_imports.all()
class PlaceDescriptionTests(helpers.BaseTestPrototypes):
def setUp(self):
super(PlaceDescriptionTests, self).setUp()
self.place = places_storage.places.all()[0]
self.place.description = 'old description'
places_logic.save_place(self.place)
self.place_2 = places_storage.places.all()[1]
self.bill_data = bills.place_description.PlaceDescripton(place_id=self.place.id, description='new description')
self.bill = prototypes.BillPrototype.create(self.account1, 'bill-1-caption', self.bill_data, chronicle_on_accepted='chronicle-on-accepted')
def test_create(self):
self.assertEqual(self.bill.data.place_id, self.place.id)
self.assertEqual(self.bill.data.description, 'new description')
def test_actors(self):
self.assertEqual([id(a) for a in self.bill_data.actors], [id(self.place)])
def test_update(self):
form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
'place': self.place_2.id,
'chronicle_on_accepted': 'chronicle-on-accepted',
'new_description': 'new new description'})
self.assertTrue(form.is_valid())
self.bill.update(form)
self.bill = prototypes.BillPrototype.get_by_id(self.bill.id)
self.assertEqual(self.bill.data.place_id, self.place_2.id)
self.assertEqual(self.bill.data.description, 'new new description')
def test_long_description_error(self):
form = self.bill.data.get_user_form_update(post={'caption': 'new-caption',
'place': self.place_2.id,
'new_description': '!' * (places_conf.settings.MAX_DESCRIPTION_LENGTH + 1)})
self.assertFalse(form.is_valid())
@mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def test_apply(self):
prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)
data = self.bill.user_form_initials
data['approved'] = True
form = self.bill.data.get_moderator_form_update(data)
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
self.assertTrue(self.bill.apply())
bill = prototypes.BillPrototype.get_by_id(self.bill.id)
self.assertTrue(bill.state.is_ACCEPTED)
self.assertNotEqual(self.place.description, 'old description')
self.assertEqual(self.place.description, 'new description')
@mock.patch('the_tale.game.bills.conf.settings.MIN_VOTES_PERCENT', 0.6)
@mock.patch('the_tale.game.bills.prototypes.BillPrototype.time_before_voting_end', datetime.timedelta(seconds=0))
def test_has_meaning__duplicate_description(self):
prototypes.VotePrototype.create(self.account2, self.bill, relations.VOTE_TYPE.AGAINST)
prototypes.VotePrototype.create(self.account3, self.bill, relations.VOTE_TYPE.FOR)
data = self.bill.user_form_initials
data['approved'] = True
form = self.bill.data.get_moderator_form_update(data)
self.assertTrue(form.is_valid())
self.bill.update_by_moderator(form)
self.place.description = 'new description'
places_logic.save_place(self.place)
self.assertFalse(self.bill.has_meaning())
| true | true |
f7f533c6e7e3733c5d4d3fe653d525a07fd8c8e1 | 1,407 | py | Python | slp/modules/norm.py | PansoK/slp | e2c478b00f8f054b24eebb257e18a57451471c79 | [
"MIT"
] | 21 | 2018-11-22T00:43:02.000Z | 2021-12-07T21:19:13.000Z | slp/modules/norm.py | baby636/slp | ac55154f063245e0e4ed584c59f16370d228d8a7 | [
"MIT"
] | 5 | 2018-12-11T16:00:52.000Z | 2021-06-02T04:09:37.000Z | slp/modules/norm.py | baby636/slp | ac55154f063245e0e4ed584c59f16370d228d8a7 | [
"MIT"
] | 10 | 2019-09-14T11:10:38.000Z | 2021-11-11T17:47:21.000Z | import torch
import torch.nn as nn
def safe_norm(x, eps=1e-5, dim=-1, keepdim=True):
    """Return the L2 norm of ``x`` along ``dim``.

    ``eps`` is added under the square root, keeping the result strictly
    positive even for an all-zero input.
    """
    sum_of_squares = torch.sum(x * x, dim=dim, keepdim=keepdim)
    return torch.sqrt(sum_of_squares + eps)
class LayerNormTf(nn.Module):
    """Layer normalization in the TensorFlow style.

    The epsilon is placed inside the square root, and the learned affine
    parameters are initialized to weight=1, bias=0.
    """

    def __init__(self, hidden_size: int, eps: float = 1e-12):
        super().__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Normalize over the last dimension, then apply the affine transform."""
        mean = x.mean(-1, keepdim=True)
        centered = x - mean
        variance = centered.pow(2).mean(-1, keepdim=True)
        normalized = centered / torch.sqrt(variance + self.variance_epsilon)
        return self.weight * normalized + self.bias
class ScaleNorm(nn.Module):
    """Scale normalization: rescale the input to a single learned length.

    The input is divided by its (eps-clamped) L2 norm along the last
    dimension and multiplied by one learned scalar ``g``, initialized to
    sqrt(hidden_size).
    """

    def __init__(self, hidden_size: int, eps=1e-5):
        super().__init__()
        self.eps = eps
        self.g = nn.Parameter(torch.tensor(hidden_size ** 0.5))

    def forward(self, x: torch.Tensor):
        # Clamp the norm away from zero before dividing to avoid blow-ups.
        norm = safe_norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
        return (self.g / norm) * x
# Default LayerNorm export: alias PyTorch's built-in implementation, so
# callers can opt into LayerNormTf/ScaleNorm explicitly when needed.
LayerNorm = nn.LayerNorm
| 33.5 | 121 | 0.651741 | import torch
import torch.nn as nn
def safe_norm(x, eps=1e-5, dim=-1, keepdim=True):
return torch.sqrt(torch.sum(torch.square(x), dim=dim, keepdim=keepdim) + eps)
class LayerNormTf(nn.Module):
def __init__(self, hidden_size: int, eps: float = 1e-12):
super(LayerNormTf, self).__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.bias = nn.Parameter(torch.zeros(hidden_size))
self.variance_epsilon = eps
def forward(self, x: torch.Tensor) -> torch.Tensor:
u = x.mean(-1, keepdim=True)
s = (x - u).pow(2).mean(-1, keepdim=True)
x = (x - u) / torch.sqrt(s + self.variance_epsilon)
return self.weight * x + self.bias
class ScaleNorm(nn.Module):
def __init__(self, hidden_size: int, eps=1e-5):
super(ScaleNorm, self).__init__()
self.eps = eps
self.g = nn.Parameter(torch.tensor(hidden_size ** 0.5))
def forward(self, x: torch.Tensor):
scaled_norm = self.g / safe_norm(x, dim=-1, keepdim=True).clamp(min=self.eps)
return scaled_norm * x
LayerNorm = nn.LayerNorm
| true | true |
f7f534141ef469afed6c4923a10957ff776e25f5 | 2,520 | py | Python | src/slap/configuration.py | NiklasRosenstein/slap | 7932ae6c0da9614f47491fe389a7951abb9966b6 | [
"MIT"
] | 1 | 2022-03-23T10:52:33.000Z | 2022-03-23T10:52:33.000Z | src/slap/configuration.py | NiklasRosenstein/slam | 7932ae6c0da9614f47491fe389a7951abb9966b6 | [
"MIT"
] | 17 | 2022-02-13T03:19:03.000Z | 2022-03-02T18:13:05.000Z | src/slap/configuration.py | NiklasRosenstein/slam | 7932ae6c0da9614f47491fe389a7951abb9966b6 | [
"MIT"
] | null | null | null | from __future__ import annotations
import logging
import typing as t
from pathlib import Path
from slap.util.toml_file import TomlFile
if t.TYPE_CHECKING:
from nr.util.functional import Once
logger = logging.getLogger(__name__)
class Configuration:
"""Represents the configuration stored in a directory, which is either read from `slap.toml` or `pyproject.toml`."""
#: The directory of the project. This is the directory where the `slap.toml` or `pyproject.toml` configuration
#: would usually reside in, but the existence of neither is absolutely required.
directory: Path
#: Points to the `pyproject.toml` file in the project and can be used to conveniently check for its existence
#: or to access its contents.
pyproject_toml: TomlFile
#: Points to the `slap.toml` file in the project and can be used to conveniently check for its existence
#: or to access its contents.
slap_toml: TomlFile
#: Use this to access the Slap configuration, automatically loaded from either `slap.toml` or the `tool.slap`
#: section in `pyproject.toml`. The attribute is a #Once instance, thus it needs to be called to retrieve
#: the contents. This is the same as #get_raw_configuration(), but is more efficient.
raw_config: Once[dict[str, t.Any]]
def __init__(self, directory: Path) -> None:
from nr.util.functional import Once
self.directory = directory
self.pyproject_toml = TomlFile(directory / "pyproject.toml")
self.slap_toml = TomlFile(directory / "slap.toml")
self.raw_config = Once(self.get_raw_configuration)
def __repr__(self) -> str:
return f'{type(self).__name__}(directory="{self.directory}")'
def get_raw_configuration(self) -> dict[str, t.Any]:
"""Loads the raw configuration data for Slap from either the `slap.toml` configuration file or `pyproject.toml`
under the `[slap.tool]` section. If neither of the files exist or the section in the pyproject does not exist,
an empty dictionary will be returned."""
if self.slap_toml.exists():
logger.debug("Reading configuration for <subj>%s</subj> from <val>%s</val>", self, self.slap_toml.path)
return self.slap_toml.value()
if self.pyproject_toml.exists():
logger.debug("Reading configuration for <subj>%s</subj> from <val>%s</val>", self, self.pyproject_toml.path)
return self.pyproject_toml.value().get("tool", {}).get("slap", {})
return {}
| 43.448276 | 120 | 0.697222 | from __future__ import annotations
import logging
import typing as t
from pathlib import Path
from slap.util.toml_file import TomlFile
if t.TYPE_CHECKING:
from nr.util.functional import Once
logger = logging.getLogger(__name__)
class Configuration:
directory: Path
pyproject_toml: TomlFile
slap_toml: TomlFile
from nr.util.functional import Once
self.directory = directory
self.pyproject_toml = TomlFile(directory / "pyproject.toml")
self.slap_toml = TomlFile(directory / "slap.toml")
self.raw_config = Once(self.get_raw_configuration)
def __repr__(self) -> str:
return f'{type(self).__name__}(directory="{self.directory}")'
def get_raw_configuration(self) -> dict[str, t.Any]:
if self.slap_toml.exists():
logger.debug("Reading configuration for <subj>%s</subj> from <val>%s</val>", self, self.slap_toml.path)
return self.slap_toml.value()
if self.pyproject_toml.exists():
logger.debug("Reading configuration for <subj>%s</subj> from <val>%s</val>", self, self.pyproject_toml.path)
return self.pyproject_toml.value().get("tool", {}).get("slap", {})
return {}
| true | true |
f7f5344290af26c3d7dc28024993d3e08b27be12 | 3,201 | py | Python | 1-99/20-29/29.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | 1-99/20-29/29.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | 1-99/20-29/29.py | dcragusa/LeetCode | 01c30de0832b378a1b054d80d1ea1d3f09a2abd3 | [
"MIT"
] | null | null | null | """
Given two integers dividend and divisor, divide two integers without using multiplication, division and mod operator.
Return the quotient after dividing dividend by divisor. The integer division should truncate toward zero, which means
losing its fractional part. For example, truncate(8.345) = 8 and truncate(-2.7335) = -2.
Example 1:
Input: dividend = 10, divisor = 3, Output: 3
Explanation: 10/3 = truncate(3.33333..) = 3.
Example 2:
Input: dividend = 7, divisor = -3, Output: -2
Explanation: 7/-3 = truncate(-2.33333..) = -2.
Note:
Both dividend and divisor will be 32-bit signed integers. The divisor will never be 0.
Assume we are dealing with an environment which can only store integers within the 32-bit signed integer range:
[−2^31, 2^31 − 1]. For the purpose of this problem, assume that your function returns 2^31 − 1 when the division
result overflows.
"""
"""
The brute force solution is just to implement repeated subtraction.
We can optimise this by implementing long multiplication/division.
"""
MIN_INT = -2**31
MAX_INT = 2**31 - 1
# def divide(dividend, divisor):
# sign = 1 if (abs_dividend := abs(dividend)) + (abs_divisor := abs(divisor)) == abs(dividend + divisor) else -1
# quotient = 0
# while abs_dividend - abs_divisor >= 0:
# quotient += 1
# abs_dividend -= abs_divisor
# result = quotient * sign
# if result < MIN_INT:
# return MIN_INT
# elif result > MAX_INT:
# return MAX_INT
# else:
# return result
def divide_positive_simple(dividend, divisor):
quotient = 0
while dividend >= divisor:
quotient += 1
dividend -= divisor
return quotient, dividend
def multiply_positive_simple(multiplicand, multiplier):
result = 0
for _ in range(multiplier):
result += multiplicand
return result
def multiply_positive_long(multiplicand, multiplier):
if multiplicand == 0 or multiplier == 0:
return 0
result = 0
for pow_10, digit in enumerate(reversed(str(multiplier))):
result += int(str(multiply_positive_simple(multiplicand, int(digit))) + '0'*pow_10)
return result
def divide(dividend, divisor):
sign = 1 if (abs_dividend := abs(dividend)) + (abs_divisor := abs(divisor)) == abs(dividend + divisor) else -1
if abs_divisor > abs_dividend:
return 0
dividend = str(abs_dividend)
working_dividend = ''
working_quotient = ''
for digit in dividend:
working_dividend = int(working_dividend + digit)
quotient, remainder = divide_positive_simple(working_dividend, abs_divisor)
working_quotient += str(quotient)
working_dividend = str(working_dividend - multiply_positive_long(quotient, abs_divisor))
result = int(working_quotient) * sign
if result < MIN_INT:
return MIN_INT
elif result > MAX_INT:
return MAX_INT
else:
return result
assert divide(2, 3) == 0
assert divide(3, 3) == 1
assert divide(10, 3) == 3
assert divide(7, -3) == -2
assert divide(-7, -3) == 2
assert divide(-7, 3) == -2
assert divide(-2**31, -1) == 2**31 - 1
assert divide(1004958205, -2137325331) == 0
assert divide(-1021989372, -82778243) == 12
| 32.01 | 117 | 0.678538 |
MIN_INT = -2**31
MAX_INT = 2**31 - 1
def divide_positive_simple(dividend, divisor):
quotient = 0
while dividend >= divisor:
quotient += 1
dividend -= divisor
return quotient, dividend
def multiply_positive_simple(multiplicand, multiplier):
result = 0
for _ in range(multiplier):
result += multiplicand
return result
def multiply_positive_long(multiplicand, multiplier):
if multiplicand == 0 or multiplier == 0:
return 0
result = 0
for pow_10, digit in enumerate(reversed(str(multiplier))):
result += int(str(multiply_positive_simple(multiplicand, int(digit))) + '0'*pow_10)
return result
def divide(dividend, divisor):
sign = 1 if (abs_dividend := abs(dividend)) + (abs_divisor := abs(divisor)) == abs(dividend + divisor) else -1
if abs_divisor > abs_dividend:
return 0
dividend = str(abs_dividend)
working_dividend = ''
working_quotient = ''
for digit in dividend:
working_dividend = int(working_dividend + digit)
quotient, remainder = divide_positive_simple(working_dividend, abs_divisor)
working_quotient += str(quotient)
working_dividend = str(working_dividend - multiply_positive_long(quotient, abs_divisor))
result = int(working_quotient) * sign
if result < MIN_INT:
return MIN_INT
elif result > MAX_INT:
return MAX_INT
else:
return result
assert divide(2, 3) == 0
assert divide(3, 3) == 1
assert divide(10, 3) == 3
assert divide(7, -3) == -2
assert divide(-7, -3) == 2
assert divide(-7, 3) == -2
assert divide(-2**31, -1) == 2**31 - 1
assert divide(1004958205, -2137325331) == 0
assert divide(-1021989372, -82778243) == 12
| true | true |
f7f534b949f53b0d0accf4f171701864247d36d4 | 6,676 | py | Python | arcade/experimental/lights.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | 824 | 2016-01-07T19:27:57.000Z | 2020-08-01T03:15:47.000Z | arcade/experimental/lights.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | 646 | 2016-01-08T02:42:31.000Z | 2020-08-03T14:13:27.000Z | arcade/experimental/lights.py | janscas/arcade | d83dda946563429c8ee7d1a036bc0407758c638f | [
"MIT"
] | 221 | 2016-01-07T22:36:33.000Z | 2020-07-24T23:30:08.000Z | from array import array
from typing import Iterable, Tuple, Sequence, List, Optional
from arcade import Color
from arcade import gl
from arcade.experimental.texture_render_target import RenderTargetTexture
class Light:
HARD = 1.0
SOFT = 0.0
def __init__(self, center_x: float, center_y: float,
radius: float = 50.0, color: Tuple[int, int, int] = (255, 255, 255),
mode: str = 'hard'):
"""Create a Light.
Note: It's important to separate lights that don't change properties
and static ones with the `usage` parameter.
:param Tuple[float, float] position: the position of the light
:param float radius: The radius of the light
:param str mode: `hard` or `soft`
"""
if not (isinstance(color, tuple) or isinstance(color, list)):
raise ValueError("Color must be a 3-4 element Tuple or List with red-green-blue and optionally an alpha.")
if not isinstance(mode, str) or not (mode == 'soft' or mode == 'hard'):
raise ValueError("Mode must be set to either 'soft' or 'hard'.")
self._center_x = center_x
self._center_y = center_y
self._radius = radius
self._attenuation = Light.HARD if mode == 'hard' else Light.SOFT
self._color = color
self._light_layer: Optional[LightLayer] = None
@property
def position(self) -> Tuple[float, float]:
"""Get or set the light position"""
return self._center_x, self._center_y
@position.setter
def position(self, value):
if self._light_layer:
self._light_layer._rebuild = True
self._center_x, self._center_y = value
@property
def radius(self) -> float:
"""Get or set the light size"""
return self._radius
@radius.setter
def radius(self, value):
if self._light_layer:
self._light_layer._rebuild = True
self._radius = value
class LightLayer(RenderTargetTexture):
def __init__(self, width: int, height: int):
"""Create a LightLayer
The size of a layer should ideally be of the same size and the screen.
:param Tuple[int, int] size: Width and height of light layer
"""
super().__init__(width, height)
self._lights: List[Light] = []
self._prev_target = None
self._rebuild = False
self._stride = 28
self._buffer = self.ctx.buffer(reserve=self._stride * 100)
self._vao = self.ctx.geometry([
gl.BufferDescription(
self._buffer,
'2f 1f 1f 3f',
['in_vert', 'in_radius', 'in_attenuation', 'in_color'],
normalized=['in_color'],
),
])
self._light_program = self.ctx.load_program(
vertex_shader=":resources:shaders/lights/point_lights_vs.glsl",
geometry_shader=":resources:shaders/lights/point_lights_geo.glsl",
fragment_shader=":resources:shaders/lights/point_lights_fs.glsl",
)
self._combine_program = self.ctx.load_program(
vertex_shader=":resources:shaders/lights/combine_vs.glsl",
fragment_shader=":resources:shaders/lights/combine_fs.glsl",
)
# NOTE: Diffuse buffer created in parent
self._light_buffer = self.ctx.framebuffer(color_attachments=self.ctx.texture((width, height), components=3))
@property
def diffuse_texture(self):
return self.texture
@property
def light_texture(self):
return self._light_buffer.color_attachments[0]
def resize(self, width, height):
super().resize(width, height)
self._light_buffer = self.ctx.framebuffer(color_attachments=self.ctx.texture((width, height), components=3))
def clear(self):
super().clear()
self._light_buffer.clear()
def add(self, light: Light):
"""Add a Light to the layer"""
self._lights.append(light)
light._light_layer = self
self._rebuild = True
def extend(self, lights: Sequence[Light]):
for light in lights:
self.add(light)
def remove(self, light: Light):
"""Remove a light to the layer"""
self._lights.remove(light)
light._light_layer = None
self._rebuild = True
def __len__(self) -> int:
"""Number of lights"""
return len(self._lights)
def __iter__(self) -> Iterable[Light]:
"""Return an iterable object of lights"""
return iter(self._lights)
def __getitem__(self, i) -> Light:
return self._lights[i]
def __enter__(self):
self._prev_target = self.ctx.active_framebuffer
self._fbo.use()
self._fbo.clear(self._background_color)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._prev_target.use()
def draw(self, position: Tuple[float, float] = (0, 0), target=None, ambient_color: Color = (64, 64, 64)):
"""Draw the lights
:param Tuple[float, float] position: Position offset (scrolling)
:param target: The window or framebuffer we want to render to (default is window)
:param Color ambient_color: The ambient light color
"""
if target is None:
target = self.window
# Re-build light data if needed
if self._rebuild and len(self._lights) > 0:
data: List[float] = []
for light in self._lights:
data.extend(light.position)
data.append(light.radius)
data.append(light._attenuation)
data.extend(light._color)
while self._buffer.size < len(data) * self._stride:
self._buffer.orphan(double=True)
self._buffer.write(data=array('f', data))
self._rebuild = False
# Render to light buffer
self._light_buffer.use()
self._light_buffer.clear()
if len(self._lights) > 0:
self._light_program['position'] = position
self.ctx.enable(self.ctx.BLEND)
self.ctx.blend_func = self.ctx.BLEND_ADDITIVE
self._vao.render(self._light_program, mode=self.ctx.POINTS, vertices=len(self._lights))
self.ctx.blend_func = self.ctx.BLEND_DEFAULT
# Combine pass
target.use()
self._combine_program['diffuse_buffer'] = 0
self._combine_program['light_buffer'] = 1
self._combine_program['ambient'] = ambient_color[:3]
self._fbo.color_attachments[0].use(0)
self._light_buffer.color_attachments[0].use(1)
self._quad_fs.render(self._combine_program)
| 34.770833 | 118 | 0.617436 | from array import array
from typing import Iterable, Tuple, Sequence, List, Optional
from arcade import Color
from arcade import gl
from arcade.experimental.texture_render_target import RenderTargetTexture
class Light:
HARD = 1.0
SOFT = 0.0
def __init__(self, center_x: float, center_y: float,
radius: float = 50.0, color: Tuple[int, int, int] = (255, 255, 255),
mode: str = 'hard'):
if not (isinstance(color, tuple) or isinstance(color, list)):
raise ValueError("Color must be a 3-4 element Tuple or List with red-green-blue and optionally an alpha.")
if not isinstance(mode, str) or not (mode == 'soft' or mode == 'hard'):
raise ValueError("Mode must be set to either 'soft' or 'hard'.")
self._center_x = center_x
self._center_y = center_y
self._radius = radius
self._attenuation = Light.HARD if mode == 'hard' else Light.SOFT
self._color = color
self._light_layer: Optional[LightLayer] = None
@property
def position(self) -> Tuple[float, float]:
return self._center_x, self._center_y
@position.setter
def position(self, value):
if self._light_layer:
self._light_layer._rebuild = True
self._center_x, self._center_y = value
@property
def radius(self) -> float:
return self._radius
@radius.setter
def radius(self, value):
if self._light_layer:
self._light_layer._rebuild = True
self._radius = value
class LightLayer(RenderTargetTexture):
def __init__(self, width: int, height: int):
super().__init__(width, height)
self._lights: List[Light] = []
self._prev_target = None
self._rebuild = False
self._stride = 28
self._buffer = self.ctx.buffer(reserve=self._stride * 100)
self._vao = self.ctx.geometry([
gl.BufferDescription(
self._buffer,
'2f 1f 1f 3f',
['in_vert', 'in_radius', 'in_attenuation', 'in_color'],
normalized=['in_color'],
),
])
self._light_program = self.ctx.load_program(
vertex_shader=":resources:shaders/lights/point_lights_vs.glsl",
geometry_shader=":resources:shaders/lights/point_lights_geo.glsl",
fragment_shader=":resources:shaders/lights/point_lights_fs.glsl",
)
self._combine_program = self.ctx.load_program(
vertex_shader=":resources:shaders/lights/combine_vs.glsl",
fragment_shader=":resources:shaders/lights/combine_fs.glsl",
)
self._light_buffer = self.ctx.framebuffer(color_attachments=self.ctx.texture((width, height), components=3))
@property
def diffuse_texture(self):
return self.texture
@property
def light_texture(self):
return self._light_buffer.color_attachments[0]
def resize(self, width, height):
super().resize(width, height)
self._light_buffer = self.ctx.framebuffer(color_attachments=self.ctx.texture((width, height), components=3))
def clear(self):
super().clear()
self._light_buffer.clear()
def add(self, light: Light):
self._lights.append(light)
light._light_layer = self
self._rebuild = True
def extend(self, lights: Sequence[Light]):
for light in lights:
self.add(light)
def remove(self, light: Light):
self._lights.remove(light)
light._light_layer = None
self._rebuild = True
def __len__(self) -> int:
return len(self._lights)
def __iter__(self) -> Iterable[Light]:
return iter(self._lights)
def __getitem__(self, i) -> Light:
return self._lights[i]
def __enter__(self):
self._prev_target = self.ctx.active_framebuffer
self._fbo.use()
self._fbo.clear(self._background_color)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._prev_target.use()
def draw(self, position: Tuple[float, float] = (0, 0), target=None, ambient_color: Color = (64, 64, 64)):
if target is None:
target = self.window
if self._rebuild and len(self._lights) > 0:
data: List[float] = []
for light in self._lights:
data.extend(light.position)
data.append(light.radius)
data.append(light._attenuation)
data.extend(light._color)
while self._buffer.size < len(data) * self._stride:
self._buffer.orphan(double=True)
self._buffer.write(data=array('f', data))
self._rebuild = False
self._light_buffer.use()
self._light_buffer.clear()
if len(self._lights) > 0:
self._light_program['position'] = position
self.ctx.enable(self.ctx.BLEND)
self.ctx.blend_func = self.ctx.BLEND_ADDITIVE
self._vao.render(self._light_program, mode=self.ctx.POINTS, vertices=len(self._lights))
self.ctx.blend_func = self.ctx.BLEND_DEFAULT
target.use()
self._combine_program['diffuse_buffer'] = 0
self._combine_program['light_buffer'] = 1
self._combine_program['ambient'] = ambient_color[:3]
self._fbo.color_attachments[0].use(0)
self._light_buffer.color_attachments[0].use(1)
self._quad_fs.render(self._combine_program)
| true | true |
f7f53509cf22349c11fc7794e4e12162c1415979 | 1,322 | py | Python | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainCustomLogConfigRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainCustomLogConfigRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/DescribeDomainCustomLogConfigRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeDomainCustomLogConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeDomainCustomLogConfig')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | 36.722222 | 82 | 0.769289 |
from aliyunsdkcore.request import RpcRequest
class DescribeDomainCustomLogConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'DescribeDomainCustomLogConfig')
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | true | true |
f7f53522f0e0ed669affbf35242cc86852027221 | 1,593 | py | Python | tests/test_callback.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | null | null | null | tests/test_callback.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | 66 | 2018-10-16T19:14:36.000Z | 2020-10-23T03:17:48.000Z | tests/test_callback.py | unfoldingWord-dev/door43-job-handler | d26424810706875dd6eef33288900ea486de2e03 | [
"MIT"
] | null | null | null | from unittest import TestCase, skip
from unittest.mock import Mock, patch
import json
import sqlalchemy
from rq import get_current_job
from rq_settings import prefix, callback_queue_name
from app_settings.app_settings import AppSettings
from callback import job
def my_get_current_job():
class Result:
id = 12345
origin = callback_queue_name
return Result()
class TestCallback(TestCase):
def setUp(self):
# Make sure that other tests didn't mess up our prefix
AppSettings(prefix=prefix)
def test_prefix(self):
self.assertEqual(prefix, AppSettings.prefix)
@skip("Not currently working")
@patch('callback.get_current_job', side_effect=my_get_current_job)
def test_bad_payload(self, mocked_get_current_job_function):
test_payload = {'something': 'anything',}
with self.assertRaises(KeyError):
job(test_payload)
@skip("Skip this test on Travis-CI coz it fails with AWS test credentials - leave for standalone testing")
@patch('callback.get_current_job', side_effect=my_get_current_job)
def test_typical_full_payload(self, mocked_get_current_job_function):
with open( 'tests/resources/webhook_post.json', 'rt' ) as json_file:
payload_json = json.load(json_file)
#with self.assertRaises(sqlalchemy.exc.OperationalError): # access denied to tx_db -- why did this stop happening???
#job(payload_json)
job(payload_json)
# After job has run, should update https://dev.door43.org/u/tx-manager-test-data/en-obs-rc-0.2/93829a566c/
| 34.630435 | 124 | 0.721908 | from unittest import TestCase, skip
from unittest.mock import Mock, patch
import json
import sqlalchemy
from rq import get_current_job
from rq_settings import prefix, callback_queue_name
from app_settings.app_settings import AppSettings
from callback import job
def my_get_current_job():
class Result:
id = 12345
origin = callback_queue_name
return Result()
class TestCallback(TestCase):
def setUp(self):
AppSettings(prefix=prefix)
def test_prefix(self):
self.assertEqual(prefix, AppSettings.prefix)
@skip("Not currently working")
@patch('callback.get_current_job', side_effect=my_get_current_job)
def test_bad_payload(self, mocked_get_current_job_function):
test_payload = {'something': 'anything',}
with self.assertRaises(KeyError):
job(test_payload)
@skip("Skip this test on Travis-CI coz it fails with AWS test credentials - leave for standalone testing")
@patch('callback.get_current_job', side_effect=my_get_current_job)
def test_typical_full_payload(self, mocked_get_current_job_function):
with open( 'tests/resources/webhook_post.json', 'rt' ) as json_file:
payload_json = json.load(json_file)
#with self.assertRaises(sqlalchemy.exc.OperationalError): # access denied to tx_db -- why did this stop happening???
#job(payload_json)
job(payload_json)
# After job has run, should update https://dev.door43.org/u/tx-manager-test-data/en-obs-rc-0.2/93829a566c/
| true | true |
f7f535b0326923fd5b6dca53b71446bb5f5c50b4 | 4,169 | py | Python | blueoil/datasets/pascalvoc_2007_2012.py | ruimashita/blueoil | e65d64dc0604193e2658d8e0cd6ece09260b806e | [
"Apache-2.0"
] | null | null | null | blueoil/datasets/pascalvoc_2007_2012.py | ruimashita/blueoil | e65d64dc0604193e2658d8e0cd6ece09260b806e | [
"Apache-2.0"
] | null | null | null | blueoil/datasets/pascalvoc_2007_2012.py | ruimashita/blueoil | e65d64dc0604193e2658d8e0cd6ece09260b806e | [
"Apache-2.0"
] | 1 | 2018-12-21T05:21:04.000Z | 2018-12-21T05:21:04.000Z | # -*- coding: utf-8 -*-
# Copyright 2018 The Blueoil Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
import functools
import numpy as np
from blueoil.utils.image import load_image
from blueoil.datasets.base import ObjectDetectionBase
from blueoil.datasets.pascalvoc_2007 import Pascalvoc2007
from blueoil.datasets.pascalvoc_2012 import Pascalvoc2012
class Pascalvoc20072012(ObjectDetectionBase):
classes = default_classes = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
num_classes = len(classes)
available_subsets = ["train", "validation", "test"]
extend_dir = None
@classmethod
@functools.lru_cache(maxsize=None)
def count_max_boxes(cls, skip_difficult=True):
"""Count max boxes size over all subsets."""
num_max_boxes = 0
for subset in cls.available_subsets:
obj = cls(subset=subset, is_shuffle=False, skip_difficult=skip_difficult)
gt_boxes_list = obj.annotations
subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list])
if subset_max >= num_max_boxes:
num_max_boxes = subset_max
return num_max_boxes
def __init__(
self,
subset="train",
is_standardize=True,
is_shuffle=True,
skip_difficult=True,
*args,
**kwargs
):
super().__init__(
subset=subset,
*args,
**kwargs,
)
self.is_standardize = is_standardize
self.is_shuffle = is_shuffle
self.skip_difficult = skip_difficult
self._init_files_and_annotations(*args, **kwargs)
def _init_files_and_annotations(self, *args, **kwargs):
"""Create files and annotations."""
if self.subset == "train":
subset = "train_validation"
elif self.subset == "validation" or self.subset == "test":
subset = "test"
if subset == "train_validation":
pascalvoc_2007 = Pascalvoc2007(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
pascalvoc_2012 = Pascalvoc2012(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
self.files = pascalvoc_2007.files + pascalvoc_2012.files
self.annotations = pascalvoc_2007.annotations + pascalvoc_2012.annotations
elif subset == "test":
pascalvoc_2007 = Pascalvoc2007(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
self.files = pascalvoc_2007.files
self.annotations = pascalvoc_2007.annotations
@property
def num_max_boxes(self):
# calculate by cls.count_max_boxes(self.skip_difficult)
if self.skip_difficult:
return 39
else:
return 56
@property
def num_per_epoch(self):
return len(self.files)
def __getitem__(self, i):
target_file = self.files[i]
image = load_image(target_file)
gt_boxes = self.annotations[i]
gt_boxes = np.array(gt_boxes)
gt_boxes = gt_boxes.copy() # is it really needed?
gt_boxes = self._fill_dummy_boxes(gt_boxes)
return (image, gt_boxes)
def __len__(self):
return self.num_per_epoch
| 31.583333 | 110 | 0.617174 |
import functools
import numpy as np
from blueoil.utils.image import load_image
from blueoil.datasets.base import ObjectDetectionBase
from blueoil.datasets.pascalvoc_2007 import Pascalvoc2007
from blueoil.datasets.pascalvoc_2012 import Pascalvoc2012
class Pascalvoc20072012(ObjectDetectionBase):
classes = default_classes = [
"aeroplane",
"bicycle",
"bird",
"boat",
"bottle",
"bus",
"car",
"cat",
"chair",
"cow",
"diningtable",
"dog",
"horse",
"motorbike",
"person",
"pottedplant",
"sheep",
"sofa",
"train",
"tvmonitor",
]
num_classes = len(classes)
available_subsets = ["train", "validation", "test"]
extend_dir = None
@classmethod
@functools.lru_cache(maxsize=None)
def count_max_boxes(cls, skip_difficult=True):
num_max_boxes = 0
for subset in cls.available_subsets:
obj = cls(subset=subset, is_shuffle=False, skip_difficult=skip_difficult)
gt_boxes_list = obj.annotations
subset_max = max([len(gt_boxes) for gt_boxes in gt_boxes_list])
if subset_max >= num_max_boxes:
num_max_boxes = subset_max
return num_max_boxes
def __init__(
self,
subset="train",
is_standardize=True,
is_shuffle=True,
skip_difficult=True,
*args,
**kwargs
):
super().__init__(
subset=subset,
*args,
**kwargs,
)
self.is_standardize = is_standardize
self.is_shuffle = is_shuffle
self.skip_difficult = skip_difficult
self._init_files_and_annotations(*args, **kwargs)
def _init_files_and_annotations(self, *args, **kwargs):
if self.subset == "train":
subset = "train_validation"
elif self.subset == "validation" or self.subset == "test":
subset = "test"
if subset == "train_validation":
pascalvoc_2007 = Pascalvoc2007(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
pascalvoc_2012 = Pascalvoc2012(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
self.files = pascalvoc_2007.files + pascalvoc_2012.files
self.annotations = pascalvoc_2007.annotations + pascalvoc_2012.annotations
elif subset == "test":
pascalvoc_2007 = Pascalvoc2007(subset=subset, skip_difficult=self.skip_difficult, *args, **kwargs)
self.files = pascalvoc_2007.files
self.annotations = pascalvoc_2007.annotations
@property
def num_max_boxes(self):
if self.skip_difficult:
return 39
else:
return 56
@property
def num_per_epoch(self):
return len(self.files)
def __getitem__(self, i):
target_file = self.files[i]
image = load_image(target_file)
gt_boxes = self.annotations[i]
gt_boxes = np.array(gt_boxes)
gt_boxes = gt_boxes.copy()
gt_boxes = self._fill_dummy_boxes(gt_boxes)
return (image, gt_boxes)
def __len__(self):
return self.num_per_epoch
| true | true |
f7f535b2aec21c2cb25b2cddbe7a0b802c586313 | 9,644 | py | Python | Python/Client/remote_click_control.py | henrymidles/LidarBot | f67b5ed77671abad7267a86f425192fc6d5aad42 | [
"MIT"
] | null | null | null | Python/Client/remote_click_control.py | henrymidles/LidarBot | f67b5ed77671abad7267a86f425192fc6d5aad42 | [
"MIT"
] | null | null | null | Python/Client/remote_click_control.py | henrymidles/LidarBot | f67b5ed77671abad7267a86f425192fc6d5aad42 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket
import time
import math
import threading
import numpy as np
from util import point_direction, point_distance
from queue import Queue
#from PythonRobotics.SLAM.ICP.iterative_closest_point import icp_matching
from RasPi_coms import RasPi_coms
from Turtle_UI import UI
import sys
HOST = 'raspberrypi' # The server's hostname or IP address
PORT = 65432 # The port used by the server
""" """
class LidarBot():
    """Client-side controller for a lidar-equipped robot.

    Connects to a Raspberry Pi server over TCP, renders the lidar scan and a
    clickable path target in a turtle-based UI, and translates clicks into
    turn/move commands sent over the socket.
    """
    def __init__(self):
        # Queue of UI button names clicked but not yet handled.
        self.button_queue = Queue(32)
        self.running = True
        # True while a queued turn/move action is being executed.
        self.moving = False
        # [start, target] positions for the current path; start is the robot origin.
        self.path_points = [[0,0], [0,0]]
        self.scan_points = [] #np.zeros(shape=(2,500))
        # History of estimated robot positions (only appended by ICP_tracking).
        self.travel_points = [[0,0]]
        self.bot_actions = Queue()
        self.last_scan_points = np.zeros(shape=(2,500))
        # Blocking TCP connection to the Raspberry Pi server.
        self.mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.mysocket.connect((HOST, PORT))
        #self.exe_thread = threading.Thread(target=self.thread_execute_actions, args=())
        #self.exe_thread.start()
        # UI invokes get_click_point on every screen click.
        self.t_ui = UI(self.get_click_point)
        self.Raspi = RasPi_coms(self.mysocket)
        #self.lidar_thread = threading.Thread(target=self.Raspi.run, args=())
    """ Runs when the screen is clicked, adds click position to a queue """
    def get_click_point(self, x, y):
        """UI click callback: enqueue a button press, or set the path target."""
        # check if click is in a button
        for key in self.t_ui.buttons:
            if point_distance(self.t_ui.buttons[key]['pos'], [x,y]) < self.t_ui.buttons[key]['size']:
                self.button_queue.put(key)
                return
        # otherwise add it to path
        self.path_points[1] = [x,y]
    """ Run the ICP algorithm to calculate new position, this doesn't work quite right, yet """
    def ICP_tracking(self, curpos):
        """Experimental ICP-based position tracking (currently non-functional).

        NOTE(review): the icp_matching calls are commented out, so `trans`
        and `startPos` are referenced without ever being assigned — calling
        this method raises UnboundLocalError/NameError. Left as-is because
        the author marked it as work-in-progress.
        """
        newsize = self.scan_points.shape[1]
        lastsize = self.last_scan_points.shape[1]
        # Trim whichever scan is larger so both point sets match in size.
        if newsize > lastsize:
            pass
            #rot, trans = icp_matching(self.last_scan_points, self.scan_points[:lastsize, :lastsize])
        elif newsize < lastsize:
            pass
            #rot, trans = icp_matching(self.last_scan_points[:newsize, :newsize], self.scan_points)
        else:
            pass
            #rot, trans = icp_matching(self.last_scan_points, self.scan_points)
        #print(time.time() - startTime)
        if startPos == True:
            # Accumulate translation onto the running position estimate.
            curpos[0] += trans[0]
            curpos[1] += trans[1]
            self.travel_points.append([curpos[0], curpos[1]])
        else:
            # First fix: take the translation as the absolute position.
            curpos[0] = trans[0]
            curpos[1] = trans[1]
            startPos = True
        #print(f"{pos[0]} , {pos[1]}")
        return curpos
    """ Check for new lidar data, this data is put in a queue """
    def check_lidar_queue(self):
        """Drain the Raspberry Pi lidar queue into self.scan_points."""
        if not self.Raspi.queue.empty():
            # Replace the previous scan wholesale with the newest one.
            self.scan_points.clear()
            while not self.Raspi.queue.empty():
                self.scan_points.append(self.Raspi.queue.get())
        # self.last_scan_points = self.scan_points.copy()
        # self.scan_points = np.zeros(shape=(2, self.Lidar.queue.qsize()))
        # idx = 0
        # while not self.Lidar.queue.empty():
        #     p = self.Lidar.queue.get()
        #     self.scan_points[0][idx] = p[0]
        #     self.scan_points[1][idx] = p[1]
        #     idx += 1
    """ """
    def thread_execute_actions(self):
        """Worker loop: execute queued turn-then-move actions over the socket.

        While an action runs, path_points[1] is animated toward the origin so
        the UI shows the remaining travel. NOTE(review): divides by
        act['turn_time'] / act['dist_time'] — a zero estimate would raise
        ZeroDivisionError; confirm estimate_action_time never returns 0 for
        queued actions.
        """
        while self.running:
            if not self.bot_actions.empty():
                self.moving = True
                act = self.bot_actions.get()
                # First, turn to correct direction
                self.mysocket.sendall(bytes(act['turn_msg'], 'UTF-8'))
                timer = round(act['turn_time'], 1)
                percentLeft = 1.0
                while timer >= 0:
                    print(f"{round(timer, 1)}", end='\r')
                    # Interpolate the displayed target as the robot rotates.
                    curDir = percentLeft * act['turn']
                    curx = act['dist'] * math.sin(math.radians(curDir))
                    cury = act['dist'] * math.cos(math.radians(curDir))
                    self.path_points[1] = [curx, cury]
                    time.sleep(0.1)
                    timer -= 0.1
                    percentLeft = timer / act['turn_time']
                time.sleep(0.5)
                # Then, move correct distance
                self.mysocket.sendall(bytes(act['dist_msg'], 'UTF-8'))
                timer = round(act['dist_time'], 1)
                percentLeft = 1.0
                while timer >= 0:
                    print(f"{round(timer, 1)}", end='\r')
                    # Shrink the displayed remaining distance as time elapses.
                    curDist = percentLeft * act['dist']
                    self.path_points[1] = [0, curDist]
                    time.sleep(0.1)
                    timer -= 0.1
                    percentLeft = timer / act['dist_time']
                print("")
                self.moving = False
                self.mysocket.sendall(bytes('Done\n', 'UTF-8'))
                #time.sleep(0.5)
    """ """
    def estimate_action_time(self, distance=None, heading=None):
        """Estimate seconds needed to move `distance` cm or turn `heading` degrees.

        Converts the request to stepper-motor steps, then models a constant
        acceleration phase plus (for long moves) a constant-velocity phase.
        Returns 0 when neither argument is given.
        """
        if distance != None:
            steps = abs(distance) * 97 # 97 steps / cm
        elif heading != None:
            steps = abs(heading) *17 # 17 steps / degree
        else:
            return 0
        if steps > 3570:
            # Past 3570 steps the motor is at full speed: fixed ramp-up time
            # plus steps at 5000 steps/s.
            acceltime = 1.43
            constVtime = (steps - 3570) / 5000
        else:
            acceltime = math.sqrt(steps/7000)
            constVtime = 0.2 # The math is a little off for short movements, so add a buffer
        return acceltime + constVtime
    """ Check what buttons were pressed, and executes the appropriate function"""
    def check_button_queue(self):
        """Handle pending UI button presses ('Go' queues an action, 'Stop' halts)."""
        while not self.button_queue.empty():
            key = self.button_queue.get()
            if key == 'Go' and self.moving == False:
                dist = round(point_distance(self.path_points[0], self.path_points[1]))
                # point_direction is measured from +x; subtract 90 to get a
                # heading relative to the robot's forward (+y) axis.
                dir = round(math.degrees(point_direction(self.path_points[1]))) - 90
                print(f"X: {self.path_points[1][0]} , Y: {self.path_points[1][1]} , Distance: {round(dist, 1)} , Direction: {round(dir, 1)}")
                # Normalize to (-180, 180], then flip sign for the robot's
                # turn convention.
                if dir > 180:
                    dir -= 360
                dir = -dir
                action = {
                    "turn": dir,
                    "dist": dist,
                    "turn_msg": f"TRN {dir}\n",
                    "dist_msg": f"MOV {dist}\n",
                    "turn_time": self.estimate_action_time(heading=dir),
                    "dist_time": self.estimate_action_time(distance=dist)
                }
                self.bot_actions.put(action)
            elif key == 'Stop':
                print("STOP")
                msg = f"STP0\n"
                self.mysocket.sendall(bytes(msg, 'UTF-8'))
    """ Main loop, run continiously """
    def main(self):
        """Main loop: poll the Pi for scan data and redraw the UI each pass."""
        #self.lidar_thread.start()
        lastDataTime = 0
        while self.running:
            #self.check_lidar_queue()
            newPoints = self.Raspi.sync_run()
            if newPoints != None:
                self.scan_points = newPoints
                # Print seconds elapsed since the previous scan (overwrites line).
                print(f"{round(time.time() - lastDataTime, 2)}", end='\r')
                lastDataTime = time.time()
            #self.check_button_queue()
            self.t_ui.clear()
            self.t_ui.draw_bot()
            self.t_ui.draw_buttons()
            # Path target is red while idle, green while executing an action.
            if self.moving == False: self.t_ui.draw_basic_points(self.path_points, color='red')
            else: self.t_ui.draw_basic_points(self.path_points, color='green')
            self.t_ui.draw_basic_points(self.scan_points, color='black')
            self.t_ui.update()
    """" Stop the program """
    def stop(self):
        """Shut down loops and close the socket (sleeps let threads wind down)."""
        self.running = False
        self.Raspi.running = False
        #self.lidar_thread.join()
        print("Closing")
        time.sleep(1)
        self.mysocket.shutdown(0)
        time.sleep(1)
        self.mysocket.close()
if __name__ == "__main__":
Bot = LidarBot()
try:
print("Starting")
Bot.main()
except KeyboardInterrupt:
Bot.stop()
# con.update()
# msg = "[,"
# for a in range(len(con.axis)):
# msg += f"{round(con.axis[a]*255)},"
# msg += ']'
# s.sendall(bytes(msg, 'UTF-8'))
# data = s.recv(8192).decode('UTF-8')
# if data[0] != 'L':
# print("No Start Byte")
# #print(data)
# continue
# points = data.split(';')
# del points[0] # first item contains start byte, so remove it
# turtle.penup()
# turtle.clear()
# wn.tracer(0) # This turns off screen updates
# turtle.color('red')
# for idx, point in enumerate(path):
# turtle.setpos(point)
# turtle.pendown()
# turtle.dot()
# turtle.penup()
# turtle.color('black')
# startTime = time.time()
# try:
# for idx, point in enumerate(points):
# a, r = point.split(',')
# a = float(a)/57.3
# r = float(r)
# x = -r*math.cos(a)
# y = r*math.sin(a)
# turtle.setpos(x,y)
# turtle.pendown()
# turtle.dot()
# except ValueError as e:
# print(f"Error at :{idx}/{len(points)}, {point}", )
# wn.update()
# print(f"\r{time.time() - startTime}", end='') | 35.19708 | 141 | 0.507466 |
import socket
import time
import math
import threading
import numpy as np
from util import point_direction, point_distance
from queue import Queue
from RasPi_coms import RasPi_coms
from Turtle_UI import UI
import sys
HOST = 'raspberrypi'
PORT = 65432 # The port used by the server
class LidarBot():
def __init__(self):
self.button_queue = Queue(32)
self.running = True
self.moving = False
self.path_points = [[0,0], [0,0]]
self.scan_points = [] #np.zeros(shape=(2,500))
self.travel_points = [[0,0]]
self.bot_actions = Queue()
self.last_scan_points = np.zeros(shape=(2,500))
self.mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mysocket.connect((HOST, PORT))
#self.exe_thread = threading.Thread(target=self.thread_execute_actions, args=())
#self.exe_thread.start()
self.t_ui = UI(self.get_click_point)
self.Raspi = RasPi_coms(self.mysocket)
#self.lidar_thread = threading.Thread(target=self.Raspi.run, args=())
def get_click_point(self, x, y):
# check if click is in a button
for key in self.t_ui.buttons:
if point_distance(self.t_ui.buttons[key]['pos'], [x,y]) < self.t_ui.buttons[key]['size']:
self.button_queue.put(key)
return
# otherwise add it to path
self.path_points[1] = [x,y]
def ICP_tracking(self, curpos):
newsize = self.scan_points.shape[1]
lastsize = self.last_scan_points.shape[1]
if newsize > lastsize:
pass
#rot, trans = icp_matching(self.last_scan_points, self.scan_points[:lastsize, :lastsize])
elif newsize < lastsize:
pass
#rot, trans = icp_matching(self.last_scan_points[:newsize, :newsize], self.scan_points)
else:
pass
#rot, trans = icp_matching(self.last_scan_points, self.scan_points)
#print(time.time() - startTime)
if startPos == True:
curpos[0] += trans[0]
curpos[1] += trans[1]
self.travel_points.append([curpos[0], curpos[1]])
else:
curpos[0] = trans[0]
curpos[1] = trans[1]
startPos = True
#print(f"{pos[0]} , {pos[1]}")
return curpos
def check_lidar_queue(self):
if not self.Raspi.queue.empty():
self.scan_points.clear()
while not self.Raspi.queue.empty():
self.scan_points.append(self.Raspi.queue.get())
# self.last_scan_points = self.scan_points.copy()
# self.scan_points = np.zeros(shape=(2, self.Lidar.queue.qsize()))
# idx = 0
# while not self.Lidar.queue.empty():
# p = self.Lidar.queue.get()
# self.scan_points[0][idx] = p[0]
# self.scan_points[1][idx] = p[1]
# idx += 1
def thread_execute_actions(self):
while self.running:
if not self.bot_actions.empty():
self.moving = True
act = self.bot_actions.get()
# First, turn to correct direction
self.mysocket.sendall(bytes(act['turn_msg'], 'UTF-8'))
timer = round(act['turn_time'], 1)
percentLeft = 1.0
while timer >= 0:
print(f"{round(timer, 1)}", end='\r')
curDir = percentLeft * act['turn']
curx = act['dist'] * math.sin(math.radians(curDir))
cury = act['dist'] * math.cos(math.radians(curDir))
self.path_points[1] = [curx, cury]
time.sleep(0.1)
timer -= 0.1
percentLeft = timer / act['turn_time']
time.sleep(0.5)
# Then, move correct distance
self.mysocket.sendall(bytes(act['dist_msg'], 'UTF-8'))
timer = round(act['dist_time'], 1)
percentLeft = 1.0
while timer >= 0:
print(f"{round(timer, 1)}", end='\r')
curDist = percentLeft * act['dist']
self.path_points[1] = [0, curDist]
time.sleep(0.1)
timer -= 0.1
percentLeft = timer / act['dist_time']
print("")
self.moving = False
self.mysocket.sendall(bytes('Done\n', 'UTF-8'))
#time.sleep(0.5)
def estimate_action_time(self, distance=None, heading=None):
if distance != None:
steps = abs(distance) * 97 # 97 steps / cm
elif heading != None:
steps = abs(heading) *17 # 17 steps / degree
else:
return 0
if steps > 3570:
acceltime = 1.43
constVtime = (steps - 3570) / 5000
else:
acceltime = math.sqrt(steps/7000)
constVtime = 0.2 # The math is a little off for short movements, so add a buffer
return acceltime + constVtime
def check_button_queue(self):
while not self.button_queue.empty():
key = self.button_queue.get()
if key == 'Go' and self.moving == False:
dist = round(point_distance(self.path_points[0], self.path_points[1]))
dir = round(math.degrees(point_direction(self.path_points[1]))) - 90
print(f"X: {self.path_points[1][0]} , Y: {self.path_points[1][1]} , Distance: {round(dist, 1)} , Direction: {round(dir, 1)}")
if dir > 180:
dir -= 360
dir = -dir
action = {
"turn": dir,
"dist": dist,
"turn_msg": f"TRN {dir}\n",
"dist_msg": f"MOV {dist}\n",
"turn_time": self.estimate_action_time(heading=dir),
"dist_time": self.estimate_action_time(distance=dist)
}
self.bot_actions.put(action)
elif key == 'Stop':
print("STOP")
msg = f"STP0\n"
self.mysocket.sendall(bytes(msg, 'UTF-8'))
def main(self):
#self.lidar_thread.start()
lastDataTime = 0
while self.running:
#self.check_lidar_queue()
newPoints = self.Raspi.sync_run()
if newPoints != None:
self.scan_points = newPoints
print(f"{round(time.time() - lastDataTime, 2)}", end='\r')
lastDataTime = time.time()
#self.check_button_queue()
self.t_ui.clear()
self.t_ui.draw_bot()
self.t_ui.draw_buttons()
if self.moving == False: self.t_ui.draw_basic_points(self.path_points, color='red')
else: self.t_ui.draw_basic_points(self.path_points, color='green')
self.t_ui.draw_basic_points(self.scan_points, color='black')
self.t_ui.update()
def stop(self):
self.running = False
self.Raspi.running = False
#self.lidar_thread.join()
print("Closing")
time.sleep(1)
self.mysocket.shutdown(0)
time.sleep(1)
self.mysocket.close()
if __name__ == "__main__":
Bot = LidarBot()
try:
print("Starting")
Bot.main()
except KeyboardInterrupt:
Bot.stop()
# con.update()
# msg = "[,"
# for a in range(len(con.axis)):
# msg += f"{round(con.axis[a]*255)},"
# msg += ']'
# s.sendall(bytes(msg, 'UTF-8'))
# data = s.recv(8192).decode('UTF-8')
# if data[0] != 'L':
# print("No Start Byte")
# #print(data)
# continue
# points = data.split(';')
# del points[0] # first item contains start byte, so remove it
# turtle.penup()
# turtle.clear()
# wn.tracer(0) # This turns off screen updates
# turtle.color('red')
# for idx, point in enumerate(path):
# turtle.setpos(point)
# turtle.pendown()
# turtle.dot()
# turtle.penup()
# turtle.color('black')
# startTime = time.time()
# try:
# for idx, point in enumerate(points):
# a, r = point.split(',')
# a = float(a)/57.3
# r = float(r)
# x = -r*math.cos(a)
# y = r*math.sin(a)
# turtle.setpos(x,y)
# turtle.pendown()
# turtle.dot()
# except ValueError as e:
# print(f"Error at :{idx}/{len(points)}, {point}", )
# wn.update()
# print(f"\r{time.time() - startTime}", end='') | true | true |
f7f535db764119819379354f631faeed54108bd0 | 906 | py | Python | resize.py | PainYo170/CS7323-CVproject | 284dbd0facd569be6b35e027d87446545ea1f491 | [
"Apache-2.0"
] | null | null | null | resize.py | PainYo170/CS7323-CVproject | 284dbd0facd569be6b35e027d87446545ea1f491 | [
"Apache-2.0"
] | null | null | null | resize.py | PainYo170/CS7323-CVproject | 284dbd0facd569be6b35e027d87446545ea1f491 | [
"Apache-2.0"
] | null | null | null | from PIL import Image
import os, sys
# path = "/home/lixm/Desktop/srgan/data2017/DIV2K_train_LR_bicubic_X4/DIV2K_train_LR_bicubic/X4/"
# Input directory; its contents are listed once at import time.
path = '/home/lixm/Desktop/ESRGAN/LR/'
dirs = os.listdir(path)
final_size = 128  # output images are final_size x final_size squares
def resize_aspect_fit():
    """Resize every image under `path` to a final_size x final_size square.

    The aspect ratio is preserved by scaling the longest side to
    `final_size`; the result is pasted centered on a black RGB canvas and
    written next to the input as '<stem>resized.png'.
    """
    for item in dirs:
        if item == '.DS_Store':  # macOS folder metadata, not an image
            continue
        src = path + item
        if not os.path.isfile(src):
            continue
        stem, _ = os.path.splitext(src)
        # Context manager closes the underlying file even if resizing fails
        # (the original leaked one open file handle per image).
        with Image.open(src) as im:
            ratio = float(final_size) / max(im.size)
            new_size = tuple(int(dim * ratio) for dim in im.size)
            # Image.LANCZOS is the same filter as Image.ANTIALIAS, which is
            # deprecated and removed in Pillow 10.
            resized = im.resize(new_size, Image.LANCZOS)
        canvas = Image.new("RGB", (final_size, final_size))
        canvas.paste(resized, ((final_size - new_size[0]) // 2,
                               (final_size - new_size[1]) // 2))
        canvas.save(stem + 'resized.png', 'PNG', quality=90)


resize_aspect_fit()
| 36.24 | 101 | 0.609272 | from PIL import Image
import os, sys
path = '/home/lixm/Desktop/ESRGAN/LR/'
dirs = os.listdir( path )
final_size = 128;
def resize_aspect_fit():
for item in dirs:
if item == '.DS_Store':
continue
if os.path.isfile(path+item):
im = Image.open(path+item)
f, e = os.path.splitext(path+item)
size = im.size
ratio = float(final_size) / max(size)
new_image_size = tuple([int(x*ratio) for x in size])
im = im.resize(new_image_size, Image.ANTIALIAS)
new_im = Image.new("RGB", (final_size, final_size))
new_im.paste(im, ((final_size-new_image_size[0])//2, (final_size-new_image_size[1])//2))
new_im.save(f + 'resized.png', 'PNG', quality=90)
resize_aspect_fit()
| true | true |
f7f53621237bbe4139cef15e1a9cc4264af8807a | 1,235 | py | Python | __init__.py | lsx137946009/wearableio | 4fd137ace0c0203b4c97dc39e4e4a08c7afc08d5 | [
"MIT"
] | null | null | null | __init__.py | lsx137946009/wearableio | 4fd137ace0c0203b4c97dc39e4e4a08c7afc08d5 | [
"MIT"
] | null | null | null | __init__.py | lsx137946009/wearableio | 4fd137ace0c0203b4c97dc39e4e4a08c7afc08d5 | [
"MIT"
] | null | null | null | #
__docformat__ = 'restructuredtext'  # fixed typo: was 'resreucturedtext'

# Fail fast with one clear error message if any hard runtime dependency
# is missing, instead of an obscure ImportError deep inside the package.
hard_dependencies = ('numpy', 'pandas')
missing_dependencies = []
for dependency in hard_dependencies:
    try:
        __import__(dependency)
    except ImportError:  # the exception object itself is not needed
        missing_dependencies.append(dependency)
if missing_dependencies:
    raise ImportError(
        "Missing required dependencies {0}".format(missing_dependencies))
# Keep the package namespace clean.
del hard_dependencies, dependency, missing_dependencies
from datetime import datetime
# TODO: add import
from wearableio.utils import (join_integer_decimal,
join_byteblocks,
join_complementary_byteblocks)
from wearableio.field import BaseField
from wearableio.frame import BaseFrame
from wearableio.sensomics.io import (read_sens_line,
read_sens_stream,
read_sens_text,
write_json)
#
from ._version import get_versions
v = get_versions()
__version__ = v.get('closest-tag', v['version'])
__git_version__ = v.get('full-revisionid')
del get_versions, v
# TODO: add modele level doc-string
__doc__ = """
sixing liu, jianqiang gong
""" | 28.068182 | 74 | 0.641296 |
__docformat__ = 'resreucturedtext'
hard_dependencies = ('numpy', 'pandas')
missing_dependencies = []
for dependency in hard_dependencies:
try:
__import__(dependency)
except ImportError as e:
missing_dependencies.append(dependency)
if missing_dependencies:
raise ImportError(
"Missing required dependencies {0}".format(missing_dependencies))
del hard_dependencies, dependency, missing_dependencies
from datetime import datetime
from wearableio.utils import (join_integer_decimal,
join_byteblocks,
join_complementary_byteblocks)
from wearableio.field import BaseField
from wearableio.frame import BaseFrame
from wearableio.sensomics.io import (read_sens_line,
read_sens_stream,
read_sens_text,
write_json)
from ._version import get_versions
v = get_versions()
__version__ = v.get('closest-tag', v['version'])
__git_version__ = v.get('full-revisionid')
del get_versions, v
__doc__ = """
sixing liu, jianqiang gong
""" | true | true |
f7f53670bebd1a0ac4fca46a744d73173a1bd4e9 | 4,291 | py | Python | torchtext/experimental/transforms.py | guyang3532/text | e2fc987ff6a002018040cffac5e0d61c3d0b06c6 | [
"BSD-3-Clause"
] | null | null | null | torchtext/experimental/transforms.py | guyang3532/text | e2fc987ff6a002018040cffac5e0d61c3d0b06c6 | [
"BSD-3-Clause"
] | 1 | 2020-12-15T01:12:34.000Z | 2020-12-15T01:12:34.000Z | torchtext/experimental/transforms.py | guyang3532/text | e2fc987ff6a002018040cffac5e0d61c3d0b06c6 | [
"BSD-3-Clause"
] | 1 | 2020-12-14T21:46:18.000Z | 2020-12-14T21:46:18.000Z | import torch
import torch.nn as nn
from typing import List, Tuple
__all__ = [
'BasicEnglishNormalize',
'RegexTokenizer'
]
class BasicEnglishNormalize(nn.Module):
    r"""Scriptable basic English text normalizer.

    Lowercases the input line, applies a fixed sequence of regex
    substitutions (isolating punctuation, dropping quotes, collapsing
    whitespace), then splits on whitespace.

    Examples:
        >>> import torch
        >>> from torchtext.experimental.transforms import BasicEnglishNormalize
        >>> normalizer = torch.jit.script(BasicEnglishNormalize())
        >>> tokens = normalizer('Basic English Normalization for a Line of Text')
    """
    regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]

    def __init__(self):
        super(BasicEnglishNormalize, self).__init__()
        patterns_list = [
            (r'\'', ' \' '),
            (r'\"', ''),
            (r'\.', ' . '),
            (r'<br \/>', ' '),
            (r',', ' , '),
            (r'\(', ' ( '),
            (r'\)', ' ) '),
            (r'\!', ' ! '),
            (r'\?', ' ? '),
            (r'\;', ' '),
            (r'\:', ' '),
            (r'\s+', ' ')]
        # Pre-compile each pattern into a torchtext Regex, keeping its
        # replacement string alongside it.
        self.regex_and_replacement_string_pairs = [
            (torch.classes.torchtext.Regex(pattern), replacement)
            for pattern, replacement in patterns_list]

    def forward(self, line: str) -> List[str]:
        r"""Normalize ``line`` and return its whitespace-separated tokens.

        Args:
            line (str): a line of text to tokenize.

        Returns:
            List[str]: tokens after lowercasing, regex normalization and
            whitespace splitting.
        """
        line = line.lower()
        for regex, replacement in self.regex_and_replacement_string_pairs:
            line = regex.Sub(line, replacement)
        return line.split()
class RegexTokenizer(nn.Module):
    r"""Scriptable regex tokenizer.

    Applies every (pattern, replacement) substitution from ``patterns_list``
    to the input line, in order, then splits on whitespace.

    Args:
        patterns_list (List[Tuple[str, str]]): ordered pairs of regex
            pattern string and replacement string.

    Examples:
        >>> import torch
        >>> from torchtext.experimental.transforms import RegexTokenizer
        >>> patterns_list = [(r'\'', ' \' '), (r'\"', '')]
        >>> tokenizer = torch.jit.script(RegexTokenizer(patterns_list))
        >>> tokens = tokenizer('Basic Regex Tokenization for a Line of Text')
    """
    regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]

    def __init__(self, patterns_list: List[Tuple[str, str]]):
        super(RegexTokenizer, self).__init__()
        # Pre-compile each pattern into a torchtext Regex, paired with its
        # replacement string.
        self.regex_and_replacement_string_pairs = [
            (torch.classes.torchtext.Regex(pattern), replacement)
            for pattern, replacement in patterns_list]

    def forward(self, line: str) -> List[str]:
        r"""Apply all substitutions to ``line`` and split on whitespace.

        Args:
            line (str): a line of text to tokenize.

        Returns:
            List[str]: the resulting tokens.
        """
        for regex, replacement in self.regex_and_replacement_string_pairs:
            line = regex.Sub(line, replacement)
        return line.split()
| 37.313043 | 118 | 0.60522 | import torch
import torch.nn as nn
from typing import List, Tuple
__all__ = [
'BasicEnglishNormalize',
'RegexTokenizer'
]
class BasicEnglishNormalize(nn.Module):
regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]
def __init__(self):
super(BasicEnglishNormalize, self).__init__()
patterns_list = [
(r'\'', ' \' '),
(r'\"', ''),
(r'\.', ' . '),
(r'<br \/>', ' '),
(r',', ' , '),
(r'\(', ' ( '),
(r'\)', ' ) '),
(r'\!', ' ! '),
(r'\?', ' ? '),
(r'\;', ' '),
(r'\:', ' '),
(r'\s+', ' ')]
regex_objects = map(lambda pattern_tuple: torch.classes.torchtext.Regex(pattern_tuple[0]), patterns_list)
replacement_strings = map(lambda pattern_tuple: pattern_tuple[1], patterns_list)
self.regex_and_replacement_string_pairs = list(zip(regex_objects, replacement_strings))
def forward(self, line: str) -> List[str]:
line = line.lower()
for regex, replacement_string in self.regex_and_replacement_string_pairs:
line = regex.Sub(line, replacement_string)
return line.split()
class RegexTokenizer(nn.Module):
regex_and_replacement_string_pairs: List[Tuple[torch.classes.torchtext.Regex, str]]
def __init__(self, patterns_list: List[Tuple[str, str]]):
super(RegexTokenizer, self).__init__()
regex_objects = map(lambda pattern_tuple: torch.classes.torchtext.Regex(pattern_tuple[0]), patterns_list)
replacement_strings = map(lambda pattern_tuple: pattern_tuple[1], patterns_list)
self.regex_and_replacement_string_pairs = list(zip(regex_objects, replacement_strings))
def forward(self, line: str) -> List[str]:
for regex, replacement_string in self.regex_and_replacement_string_pairs:
line = regex.Sub(line, replacement_string)
return line.split()
| true | true |
f7f53743ad2791ee628fc064bf32971196b1acd8 | 3,276 | py | Python | transwarp/utils.py | michaelliao/transwarp | d4f308ebe9c6698b013a946cb33b9cc9a6076321 | [
"Apache-2.0"
] | 26 | 2015-01-04T02:04:07.000Z | 2021-11-03T08:54:26.000Z | transwarp/utils.py | michaelliao/transwarp | d4f308ebe9c6698b013a946cb33b9cc9a6076321 | [
"Apache-2.0"
] | 1 | 2015-01-25T04:34:04.000Z | 2015-01-30T02:23:42.000Z | transwarp/utils.py | michaelliao/transwarp | d4f308ebe9c6698b013a946cb33b9cc9a6076321 | [
"Apache-2.0"
] | 35 | 2015-01-02T07:26:34.000Z | 2021-11-11T16:04:33.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Michael Liao'
'''
Utils
'''
import re, datetime
class Dict(dict):
    '''
    Simple dict but support access as x.y style.
    >>> d1 = Dict()
    >>> d1['x'] = 100
    >>> d1.x
    100
    >>> d1.y = 200
    >>> d1['y']
    200
    >>> d2 = Dict(a=1, b=2, c='3')
    >>> d2.c
    '3'
    >>> d2['empty']
    Traceback (most recent call last):
        ...
    KeyError: 'empty'
    >>> d2.empty
    Traceback (most recent call last):
        ...
    AttributeError: 'Dict' object has no attribute 'empty'
    >>> d3 = Dict(('a', 'b', 'c'), (1, 2, 3))
    >>> d3.a
    1
    >>> d3.b
    2
    >>> d3.c
    3
    '''
    def __init__(self, names=(), values=(), **kw):
        # Keyword arguments seed the dict; names/values are zipped in pairs.
        super(Dict, self).__init__(**kw)
        self.update(zip(names, values))

    def __getattr__(self, key):
        # Attribute reads fall through to item lookup; a missing key is
        # reported as an AttributeError, as attribute access conventions require.
        try:
            return self[key]
        except KeyError:
            raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute writes always become item writes.
        self[key] = value
_TIMEDELTA_ZERO = datetime.timedelta(0)
# timezone as UTC+8:00, UTC-10:00
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')
class UTC(datetime.tzinfo):
    '''
    A UTC tzinfo object.
    >>> tz0 = UTC('+00:00')
    >>> tz0.tzname(None)
    'UTC+00:00'
    >>> tz8 = UTC('+8:00')
    >>> tz8.tzname(None)
    'UTC+8:00'
    >>> tz7 = UTC('+7:30')
    >>> tz7.tzname(None)
    'UTC+7:30'
    >>> tz5 = UTC('-05:30')
    >>> tz5.tzname(None)
    'UTC-05:30'
    >>> from datetime import datetime
    >>> u = datetime.utcnow().replace(tzinfo=tz0)
    >>> l1 = u.astimezone(tz8)
    >>> l2 = u.replace(tzinfo=tz8)
    >>> d1 = u - l1
    >>> d2 = u - l2
    >>> d1.seconds
    0
    >>> d2.seconds
    28800
    '''
    def __init__(self, utc):
        # Accept loose input like ' +8:00 ' by trimming and upper-casing first.
        utc = str(utc.strip().upper())
        mt = _RE_TZ.match(utc)
        if not mt:
            raise ValueError('bad utc time zone')
        # Apply the sign to both components so e.g. '-05:30' becomes -5h-30m.
        sign = -1 if mt.group(1) == '-' else 1
        self._utcoffset = datetime.timedelta(hours=sign * int(mt.group(2)),
                                             minutes=sign * int(mt.group(3)))
        self._tzname = 'UTC%s' % utc

    def utcoffset(self, dt):
        return self._utcoffset

    def dst(self, dt):
        # This zone never observes daylight saving time.
        return _TIMEDELTA_ZERO

    def tzname(self, dt):
        return self._tzname

    def __str__(self):
        return 'UTC tzinfo object (%s)' % self._tzname

    __repr__ = __str__
def load_module(modname):
    '''
    Load module as object.
    >>> m1 = load_module('time')
    >>> type(m1)
    <type 'module'>
    >>> m1.__name__
    'time'
    >>> m2 = load_module('xml.dom')
    >>> type(m2)
    <type 'module'>
    >>> m2.__name__
    'xml.dom'
    >>> m3 = load_module('xml.sax.handler')
    >>> type(m3)
    <type 'module'>
    >>> m3.__name__
    'xml.sax.handler'
    >>> load_module('base64.b64encode')
    Traceback (most recent call last):
    ...
    ImportError: No module named b64encode
    '''
    # __import__ with a non-empty fromlist returns the leaf module rather
    # than the top-level package, so split off the parent name to use as
    # the fromlist entry (the whole name itself for a top-level module).
    dot = modname.rfind('.')
    if dot == (-1):
        name = modname
    else:
        name = modname[:dot]
    return __import__(modname, globals(), locals(), [name])
if __name__=='__main__':
    # Run the embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 22.135135 | 78 | 0.510073 |
__author__ = 'Michael Liao'
import re, datetime
class Dict(dict):
def __init__(self, names=(), values=(), **kw):
super(Dict, self).__init__(**kw)
for k, v in zip(names, values):
self[k] = v
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
_TIMEDELTA_ZERO = datetime.timedelta(0)
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')
class UTC(datetime.tzinfo):
def __init__(self, utc):
utc = str(utc.strip().upper())
mt = _RE_TZ.match(utc)
if mt:
minus = mt.group(1)=='-'
h = int(mt.group(2))
m = int(mt.group(3))
if minus:
h, m = (-h), (-m)
self._utcoffset = datetime.timedelta(hours=h, minutes=m)
self._tzname = 'UTC%s' % utc
else:
raise ValueError('bad utc time zone')
def utcoffset(self, dt):
return self._utcoffset
def dst(self, dt):
return _TIMEDELTA_ZERO
def tzname(self, dt):
return self._tzname
def __str__(self):
return 'UTC tzinfo object (%s)' % self._tzname
__repr__ = __str__
def load_module(modname):
last = modname.rfind('.')
name = modname if last==(-1) else modname[:last]
return __import__(modname, globals(), locals(), [name])
if __name__=='__main__':
import doctest
doctest.testmod()
| true | true |
f7f5390a4be37e465e2d42d1b4947568efc50c54 | 397 | py | Python | src/api_v1/viewsets/openaccess.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/api_v1/viewsets/openaccess.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/api_v1/viewsets/openaccess.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | import django_filters
from rest_framework import viewsets
from api_v1.serializers.openaccess import Czas_Udostepnienia_OpenAccess_Serializer
from bpp.models import Czas_Udostepnienia_OpenAccess
class Czas_Udostepnienia_OpenAccess_ViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only DRF endpoint listing all Czas_Udostepnienia_OpenAccess records."""
    queryset = Czas_Udostepnienia_OpenAccess.objects.all()
    serializer_class = Czas_Udostepnienia_OpenAccess_Serializer
| 33.083333 | 82 | 0.881612 | import django_filters
from rest_framework import viewsets
from api_v1.serializers.openaccess import Czas_Udostepnienia_OpenAccess_Serializer
from bpp.models import Czas_Udostepnienia_OpenAccess
class Czas_Udostepnienia_OpenAccess_ViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Czas_Udostepnienia_OpenAccess.objects.all()
serializer_class = Czas_Udostepnienia_OpenAccess_Serializer
| true | true |
f7f539a7e7d7c3e8bcaa3cdde2e7e3363c5d7a65 | 10,553 | py | Python | q_learning/train_arg.py | jakejhansen/minesweeper_solver | 5eaba3f3242f08cbbb69db56e7fde4d2cd104aec | [
"MIT"
] | 32 | 2018-01-09T16:47:57.000Z | 2022-03-13T13:07:15.000Z | q_learning/train_arg.py | jakejhansen/minesweeper_solver | 5eaba3f3242f08cbbb69db56e7fde4d2cd104aec | [
"MIT"
] | null | null | null | q_learning/train_arg.py | jakejhansen/minesweeper_solver | 5eaba3f3242f08cbbb69db56e7fde4d2cd104aec | [
"MIT"
] | 8 | 2018-05-12T06:39:08.000Z | 2021-06-06T17:30:12.000Z | """
This module contains class definitions for open ai gym environments.
"""
import argparse
import random
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from agent import QAgent
def setup_model(mode = 0):
parser = argparse.ArgumentParser(prog="train.py", description="Train Deep Q-Network for Minesweeper game")
# Atari ROM, TensorFlow model and output directory
parser.add_argument('--model', dest='model_file', type=str, required=False, help="path to TensorFlow model file")
parser.add_argument('--out', dest='output_dir', type=str, default="./q_learning/output/", help="output path models and screen captures")
parser.add_argument('--train', dest="is_train", action="store_true", help="training or only playing")
parser.add_argument('--randomstart', dest='random_start_wait', type=int, default=30, help="random number of frames to wait at start of episode")
parser.add_argument('--game', dest='game', type=str, default="DemonAttack-v0", help="The game we play")
parser.add_argument('--env', dest='env', type=str, default="atari", help="If we want to use atari or minesweeper")
parser.add_argument('--gpumemory', dest="gpu_memory", type=float, default=0.5, help="The percentage of GPU memory allowed to be used by Tensorflow")
# Parameters network input (screens)
parser.add_argument('--inputheight', dest="input_height", type=int, default=84, help="screen input height")
parser.add_argument('--inputwidth', dest="input_width", type=int, default=84, help="screen input width")
parser.add_argument('--historylength', dest="history_length", type=int, default=4, help="Numbe of moves which are repeated in atari")
parser.add_argument('--mines-min', dest="mines_min", type=int, default=5, help="The number of mines")
parser.add_argument('--mines-max', dest="mines_max", type=int, default=7, help="The number of mines")
parser.add_argument('--nchannels', dest="nchannels", type=int, default=4, help="screen input depth")
parser.add_argument('--network-type', dest='network_type', type=int, default=1, help="Different networks")
# Parameters CNN architecture
parser.add_argument('--filtersizes', dest="filter_sizes", type=str, default="8,4,3", help="CNN filter sizes")
parser.add_argument('--filterstrides', dest="filter_strides", type=str, default="4,2,1", help="CNN filter strides")
parser.add_argument('--numfilters', dest="num_filters", type=str, default="32,64,64", help="CNN number of filters per layer")
parser.add_argument('--numhidden', dest="num_hidden", type=int, default=512, help="CNN number of neurons in FC layer")
parser.add_argument('--duelingtype', dest="dueling_type", default=None, type=str, help="Type of dueling enabled")
# See
# http://cs231n.github.io/neural-networks-2/
parser.add_argument('--bias-init', dest="bias_init", type=float, default=0.01, help="The initial value of the biases")
# Parameters for training the CNN
parser.add_argument('--num-iterations', dest="num_iterations", type=int, default=50000000, help="Number of training iterations, i.e., number of passes, each pass using [batch size] number of examples")
parser.add_argument('--batchsize', dest="batch_size", type=int, default=32, help="training batch size")
parser.add_argument('--trainfreq', dest="train_freq", type=int, default=4, help="training frequency, default every frame")
parser.add_argument('--epsilonstep', dest="epsilon_step", type=float, default=1e6, help="epsilon decrease step, linear annealing over iterations")
parser.add_argument('--learnrate', dest="learning_rate", type=float, default=0.00025, help="optimization learning rate")
parser.add_argument('--learnratedecay', dest="learning_rate_decay", type=float, default=0.98, help="learning rate decay")
parser.add_argument('--learnratestep', dest="learning_rate_step", type=float, default=100000, help="learning rate decay step over iterations")
parser.add_argument('--learnratemin', dest="learning_rate_minimum", type=float, default=0.0001, help="minimum learning rate")
parser.add_argument('--discount', dest="discount", type=float, default=0.99, help="gamma for future discounted rewards")
parser.add_argument('--clipdelta', dest="clip_delta", type=bool, default=True, help="clipping of error term in loss function")
parser.add_argument('--networkupdate', dest="network_update_rate", type=float, default=10000, help="number of steps after which the Q-network is copied for predicting targets")
parser.add_argument('--batchaccumulator', dest="batch_accumulator", type=str, default="mean", help="batch accumulator in loss function (mean or sum)")
parser.add_argument('--replaycap', dest="replay_capacity", type=int, default=int(1e6), help="maximum number of samples in replay memory")
parser.add_argument('--trainstart', dest="train_start", type=int, default=50000, help="start training when replay memory is of this size")
# Parameters for evaluation of the model
parser.add_argument('--evalfreq', dest="eval_frequency", type=int, default=250000, help="frequency of model evaluation")
parser.add_argument('--evaliterations', dest="eval_iterations", type=int, default=125000, help="number of games played in each evaluation")
parser.add_argument('--evalepsilon', dest="eval_epsilon", type=float, default=0.05, help="epsilon random move when evaluating")
parser.add_argument('--minepsilon', dest="min_epsilon", type=float, default=0.1, help="Lowest epsilon when exploring")
parser.add_argument('--num-steps', dest="num_steps", type=int, default=5000, help="Number of test steps when playing, each step is an action")
parser.add_argument('--reward-recent-update', dest="reward_recent_update", type=int, default=10000, help="The number of episodes before resetting recent reward")
parser.add_argument('--num-games', dest="num_games", type=int, default=5000, help="Number of test games to play minesweeper")
# Parameters for outputting/debugging
parser.add_argument('--intsummary', dest="interval_summary", type=int, default=200, help="frequency of adding training summaries, currently depending on train_iteration")
parser.add_argument('--intcheckpoint', dest="interval_checkpoint", type=int, default=10000, help="frequency of saving model checkpoints")
parser.add_argument('--memorycheckpoint', dest="memory_checkpoint", type=int, default=int(1e5), help="Frequency of saving memory based on addition counter.")
parser.add_argument('--restore-memory', dest="restore_memory", type=bool, default=False, help="If True, restore replay memory.")
parser.add_argument('--show', dest="show_game", action="store_true", help="show the Minesweeper game output")
parser.add_argument('--eval-mode', dest="eval_mode",
help="0 = evaluate models in range (only used for selecting the best model), 1 = test model win-rate by playing the game, 2 = win-rate for random mines")
parser.add_argument('--seed', dest="seed", type=int, default=0, help="The random seed value. Default at 0 means deterministic for all ops in Tensorflow 1.4")
# Parse command line arguments and run the training process
parser.set_defaults(game="minesweeper")
parser.set_defaults(env='minesweeper')
parser.set_defaults(mines_min=6)
parser.set_defaults(mines_max=6)
parser.set_defaults(input_width=6)
parser.set_defaults(input_height=6)
parser.set_defaults(history_length=1)
parser.set_defaults(train_freq=1)
parser.set_defaults(nchannels=2)
parser.set_defaults(discount=0.0)
parser.set_defaults(network_type=1) # 2 is the one for the report graph results
#parser.set_defaults(clip_delta=True) # This does not really seem to do much since the rewards are so small
#parser.set_defaults(dueling_type="mean") # Without this and with fc, the same network as Jacob
parser.set_defaults(seed=0) # 9, 11
if mode == 0: # Train
print("Training the network")
parser.set_defaults(is_train=True)
params = parser.parse_args()
run_model(params)
elif mode == 1: # Test minesweeper
print("Test minesweeper model on 6x6 board with 6 mines")
parser.set_defaults(output_dir='./q_learning/output_best/')
parser.set_defaults(eval_iterations=10000)
parser.set_defaults(model_file='model-best')
parser.set_defaults(eval_mode=1)
params = parser.parse_args(args=[])
run_model(params)
elif mode == 2: # Evaluate for a different number of mines
print("Test minesweeper model on 6x6 board with a random number of mines")
parser.set_defaults(output_dir='./output_best/')
parser.set_defaults(eval_iterations=10000)
parser.set_defaults(model_file='model-best-random')
parser.set_defaults(eval_mode=2)
params = parser.parse_args()
print("\nTesting with best model on random number of mines")
run_model(params)
print("\nTesting with best model on board with 6 mines")
params.model_file = "model-best"
tf.reset_default_graph()
run_model(params)
elif mode == 3: # Play 10 games
parser.set_defaults(show_game=True)
parser.set_defaults(output_dir='./output_best/')
parser.set_defaults(eval_iterations=10)
parser.set_defaults(model_file='model-best')
parser.set_defaults(eval_mode=1)
params = parser.parse_args()
run_model(params)
# View tensorboard with
# tensorboard --logdir output
def run_model(params):
    """Seed every RNG, build the Q-learning agent and dispatch on the mode.

    ``params.is_train`` selects training; otherwise ``params.eval_mode``
    chooses evaluation (0), a single test run (1) or a sweep over 1-12
    mines (2).
    """
    seed = params.seed
    random.seed(seed)
    np.random.seed(seed)
    # The graph-level seed must be fixed before any session is created.
    tf.set_random_seed(seed)

    qagent = QAgent(params)
    if params.is_train:
        qagent.fit()
        return
    if params.eval_mode == 0:
        qagent.evaluate_mine()
    elif params.eval_mode == 1:
        qagent.test_mine()
    elif params.eval_mode == 2:
        for mine_count in range(1, 13):
            params.mines_min = mine_count
            params.mines_max = mine_count
            print("Mines =", mine_count)
            qagent.test_mine()
            # Rebuild the agent on a clean graph for the next mine count.
            tf.reset_default_graph()
            qagent = QAgent(params)
if __name__ == "__main__":
    # Mode 1: test the pre-trained best 6x6 / 6-mine model (see setup_model).
    setup_model(1)
import argparse
import random
import numpy as np
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from agent import QAgent
def setup_model(mode = 0):
parser = argparse.ArgumentParser(prog="train.py", description="Train Deep Q-Network for Minesweeper game")
parser.add_argument('--model', dest='model_file', type=str, required=False, help="path to TensorFlow model file")
parser.add_argument('--out', dest='output_dir', type=str, default="./q_learning/output/", help="output path models and screen captures")
parser.add_argument('--train', dest="is_train", action="store_true", help="training or only playing")
parser.add_argument('--randomstart', dest='random_start_wait', type=int, default=30, help="random number of frames to wait at start of episode")
parser.add_argument('--game', dest='game', type=str, default="DemonAttack-v0", help="The game we play")
parser.add_argument('--env', dest='env', type=str, default="atari", help="If we want to use atari or minesweeper")
parser.add_argument('--gpumemory', dest="gpu_memory", type=float, default=0.5, help="The percentage of GPU memory allowed to be used by Tensorflow")
parser.add_argument('--inputheight', dest="input_height", type=int, default=84, help="screen input height")
parser.add_argument('--inputwidth', dest="input_width", type=int, default=84, help="screen input width")
parser.add_argument('--historylength', dest="history_length", type=int, default=4, help="Numbe of moves which are repeated in atari")
parser.add_argument('--mines-min', dest="mines_min", type=int, default=5, help="The number of mines")
parser.add_argument('--mines-max', dest="mines_max", type=int, default=7, help="The number of mines")
parser.add_argument('--nchannels', dest="nchannels", type=int, default=4, help="screen input depth")
parser.add_argument('--network-type', dest='network_type', type=int, default=1, help="Different networks")
parser.add_argument('--filtersizes', dest="filter_sizes", type=str, default="8,4,3", help="CNN filter sizes")
parser.add_argument('--filterstrides', dest="filter_strides", type=str, default="4,2,1", help="CNN filter strides")
parser.add_argument('--numfilters', dest="num_filters", type=str, default="32,64,64", help="CNN number of filters per layer")
parser.add_argument('--numhidden', dest="num_hidden", type=int, default=512, help="CNN number of neurons in FC layer")
parser.add_argument('--duelingtype', dest="dueling_type", default=None, type=str, help="Type of dueling enabled")
parser.add_argument('--bias-init', dest="bias_init", type=float, default=0.01, help="The initial value of the biases")
parser.add_argument('--num-iterations', dest="num_iterations", type=int, default=50000000, help="Number of training iterations, i.e., number of passes, each pass using [batch size] number of examples")
parser.add_argument('--batchsize', dest="batch_size", type=int, default=32, help="training batch size")
parser.add_argument('--trainfreq', dest="train_freq", type=int, default=4, help="training frequency, default every frame")
parser.add_argument('--epsilonstep', dest="epsilon_step", type=float, default=1e6, help="epsilon decrease step, linear annealing over iterations")
parser.add_argument('--learnrate', dest="learning_rate", type=float, default=0.00025, help="optimization learning rate")
parser.add_argument('--learnratedecay', dest="learning_rate_decay", type=float, default=0.98, help="learning rate decay")
parser.add_argument('--learnratestep', dest="learning_rate_step", type=float, default=100000, help="learning rate decay step over iterations")
parser.add_argument('--learnratemin', dest="learning_rate_minimum", type=float, default=0.0001, help="minimum learning rate")
parser.add_argument('--discount', dest="discount", type=float, default=0.99, help="gamma for future discounted rewards")
parser.add_argument('--clipdelta', dest="clip_delta", type=bool, default=True, help="clipping of error term in loss function")
parser.add_argument('--networkupdate', dest="network_update_rate", type=float, default=10000, help="number of steps after which the Q-network is copied for predicting targets")
parser.add_argument('--batchaccumulator', dest="batch_accumulator", type=str, default="mean", help="batch accumulator in loss function (mean or sum)")
parser.add_argument('--replaycap', dest="replay_capacity", type=int, default=int(1e6), help="maximum number of samples in replay memory")
parser.add_argument('--trainstart', dest="train_start", type=int, default=50000, help="start training when replay memory is of this size")
parser.add_argument('--evalfreq', dest="eval_frequency", type=int, default=250000, help="frequency of model evaluation")
parser.add_argument('--evaliterations', dest="eval_iterations", type=int, default=125000, help="number of games played in each evaluation")
parser.add_argument('--evalepsilon', dest="eval_epsilon", type=float, default=0.05, help="epsilon random move when evaluating")
parser.add_argument('--minepsilon', dest="min_epsilon", type=float, default=0.1, help="Lowest epsilon when exploring")
parser.add_argument('--num-steps', dest="num_steps", type=int, default=5000, help="Number of test steps when playing, each step is an action")
parser.add_argument('--reward-recent-update', dest="reward_recent_update", type=int, default=10000, help="The number of episodes before resetting recent reward")
parser.add_argument('--num-games', dest="num_games", type=int, default=5000, help="Number of test games to play minesweeper")
parser.add_argument('--intsummary', dest="interval_summary", type=int, default=200, help="frequency of adding training summaries, currently depending on train_iteration")
parser.add_argument('--intcheckpoint', dest="interval_checkpoint", type=int, default=10000, help="frequency of saving model checkpoints")
parser.add_argument('--memorycheckpoint', dest="memory_checkpoint", type=int, default=int(1e5), help="Frequency of saving memory based on addition counter.")
parser.add_argument('--restore-memory', dest="restore_memory", type=bool, default=False, help="If True, restore replay memory.")
parser.add_argument('--show', dest="show_game", action="store_true", help="show the Minesweeper game output")
parser.add_argument('--eval-mode', dest="eval_mode",
help="0 = evaluate models in range (only used for selecting the best model), 1 = test model win-rate by playing the game, 2 = win-rate for random mines")
parser.add_argument('--seed', dest="seed", type=int, default=0, help="The random seed value. Default at 0 means deterministic for all ops in Tensorflow 1.4")
parser.set_defaults(game="minesweeper")
parser.set_defaults(env='minesweeper')
parser.set_defaults(mines_min=6)
parser.set_defaults(mines_max=6)
parser.set_defaults(input_width=6)
parser.set_defaults(input_height=6)
parser.set_defaults(history_length=1)
parser.set_defaults(train_freq=1)
parser.set_defaults(nchannels=2)
parser.set_defaults(discount=0.0)
parser.set_defaults(network_type=1)
lts(is_train=True)
params = parser.parse_args()
run_model(params)
elif mode == 1:
print("Test minesweeper model on 6x6 board with 6 mines")
parser.set_defaults(output_dir='./q_learning/output_best/')
parser.set_defaults(eval_iterations=10000)
parser.set_defaults(model_file='model-best')
parser.set_defaults(eval_mode=1)
params = parser.parse_args(args=[])
run_model(params)
elif mode == 2:
print("Test minesweeper model on 6x6 board with a random number of mines")
parser.set_defaults(output_dir='./output_best/')
parser.set_defaults(eval_iterations=10000)
parser.set_defaults(model_file='model-best-random')
parser.set_defaults(eval_mode=2)
params = parser.parse_args()
print("\nTesting with best model on random number of mines")
run_model(params)
print("\nTesting with best model on board with 6 mines")
params.model_file = "model-best"
tf.reset_default_graph()
run_model(params)
elif mode == 3:
parser.set_defaults(show_game=True)
parser.set_defaults(output_dir='./output_best/')
parser.set_defaults(eval_iterations=10)
parser.set_defaults(model_file='model-best')
parser.set_defaults(eval_mode=1)
params = parser.parse_args()
run_model(params)
def run_model(params):
random.seed(params.seed)
np.random.seed(params.seed)
et_random_seed(params.seed)
qagent = QAgent(params)
if params.is_train:
qagent.fit()
elif params.eval_mode == 0:
qagent.evaluate_mine()
elif params.eval_mode == 1:
qagent.test_mine()
elif params.eval_mode == 2:
for mines in range(1, 13):
params.mines_min=mines
params.mines_max=mines
print("Mines =", mines)
qagent.test_mine()
tf.reset_default_graph()
qagent = QAgent(params)
if __name__ == "__main__":
setup_model(1) | true | true |
f7f539e59dab61b4502c869063b1a80807789293 | 845 | py | Python | danish.py | danish123gupta/assignment10.1 | ba87192ee7c9ff7cdf70f2eed192157a81deee70 | [
"MIT"
] | null | null | null | danish.py | danish123gupta/assignment10.1 | ba87192ee7c9ff7cdf70f2eed192157a81deee70 | [
"MIT"
] | null | null | null | danish.py | danish123gupta/assignment10.1 | ba87192ee7c9ff7cdf70f2eed192157a81deee70 | [
"MIT"
] | null | null | null | #q.no.1
# q.no.1: print every line of danish.txt
# The context manager closes the file even if printing fails.
with open(r"danish.txt", "r") as infile:
    for line in infile.readlines():
        print(line)
#q.no.2
# q.no.2: count how many times a user-supplied word occurs in danish.txt
with open(r"danish.txt", "r") as f:
    f1 = f.read()
s = input("enter a word to count its occurence: ")
# Compare whole whitespace-separated words. The original looped over the
# text character by character, so a word longer than one letter could
# never match and the count was always 0.
c = sum(1 for word in f1.split() if word == s)
print(s, "occurs", c, "times.")
#q.no.3
# q.no.3: copy the contents of danish.txt into abc.txt
with open(r"danish.txt", "r") as source:
    contents = source.read()
with open(r"abc.txt", "w") as destination:
    destination.write(contents)
#q.no.4
# q.no.4: merge danish.txt and abc.txt line by line into abc.txt
# Read both files completely BEFORE writing: the original zipped over an
# "r+" handle while also writing to it, which interleaves reads and writes
# at an undefined file position and corrupts the result.
with open(r"danish.txt", "r") as f:
    lines_a = f.readlines()
with open(r"abc.txt", "r") as f1:
    lines_b = f1.readlines()
with open(r"abc.txt", "w") as out:
    for i, j in zip(lines_a, lines_b):
        out.write(i + j)
#q.no.5
# q.no.5: write 10 random numbers to abc.txt, then store them sorted in danish.txt
import random
with open(r"abc.txt", "w") as f:
    for _ in range(10):
        num = random.randint(1, 10)
        # One number per line. The original wrote the digits back to back
        # with no separator, so the two-digit value 10 could not be read
        # back (it was re-parsed one character at a time).
        f.write(str(num) + "\n")
with open(r"abc.txt", "r") as f:
    l = [int(token) for token in f.read().split()]
l.sort()
with open(r"danish.txt", "w") as f2:
    for j in l:
        f2.write(str(j) + "\n")
| 15.363636 | 49 | 0.543195 |
f=open(r"danish.txt","r")
f1=f.readlines()
for i in f1:
print(i)
f.close()
f=open(r"danish.txt","r")
f1=f.read()
s=input("enter a word to count its occurence: ")
c=0
for i in f1:
if i==s:
c+=1
f.close()
print(s,"occurs",c,"times.")
f=open(r"danish.txt","r")
f1=f.read()
f.close()
f2=open(r"abc.txt","w")
f2.write(f1)
f2.close()
f=open(r"danish.txt","r")
f1=open(r"abc.txt","r+")
for i,j in zip(f,f1):
f1.write(i+j)
f.close()
f1.close()
import random
f=open(r"abc.txt","w+")
for i in range(10):
num=random.randint(1,10)
f.write(str(num))
f.close()
f=open("abc.txt","r")
f1=f.read()
l=[]
for i in f1:
i=int(i)
l.append(i)
l.sort()
f2=open(r"danish.txt","w")
for j in l:
f2.write(str(j))
f2.close()
f.close()
| true | true |
f7f539efef8cdfab11f19a9f5c164febfc944e77 | 11,553 | py | Python | tests/loops/test_training_loop_flow_scalar.py | rhjohnstone/pytorch-lightning | 4cd7e77ad2471379eaf768c20d8a3284aeb8b0b5 | [
"Apache-2.0"
] | 2 | 2021-11-11T12:34:18.000Z | 2021-11-17T08:34:14.000Z | tests/loops/test_training_loop_flow_scalar.py | rhjohnstone/pytorch-lightning | 4cd7e77ad2471379eaf768c20d8a3284aeb8b0b5 | [
"Apache-2.0"
] | null | null | null | tests/loops/test_training_loop_flow_scalar.py | rhjohnstone/pytorch-lightning | 4cd7e77ad2471379eaf768c20d8a3284aeb8b0b5 | [
"Apache-2.0"
] | 1 | 2021-11-07T12:34:34.000Z | 2021-11-07T12:34:34.000Z | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from pytorch_lightning import Trainer
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loops.optimization.optimizer_loop import Closure
from pytorch_lightning.trainer.states import RunningStage
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.deterministic_model import DeterministicModel
from tests.helpers.utils import no_warning_call
def test__training_step__flow_scalar(tmpdir):
    """A model defining only ``training_step`` trains, and the unused
    ``training_step_end`` / ``training_epoch_end`` hooks never fire."""
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            # Fold the batch index into the scalar so each step's loss differs.
            acc = acc + batch_idx
            self.training_step_called = True
            return acc
        def backward(self, loss, optimizer, optimizer_idx):
            # Bypass DeterministicModel's backward override; use the stock one.
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    # Disable validation entirely for this test.
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert not model.training_epoch_end_called
def test__training_step__tr_step_end__flow_scalar(tmpdir):
    """``training_step`` plus ``training_step_end``: the step_end hook must
    receive exactly the scalar the step returned, still carrying one graph."""
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            acc = acc + batch_idx
            self.training_step_called = True
            # Remember the returned scalar so training_step_end can verify it.
            self.out = acc
            return acc
        def training_step_end(self, tr_step_output):
            assert self.out == tr_step_output
            assert self.count_num_graphs({"loss": tr_step_output}) == 1
            self.training_step_end_called = True
            return tr_step_output
        def backward(self, loss, optimizer, optimizer_idx):
            # Bypass DeterministicModel's backward override; use the stock one.
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    # Disable validation entirely for this test.
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert model.training_step_end_called
    assert not model.training_epoch_end_called
def test__training_step__epoch_end__flow_scalar(tmpdir):
    """``training_step`` + ``training_epoch_end``: epoch_end receives one dict
    per batch, and nothing leaks into the trainer metrics when no logging
    happens."""
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            acc = acc + batch_idx
            self.training_step_called = True
            return acc
        def training_epoch_end(self, outputs):
            self.training_epoch_end_called = True
            # verify we saw the current num of batches
            assert len(outputs) == 2
            for b in outputs:
                # time = 1
                assert len(b) == 1
                assert "loss" in b
                assert isinstance(b, dict)
        def backward(self, loss, optimizer, optimizer_idx):
            # Bypass DeterministicModel's backward override; use the stock one.
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert not model.training_step_end_called
    assert model.training_epoch_end_called
    # assert epoch end metrics were added
    assert len(trainer.callback_metrics) == 0
    assert len(trainer.progress_bar_metrics) == 0
    trainer.state.stage = RunningStage.TRAINING
    # make sure training outputs what is expected
    batch_idx, batch = 0, next(iter(model.train_dataloader()))
    train_step_out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
    assert len(train_step_out) == 1
    # Single optimizer -> unwrap the one result produced for this batch.
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out["loss"], torch.Tensor)
    # 171 is presumably the loss DeterministicModel's fixed weights produce
    # for batch_idx == 0 — NOTE(review): confirm against the fixture before
    # changing it.
    assert train_step_out["loss"].item() == 171
    # make sure the optimizer closure returns the correct things
    opt_closure = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._make_closure(
        batch, batch_idx, 0, trainer.optimizers[0]
    )
    opt_closure_result = opt_closure()
    assert opt_closure_result.item() == 171
def test__training_step__step_end__epoch_end__flow_scalar(tmpdir):
    """Checks train_step + training_step_end + training_epoch_end (all with scalar return from train_step)."""
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            acc = acc + batch_idx
            self.training_step_called = True
            return acc
        def training_step_end(self, tr_step_output):
            # step_end receives the raw scalar and must not detach its graph.
            assert isinstance(tr_step_output, torch.Tensor)
            assert self.count_num_graphs({"loss": tr_step_output}) == 1
            self.training_step_end_called = True
            return tr_step_output
        def training_epoch_end(self, outputs):
            self.training_epoch_end_called = True
            # verify we saw the current num of batches
            assert len(outputs) == 2
            for b in outputs:
                # time = 1
                assert len(b) == 1
                assert "loss" in b
                assert isinstance(b, dict)
        def backward(self, loss, optimizer, optimizer_idx):
            # Bypass DeterministicModel's backward override; use the stock one.
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    # make sure correct steps were called
    assert model.training_step_called
    assert model.training_step_end_called
    assert model.training_epoch_end_called
    # assert epoch end metrics were added
    assert len(trainer.callback_metrics) == 0
    assert len(trainer.progress_bar_metrics) == 0
    trainer.state.stage = RunningStage.TRAINING
    # make sure training outputs what is expected
    batch_idx, batch = 0, next(iter(model.train_dataloader()))
    train_step_out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
    assert len(train_step_out) == 1
    # Single optimizer -> unwrap the one result produced for this batch.
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out["loss"], torch.Tensor)
    # 171 is presumably the deterministic fixture's loss for the first batch —
    # NOTE(review): confirm against DeterministicModel before changing.
    assert train_step_out["loss"].item() == 171
    # make sure the optimizer closure returns the correct things
    opt_closure = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._make_closure(
        batch, batch_idx, 0, trainer.optimizers[0]
    )
    opt_closure_result = opt_closure()
    assert opt_closure_result.item() == 171
def test_train_step_no_return(tmpdir):
    """Tests that only training_step raises a warning when nothing is returned in case of
    automatic_optimization."""
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            self.training_step_called = True
            loss = self.step(batch[0])
            self.log("a", loss, on_step=True, on_epoch=True)
            # Deliberately returns None to trigger the warning.
        def training_epoch_end(self, outputs) -> None:
            # Steps that returned None contribute no outputs.
            assert len(outputs) == 0, outputs
        def validation_step(self, batch, batch_idx):
            self.validation_step_called = True
        def validation_epoch_end(self, outputs):
            assert len(outputs) == 0, outputs
    model = TestModel()
    trainer_args = dict(default_root_dir=tmpdir, fast_dev_run=2)
    trainer = Trainer(**trainer_args)
    # The warning is cached per message; clear so this run can emit it again.
    Closure.warning_cache.clear()
    with pytest.warns(UserWarning, match=r"training_step` returned `None"):
        trainer.fit(model)
    assert model.training_step_called
    assert model.validation_step_called
    model = TestModel()
    # With manual optimization, returning None is legitimate -> no warning.
    model.automatic_optimization = False
    trainer = Trainer(**trainer_args)
    Closure.warning_cache.clear()
    with no_warning_call(UserWarning, match=r"training_step` returned `None"):
        trainer.fit(model)
def test_training_step_no_return_when_even(tmpdir):
    """Tests correctness when some training steps have been skipped."""
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            self.training_step_called = True
            loss = self.step(batch[0])
            self.log("a", loss, on_step=True, on_epoch=True)
            # Even batch indices return None -> that optimization step is skipped.
            return loss if batch_idx % 2 else None
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=4,
        limit_val_batches=1,
        max_epochs=4,
        enable_model_summary=False,
        logger=False,
        enable_checkpointing=False,
    )
    # The warning is cached per message; clear so this run can emit it again.
    Closure.warning_cache.clear()
    with pytest.warns(UserWarning, match=r".*training_step` returned `None.*"):
        trainer.fit(model)
    trainer.state.stage = RunningStage.TRAINING
    # manually check a few batches
    for batch_idx, batch in enumerate(model.train_dataloader()):
        out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
        if not batch_idx % 2:
            # Skipped (None-returning) steps must produce an empty result list.
            assert out == []
def test_training_step_none_batches(tmpdir):
    """Tests correctness when the train dataloader gives None for some steps."""
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # Counts collate calls so every other batch collapses to None.
            self.counter = 0
        def collate_none_when_even(self, batch):
            if self.counter % 2 == 0:
                result = None
            else:
                result = default_collate(batch)
            self.counter += 1
            return result
        def train_dataloader(self):
            return DataLoader(RandomDataset(32, 4), collate_fn=self.collate_none_when_even)
        def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
            # None batches are skipped entirely, so their outputs are empty.
            if batch_idx % 2 == 0:
                assert outputs == []
            else:
                assert outputs
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_val_batches=1,
        max_epochs=4,
        enable_model_summary=False,
        logger=False,
        enable_checkpointing=False,
    )
    with pytest.warns(UserWarning, match=r".*train_dataloader yielded None.*"):
        trainer.fit(model)
| 33.008571 | 110 | 0.674284 |
import pytest
import torch
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from pytorch_lightning import Trainer
from pytorch_lightning.core.lightning import LightningModule
from pytorch_lightning.loops.optimization.optimizer_loop import Closure
from pytorch_lightning.trainer.states import RunningStage
from tests.helpers.boring_model import BoringModel, RandomDataset
from tests.helpers.deterministic_model import DeterministicModel
from tests.helpers.utils import no_warning_call
def test__training_step__flow_scalar(tmpdir):
class TestModel(DeterministicModel):
def training_step(self, batch, batch_idx):
acc = self.step(batch, batch_idx)
acc = acc + batch_idx
self.training_step_called = True
return acc
def backward(self, loss, optimizer, optimizer_idx):
return LightningModule.backward(self, loss, optimizer, optimizer_idx)
model = TestModel()
model.val_dataloader = None
trainer = Trainer(
default_root_dir=tmpdir,
limit_train_batches=2,
limit_val_batches=2,
max_epochs=2,
log_every_n_steps=1,
enable_model_summary=False,
)
trainer.fit(model)
assert model.training_step_called
assert not model.training_step_end_called
assert not model.training_epoch_end_called
def test__training_step__tr_step_end__flow_scalar(tmpdir):
    """The ``training_step`` output flows unchanged into ``training_step_end``."""

    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            result = self.step(batch, batch_idx) + batch_idx
            self.training_step_called = True
            self.out = result
            return result

        def training_step_end(self, tr_step_output):
            # The exact value returned by training_step must arrive here,
            # still attached to a single autograd graph.
            assert self.out == tr_step_output
            assert self.count_num_graphs({"loss": tr_step_output}) == 1
            self.training_step_end_called = True
            return tr_step_output

        def backward(self, loss, optimizer, optimizer_idx):
            # Bypass DeterministicModel's override and use the stock backward.
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)

    model = TestModel()
    model.val_dataloader = None  # disable the validation loop entirely

    Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    ).fit(model)

    assert model.training_step_called
    assert model.training_step_end_called
    assert not model.training_epoch_end_called
def test__training_step__epoch_end__flow_scalar(tmpdir):
    """Scalar ``training_step`` outputs are wrapped into dicts for ``training_epoch_end``.

    Afterwards the batch loop and the optimizer closure are driven by hand to
    verify the raw loss value (171 for this deterministic model) survives the
    internal plumbing.
    """
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            acc = acc + batch_idx
            self.training_step_called = True
            return acc
        def training_epoch_end(self, outputs):
            self.training_epoch_end_called = True
            # one dict per training batch (limit_train_batches=2)
            assert len(outputs) == 2
            for b in outputs:
                assert len(b) == 1
                assert "loss" in b
                assert isinstance(b, dict)
        def backward(self, loss, optimizer, optimizer_idx):
            # use the plain LightningModule backward, not DeterministicModel's
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert model.training_step_called
    assert not model.training_step_end_called
    assert model.training_epoch_end_called
    # nothing was logged, so no metrics should have accumulated
    assert len(trainer.callback_metrics) == 0
    assert len(trainer.progress_bar_metrics) == 0
    # drive one batch through the batch loop manually
    trainer.state.stage = RunningStage.TRAINING
    batch_idx, batch = 0, next(iter(model.train_dataloader()))
    train_step_out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out["loss"], torch.Tensor)
    assert train_step_out["loss"].item() == 171
    # the optimizer closure must yield the same loss value
    opt_closure = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._make_closure(
        batch, batch_idx, 0, trainer.optimizers[0]
    )
    opt_closure_result = opt_closure()
    assert opt_closure_result.item() == 171
def test__training_step__step_end__epoch_end__flow_scalar(tmpdir):
    """Full hook chain: ``training_step`` -> ``training_step_end`` -> ``training_epoch_end``.

    Same manual batch-loop / closure check as the epoch-end-only variant; the
    deterministic model's loss is expected to be 171.
    """
    class TestModel(DeterministicModel):
        def training_step(self, batch, batch_idx):
            acc = self.step(batch, batch_idx)
            acc = acc + batch_idx
            self.training_step_called = True
            return acc
        def training_step_end(self, tr_step_output):
            # the scalar tensor passes through still attached to one graph
            assert isinstance(tr_step_output, torch.Tensor)
            assert self.count_num_graphs({"loss": tr_step_output}) == 1
            self.training_step_end_called = True
            return tr_step_output
        def training_epoch_end(self, outputs):
            self.training_epoch_end_called = True
            # one dict per training batch (limit_train_batches=2)
            assert len(outputs) == 2
            for b in outputs:
                assert len(b) == 1
                assert "loss" in b
                assert isinstance(b, dict)
        def backward(self, loss, optimizer, optimizer_idx):
            # use the plain LightningModule backward, not DeterministicModel's
            return LightningModule.backward(self, loss, optimizer, optimizer_idx)
    model = TestModel()
    model.val_dataloader = None
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=2,
        limit_val_batches=2,
        max_epochs=2,
        log_every_n_steps=1,
        enable_model_summary=False,
    )
    trainer.fit(model)
    assert model.training_step_called
    assert model.training_step_end_called
    assert model.training_epoch_end_called
    # nothing was logged, so no metrics should have accumulated
    assert len(trainer.callback_metrics) == 0
    assert len(trainer.progress_bar_metrics) == 0
    # drive one batch through the batch loop manually
    trainer.state.stage = RunningStage.TRAINING
    batch_idx, batch = 0, next(iter(model.train_dataloader()))
    train_step_out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
    assert len(train_step_out) == 1
    train_step_out = train_step_out[0][0]
    assert isinstance(train_step_out["loss"], torch.Tensor)
    assert train_step_out["loss"].item() == 171
    # the optimizer closure must yield the same loss value
    opt_closure = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop._make_closure(
        batch, batch_idx, 0, trainer.optimizers[0]
    )
    opt_closure_result = opt_closure()
    assert opt_closure_result.item() == 171
def test_train_step_no_return(tmpdir):
    """``training_step`` returning ``None`` warns under automatic optimization
    (and produces empty ``*_epoch_end`` outputs), but is silent with manual
    optimization where returning nothing is legitimate."""
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            self.training_step_called = True
            loss = self.step(batch[0])
            self.log("a", loss, on_step=True, on_epoch=True)
            # no return -> implicitly None
        def training_epoch_end(self, outputs) -> None:
            assert len(outputs) == 0, outputs
        def validation_step(self, batch, batch_idx):
            self.validation_step_called = True
            # no return -> implicitly None
        def validation_epoch_end(self, outputs):
            assert len(outputs) == 0, outputs
    model = TestModel()
    trainer_args = dict(default_root_dir=tmpdir, fast_dev_run=2)
    trainer = Trainer(**trainer_args)
    # clear the dedup cache so the warning can fire again in this test
    Closure.warning_cache.clear()
    with pytest.warns(UserWarning, match=r"training_step` returned `None"):
        trainer.fit(model)
    assert model.training_step_called
    assert model.validation_step_called
    # manual optimization: returning None must NOT warn
    model = TestModel()
    model.automatic_optimization = False
    trainer = Trainer(**trainer_args)
    Closure.warning_cache.clear()
    with no_warning_call(UserWarning, match=r"training_step` returned `None"):
        trainer.fit(model)
def test_training_step_no_return_when_even(tmpdir):
    """Batches whose ``training_step`` returns ``None`` are skipped with a warning."""
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx):
            self.training_step_called = True
            loss = self.step(batch[0])
            self.log("a", loss, on_step=True, on_epoch=True)
            # drop the result for every even-indexed batch
            return loss if batch_idx % 2 else None
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_train_batches=4,
        limit_val_batches=1,
        max_epochs=4,
        enable_model_summary=False,
        logger=False,
        enable_checkpointing=False,
    )
    # clear the dedup cache so the warning can fire again in this test
    Closure.warning_cache.clear()
    with pytest.warns(UserWarning, match=r".*training_step` returned `None.*"):
        trainer.fit(model)
    # drive the batch loop manually: skipped batches must produce empty output
    trainer.state.stage = RunningStage.TRAINING
    for batch_idx, batch in enumerate(model.train_dataloader()):
        out = trainer.fit_loop.epoch_loop.batch_loop.run(batch, batch_idx)
        if not batch_idx % 2:
            assert out == []
def test_training_step_none_batches(tmpdir):
    """Batches collated to ``None`` are skipped and a warning is emitted."""
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # counts collate invocations (not batch indices)
            self.counter = 0
        def collate_none_when_even(self, batch):
            # every other collate call yields a None batch
            if self.counter % 2 == 0:
                result = None
            else:
                result = default_collate(batch)
            self.counter += 1
            return result
        def train_dataloader(self):
            return DataLoader(RandomDataset(32, 4), collate_fn=self.collate_none_when_even)
        def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx):
            # skipped (None) batches produce empty outputs
            # NOTE(review): assumes collate-call parity lines up with batch_idx
            if batch_idx % 2 == 0:
                assert outputs == []
            else:
                assert outputs
    model = TestModel()
    trainer = Trainer(
        default_root_dir=tmpdir,
        limit_val_batches=1,
        max_epochs=4,
        enable_model_summary=False,
        logger=False,
        enable_checkpointing=False,
    )
    with pytest.warns(UserWarning, match=r".*train_dataloader yielded None.*"):
        trainer.fit(model)
| true | true |
f7f53c9b431c38939880386dfe84de00452510eb | 17,710 | py | Python | oauth_provider/tests/protocol.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | null | null | null | oauth_provider/tests/protocol.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | null | null | null | oauth_provider/tests/protocol.py | ELSUru/ADL_LRS | aabeb9cf3e56795b148f37d07e1bb2b41e61e470 | [
"Apache-2.0"
] | 3 | 2021-01-14T12:51:24.000Z | 2022-03-15T17:11:11.000Z | import time
import cgi
import oauth2 as oauth
from django.test import Client
from oauth_provider.tests.auth import BaseOAuthTestCase
from oauth_provider.models import Token, Consumer, Scope
from oauth_provider.compat import get_user_model
User = get_user_model()
class ProtocolExample(BaseOAuthTestCase):
    """Set of tests based on the OAuth 1.0 "ProtocolExample" document.

    Walks the three-legged flow: obtain a request token, have the user
    (``jane``) authorize it, exchange it for an access token, and finally
    access a protected resource with an HMAC-SHA1 signed request.

    NOTE(review): response bodies are parsed with ``cgi.parse_qs``, which is
    deprecated in favor of ``urllib.parse.parse_qs`` (the ``cgi`` module is
    removed in Python 3.13) — worth replacing when this code is ported.
    """
    def _last_created_request_token(self):
        """Return the most recently created REQUEST token."""
        return list(Token.objects.filter(token_type=Token.REQUEST))[-1]
    def _last_created_access_token(self):
        """Return the most recently created ACCESS token."""
        return list(Token.objects.filter(token_type=Token.ACCESS))[-1]
    def _update_token_from_db(self, request_token):
        """Get fresh copy of the token from the DB"""
        return Token.objects.get(key=request_token.key)
    def _make_request_token_parameters(self):
        """Build PLAINTEXT-signed query parameters for a request-token call."""
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_signature': '%s&' % self.CONSUMER_SECRET,
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'requestnonce',
            'oauth_version': '1.0',
            'oauth_callback': 'http://printer.example.com/request_token_ready',
            'scope': 'photos', # custom argument to specify Protected Resource
        }
    def _make_access_token_parameters(self, token):
        """Build PLAINTEXT-signed parameters to exchange *token* for an access token."""
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_token': token.key,
            'oauth_signature_method': 'PLAINTEXT',
            'oauth_signature': '%s&%s' % (self.CONSUMER_SECRET, token.secret),
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'accessnonce',
            'oauth_version': '1.0',
            'oauth_verifier': token.verifier,
            'scope': 'photos',
        }
    def _make_protected_access_parameters(self, access_token):
        """Build HMAC-SHA1 parameters (signature added later) for a protected-resource call."""
        return {
            'oauth_consumer_key': self.CONSUMER_KEY,
            'oauth_token': access_token.key,
            'oauth_signature_method': 'HMAC-SHA1',
            'oauth_timestamp': str(int(time.time())),
            'oauth_nonce': 'accessresourcenonce',
            'oauth_version': '1.0',
        }
    def test_returns_invalid_params_empty_request(self):
        """Printer website tries to access the photo and receives
        HTTP 401 Unauthorized indicating it is private.
        The Service Provider includes the following header with the response:
        """
        response = self.c.get("/oauth/request_token/")
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response._headers['www-authenticate'], ('WWW-Authenticate', 'OAuth realm=""'))
        self.assertEqual(response.content, 'Invalid request parameters.')
    def test_returns_401_wrong_callback(self):
        """A malformed oauth_callback is rejected with 401."""
        parameters = self._make_request_token_parameters()
        parameters['oauth_callback'] = 'wrongcallback'
        parameters['oauth_nonce'] = 'requestnoncewrongcallback'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Invalid callback URL.')
    def test_401_for_wrong_scope(self):
        """Requesting an unknown scope is rejected with 401."""
        parameters = self._make_request_token_parameters()
        parameters['scope'] = 'videos'
        parameters['oauth_nonce'] = 'requestnoncevideos'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Scope does not exist.')
    def test_oob_callback(self):
        """With an 'oob' (out-of-band) callback, the Service Provider returns the
        token in the body and records no callback on the token."""
        parameters = self._make_request_token_parameters()
        parameters['oauth_callback'] = 'oob'
        parameters['oauth_nonce'] = 'requestnonceoob'
        response = self.c.get("/oauth/request_token/", parameters)
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        oob_token = self._last_created_request_token()
        self.assertTrue(oob_token.key in response_params['oauth_token'])
        self.assertTrue(oob_token.secret in response_params['oauth_token_secret'])
        self.assertFalse(oob_token.callback_confirmed)
        self.assertIsNone(oob_token.callback)
    def _validate_request_token_response(self, response):
        """Assert the response body carries the latest request token's key/secret."""
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        last_token = self._last_created_request_token()
        self.assertTrue(last_token.key in response_params['oauth_token'])
        self.assertTrue(last_token.secret in response_params['oauth_token_secret'])
        self.assertTrue(response_params['oauth_callback_confirmed'])
    def _obtain_request_token(self):
        """Request a new (unauthorized) request token and return it."""
        parameters = self._make_request_token_parameters()
        response = self.c.get("/oauth/request_token/", parameters)
        # The Service Provider checks the signature and replies with an unauthorized Request Token in the body of the HTTP response
        self._validate_request_token_response(response)
        return self._last_created_request_token()
    def test_obtain_request_token(self):
        """A valid signed request yields a request token."""
        self._obtain_request_token()
    def test_provider_redirects_to_login_page(self):
        """The Service Provider asks Jane to sign-in using her username and password
        """
        token = self._obtain_request_token()
        parameters = {
            'oauth_token': token.key,
        }
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(token.key in response['Location'])
        self.c.login(username='jane', password='toto')
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 200)
    def test_authorize_without_session_parameter(self):
        """Re-POSTing the authorization without the session parameter is rejected."""
        # Then consumer obtains a Request Token
        token = self._obtain_request_token()
        parameters = {'oauth_token': token.key}
        self.c.login(username='jane', password='toto')
        parameters['authorize_access'] = True
        response = self.c.post("/oauth/authorize/", parameters)
        # without session parameter (previous POST removed it)
        response = self.c.post("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Action not allowed.')
    def test_access_not_granted_by_the_user(self):
        """Declining authorization redirects back with an error parameter."""
        token = self._obtain_request_token()
        parameters = {'oauth_token': token.key}
        self.c.login(username='jane', password='toto')
        self.c.get("/oauth/authorize/", parameters) # set session id
        parameters['authorize_access'] = False
        response = self.c.post("/oauth/authorize/", parameters)
        self.assertTrue('error=Access+not+granted+by+user' in response['Location'])
    def _request_authorization(self, request_token):
        """Request authorization for the request token.
        """
        self.assertFalse(request_token.is_approved)
        parameters = {'oauth_token': request_token.key}
        self.c.login(username='jane', password='toto')
        self.c.get("/oauth/authorize/", parameters)
        parameters['authorize_access'] = 1
        self.c.post("/oauth/authorize/", parameters)
        request_token = self._update_token_from_db(request_token)
        self.assertTrue(request_token.is_approved)
    def test_request_authorization(self):
        """Jane can approve a freshly obtained request token."""
        token = self._obtain_request_token()
        self._request_authorization(token)
    def _obtain_access_token(self, request_token):
        """Exchange an approved *request_token* for an access token and return it."""
        parameters = self._make_access_token_parameters(request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        response_params = cgi.parse_qs(response.content)
        access_token = self._last_created_access_token()
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response_params['oauth_token'][0], access_token.key)
        self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
        self.assertEqual(access_token.user.username, 'jane')
        return access_token
    def test_request_another_access_token(self):
        """The Consumer will not be able to request another Access Token
        with the same parameters because the Request Token has been deleted
        once Access Token is created
        """
        request_token = self._obtain_request_token()
        self._request_authorization(request_token)
        request_token = self._update_token_from_db(request_token)
        self._obtain_access_token(request_token)
        parameters = self._make_access_token_parameters(request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Invalid request token.')
    def test_request_access_token_invalid_verifier(self):
        """The Consumer will not be able to request another Access Token
        with a missing or invalid verifier
        """
        jane = User.objects.get(username='jane')
        new_request_token = Token.objects.create_token(
            token_type=Token.REQUEST,
            timestamp=str(int(time.time())),
            consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
            user=jane,
            scope=Scope.objects.get(name='photos'))
        new_request_token.is_approved = True
        new_request_token.save()
        parameters = self._make_access_token_parameters(new_request_token)
        parameters['oauth_token'] = new_request_token.key
        parameters['oauth_signature'] = '%s&%s' % (self.CONSUMER_SECRET, new_request_token.secret)
        parameters['oauth_verifier'] = 'invalidverifier'
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Invalid OAuth verifier.')
    def test_request_access_token_not_approved_request_token(self):
        """The Consumer will not be able to request an Access Token if the token is not approved
        """
        jane = User.objects.get(username='jane')
        new_request_token = Token.objects.create_token(
            token_type=Token.REQUEST,
            timestamp=str(int(time.time())),
            consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
            user=jane,
            scope=Scope.objects.get(name='photos'))
        new_request_token.is_approved = False
        new_request_token.save()
        parameters = self._make_access_token_parameters(new_request_token)
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 400)
        self.assertEqual(response.content, 'Request Token not approved by the user.')
    def test_error_accessing_protected_resource(self):
        """A wrong signature — or no OAuth parameters at all — yields 401."""
        request_token = self._obtain_request_token()
        self._request_authorization(request_token)
        request_token = self._update_token_from_db(request_token)
        access_token = self._obtain_access_token(request_token)
        parameters = self._make_protected_access_parameters(access_token)
        parameters['oauth_signature'] = 'wrongsignature'
        parameters['oauth_nonce'] = 'anotheraccessresourcenonce'
        response = self.c.get("/oauth/photo/", parameters)
        self.assertEqual(response.status_code, 401)
        self.assertTrue(response.content.startswith('Could not verify OAuth request.'))
        response = self.c.get("/oauth/photo/")
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.content, 'Invalid request parameters.')
    def test_positive(self):
        """End-to-end happy path of the three-legged OAuth 1.0 flow, plus revocation."""
        # Then consumer obtains a Request Token
        parameters = self._make_request_token_parameters()
        response = self.c.get("/oauth/request_token/", parameters)
        # The Service Provider checks the signature and replies with an unauthorized Request Token in the body of the HTTP response
        self._validate_request_token_response(response)
        token = self._last_created_request_token()
        parameters = {'oauth_token': token.key}
        """The Consumer redirects Jane's browser to the Service Provider User Authorization URL
        to obtain Jane's approval for accessing her private photos.
        """
        response = self.c.get("/oauth/authorize/", parameters)
        """The Service Provider asks Jane to sign-in using her username and password
        """
        self.assertEqual(response.status_code, 302)
        expected_redirect = 'http://testserver/accounts/login/?next=/oauth/authorize/%3Foauth_token%3D{0}'.format(token.key)
        self.assertEqual(response['Location'], expected_redirect)
        # Jane logins
        self.c.login(username='jane', password='toto')
        """If successful, Service Provider asks her if she approves granting printer.example.com
        access to her private photos.
        """
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.content.startswith(
            'Fake authorize view for printer.example.com with params: oauth_token='))
        # Jane approves the request.
        self.assertEqual(token.is_approved, 0) # token is not approved yet
        parameters['authorize_access'] = 1
        response = self.c.post("/oauth/authorize/", parameters)
        # The Service Provider redirects her back to the Consumer's callback URL
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].startswith(
            'http://printer.example.com/request_token_ready?oauth_verifier='))
        self.assertTrue('oauth_token=' in response['Location'])
        token = self._last_created_request_token() # get from the DB updated token
        self.assertTrue(token.is_approved)
        """
        Obtaining an Access Token
        """
        """Now that the Consumer knows Jane approved the Request Token,
        it asks the Service Provider to exchange it for an Access Token
        """
        # reset Client
        self.c = Client()
        parameters = self._make_access_token_parameters(token)
        response = self.c.get("/oauth/access_token/", parameters)
        """The Service Provider checks the signature and replies with an
        Access Token in the body of the HTTP response
        """
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        access_token = list(Token.objects.filter(token_type=Token.ACCESS))[-1]
        self.assertEqual(response_params['oauth_token'][0], access_token.key)
        self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
        self.assertEqual(access_token.user.username, 'jane')
        """
        Accessing protected resources
        """
        """The Consumer is now ready to request the private photo.
        Since the photo URL is not secure (HTTP), it must use HMAC-SHA1.
        """
        """ Generating Signature Base String
        To generate the signature, it first needs to generate the Signature Base String.
        The request contains the following parameters (oauth_signature excluded)
        which are ordered and concatenated into a normalized string
        """
        parameters = self._make_protected_access_parameters(access_token)
        """ Calculating Signature Value
        HMAC-SHA1 produces the following digest value as a base64-encoded string
        (using the Signature Base String as text and self.CONSUMER_SECRET as key)
        """
        oauth_request = oauth.Request.from_token_and_callback(access_token,
                                                             http_url='http://testserver/oauth/photo/',
                                                             parameters=parameters)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        signature = signature_method.sign(oauth_request, self.consumer, access_token)
        """ Requesting Protected Resource
        All together, the Consumer request for the photo is:
        """
        parameters['oauth_signature'] = signature
        response = self.c.get("/oauth/photo/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Protected Resource access!')
        """ Revoking Access
        If Jane deletes the Access Token of printer.example.com,
        the Consumer will not be able to access the Protected Resource anymore
        """
        access_token.delete()
        # Note that an "Invalid signature" error will be raised here if the
        # token is not revoked by Jane because we reuse a previously used one.
        parameters['oauth_signature'] = signature
        parameters['oauth_nonce'] = 'yetanotheraccessscopenonce'
        response = self.c.get(self.scope.url, parameters)
        self.assertEqual(response.status_code, 401)
        self.assertTrue(response.content.startswith('Invalid access token:'))
| 43.945409 | 132 | 0.666064 | import time
import cgi
import oauth2 as oauth
from django.test import Client
from oauth_provider.tests.auth import BaseOAuthTestCase
from oauth_provider.models import Token, Consumer, Scope
from oauth_provider.compat import get_user_model
User = get_user_model()
class ProtocolExample(BaseOAuthTestCase):
def _last_created_request_token(self):
return list(Token.objects.filter(token_type=Token.REQUEST))[-1]
def _last_created_access_token(self):
return list(Token.objects.filter(token_type=Token.ACCESS))[-1]
def _update_token_from_db(self, request_token):
return Token.objects.get(key=request_token.key)
def _make_request_token_parameters(self):
return {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_signature_method': 'PLAINTEXT',
'oauth_signature': '%s&' % self.CONSUMER_SECRET,
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': 'requestnonce',
'oauth_version': '1.0',
'oauth_callback': 'http://printer.example.com/request_token_ready',
'scope': 'photos',
}
def _make_access_token_parameters(self, token):
return {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_token': token.key,
'oauth_signature_method': 'PLAINTEXT',
'oauth_signature': '%s&%s' % (self.CONSUMER_SECRET, token.secret),
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': 'accessnonce',
'oauth_version': '1.0',
'oauth_verifier': token.verifier,
'scope': 'photos',
}
def _make_protected_access_parameters(self, access_token):
return {
'oauth_consumer_key': self.CONSUMER_KEY,
'oauth_token': access_token.key,
'oauth_signature_method': 'HMAC-SHA1',
'oauth_timestamp': str(int(time.time())),
'oauth_nonce': 'accessresourcenonce',
'oauth_version': '1.0',
}
def test_returns_invalid_params_empty_request(self):
response = self.c.get("/oauth/request_token/")
self.assertEqual(response.status_code, 401)
self.assertEqual(response._headers['www-authenticate'], ('WWW-Authenticate', 'OAuth realm=""'))
self.assertEqual(response.content, 'Invalid request parameters.')
def test_returns_401_wrong_callback(self):
parameters = self._make_request_token_parameters()
parameters['oauth_callback'] = 'wrongcallback'
parameters['oauth_nonce'] = 'requestnoncewrongcallback'
response = self.c.get("/oauth/request_token/", parameters)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, 'Invalid callback URL.')
def test_401_for_wrong_scope(self):
parameters = self._make_request_token_parameters()
parameters['scope'] = 'videos'
parameters['oauth_nonce'] = 'requestnoncevideos'
response = self.c.get("/oauth/request_token/", parameters)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, 'Scope does not exist.')
def test_oob_callback(self):
parameters = self._make_request_token_parameters()
parameters['oauth_callback'] = 'oob'
parameters['oauth_nonce'] = 'requestnonceoob'
response = self.c.get("/oauth/request_token/", parameters)
self.assertEqual(response.status_code, 200)
response_params = cgi.parse_qs(response.content)
oob_token = self._last_created_request_token()
self.assertTrue(oob_token.key in response_params['oauth_token'])
self.assertTrue(oob_token.secret in response_params['oauth_token_secret'])
self.assertFalse(oob_token.callback_confirmed)
self.assertIsNone(oob_token.callback)
def _validate_request_token_response(self, response):
self.assertEqual(response.status_code, 200)
response_params = cgi.parse_qs(response.content)
last_token = self._last_created_request_token()
self.assertTrue(last_token.key in response_params['oauth_token'])
self.assertTrue(last_token.secret in response_params['oauth_token_secret'])
self.assertTrue(response_params['oauth_callback_confirmed'])
def _obtain_request_token(self):
parameters = self._make_request_token_parameters()
response = self.c.get("/oauth/request_token/", parameters)
self._validate_request_token_response(response)
return self._last_created_request_token()
def test_obtain_request_token(self):
self._obtain_request_token()
def test_provider_redirects_to_login_page(self):
token = self._obtain_request_token()
parameters = {
'oauth_token': token.key,
}
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 302)
self.assertTrue(token.key in response['Location'])
self.c.login(username='jane', password='toto')
response = self.c.get("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 200)
def test_authorize_without_session_parameter(self):
token = self._obtain_request_token()
parameters = {'oauth_token': token.key}
self.c.login(username='jane', password='toto')
parameters['authorize_access'] = True
response = self.c.post("/oauth/authorize/", parameters)
response = self.c.post("/oauth/authorize/", parameters)
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, 'Action not allowed.')
def test_access_not_granted_by_the_user(self):
token = self._obtain_request_token()
parameters = {'oauth_token': token.key}
self.c.login(username='jane', password='toto')
self.c.get("/oauth/authorize/", parameters)
parameters['authorize_access'] = False
response = self.c.post("/oauth/authorize/", parameters)
self.assertTrue('error=Access+not+granted+by+user' in response['Location'])
def _request_authorization(self, request_token):
self.assertFalse(request_token.is_approved)
parameters = {'oauth_token': request_token.key}
self.c.login(username='jane', password='toto')
self.c.get("/oauth/authorize/", parameters)
parameters['authorize_access'] = 1
self.c.post("/oauth/authorize/", parameters)
request_token = self._update_token_from_db(request_token)
self.assertTrue(request_token.is_approved)
def test_request_authorization(self):
token = self._obtain_request_token()
self._request_authorization(token)
def _obtain_access_token(self, request_token):
parameters = self._make_access_token_parameters(request_token)
response = self.c.get("/oauth/access_token/", parameters)
response_params = cgi.parse_qs(response.content)
access_token = self._last_created_access_token()
self.assertEqual(response.status_code, 200)
self.assertEqual(response_params['oauth_token'][0], access_token.key)
self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
self.assertEqual(access_token.user.username, 'jane')
return access_token
def test_request_another_access_token(self):
request_token = self._obtain_request_token()
self._request_authorization(request_token)
request_token = self._update_token_from_db(request_token)
self._obtain_access_token(request_token)
parameters = self._make_access_token_parameters(request_token)
response = self.c.get("/oauth/access_token/", parameters)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Invalid request token.')
def test_request_access_token_invalid_verifier(self):
jane = User.objects.get(username='jane')
new_request_token = Token.objects.create_token(
token_type=Token.REQUEST,
timestamp=str(int(time.time())),
consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
user=jane,
scope=Scope.objects.get(name='photos'))
new_request_token.is_approved = True
new_request_token.save()
parameters = self._make_access_token_parameters(new_request_token)
parameters['oauth_token'] = new_request_token.key
parameters['oauth_signature'] = '%s&%s' % (self.CONSUMER_SECRET, new_request_token.secret)
parameters['oauth_verifier'] = 'invalidverifier'
response = self.c.get("/oauth/access_token/", parameters)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Invalid OAuth verifier.')
def test_request_access_token_not_approved_request_token(self):
jane = User.objects.get(username='jane')
new_request_token = Token.objects.create_token(
token_type=Token.REQUEST,
timestamp=str(int(time.time())),
consumer=Consumer.objects.get(key=self.CONSUMER_KEY),
user=jane,
scope=Scope.objects.get(name='photos'))
new_request_token.is_approved = False
new_request_token.save()
parameters = self._make_access_token_parameters(new_request_token)
response = self.c.get("/oauth/access_token/", parameters)
self.assertEqual(response.status_code, 400)
self.assertEqual(response.content, 'Request Token not approved by the user.')
def test_error_accessing_protected_resource(self):
request_token = self._obtain_request_token()
self._request_authorization(request_token)
request_token = self._update_token_from_db(request_token)
access_token = self._obtain_access_token(request_token)
parameters = self._make_protected_access_parameters(access_token)
parameters['oauth_signature'] = 'wrongsignature'
parameters['oauth_nonce'] = 'anotheraccessresourcenonce'
response = self.c.get("/oauth/photo/", parameters)
self.assertEqual(response.status_code, 401)
self.assertTrue(response.content.startswith('Could not verify OAuth request.'))
response = self.c.get("/oauth/photo/")
self.assertEqual(response.status_code, 401)
self.assertEqual(response.content, 'Invalid request parameters.')
    def test_positive(self):
        """Walk the full three-legged OAuth 1.0a flow end to end.

        Covers: obtaining a request token, user login plus authorization,
        exchanging the request token for an access token, accessing a
        protected resource, and rejection after the access token is revoked.
        """
        # Step 1: the consumer obtains an (unauthorized) request token.
        parameters = self._make_request_token_parameters()
        response = self.c.get("/oauth/request_token/", parameters)
        self._validate_request_token_response(response)
        token = self._last_created_request_token()
        # Step 2: visiting the authorization endpoint while logged out
        # redirects to the login page, preserving the oauth_token in `next`.
        parameters = {'oauth_token': token.key}
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 302)
        expected_redirect = 'http://testserver/accounts/login/?next=/oauth/authorize/%3Foauth_token%3D{0}'.format(token.key)
        self.assertEqual(response['Location'], expected_redirect)
        # Once logged in, the (fake) authorize view renders and the token is
        # still unapproved at this point.
        self.c.login(username='jane', password='toto')
        response = self.c.get("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertTrue(response.content.startswith(
            'Fake authorize view for printer.example.com with params: oauth_token='))
        self.assertEqual(token.is_approved, 0)
        # Step 3: the user grants access; the consumer callback receives an
        # oauth_verifier and the stored token becomes approved.
        parameters['authorize_access'] = 1
        response = self.c.post("/oauth/authorize/", parameters)
        self.assertEqual(response.status_code, 302)
        self.assertTrue(response['Location'].startswith(
            'http://printer.example.com/request_token_ready?oauth_verifier='))
        self.assertTrue('oauth_token=' in response['Location'])
        token = self._last_created_request_token() # get from the DB updated token
        self.assertTrue(token.is_approved)
        # reset Client
        self.c = Client()
        # Step 4: exchange the approved request token for an access token and
        # check the credentials echoed back in the query-string body.
        parameters = self._make_access_token_parameters(token)
        response = self.c.get("/oauth/access_token/", parameters)
        self.assertEqual(response.status_code, 200)
        response_params = cgi.parse_qs(response.content)
        access_token = list(Token.objects.filter(token_type=Token.ACCESS))[-1]
        self.assertEqual(response_params['oauth_token'][0], access_token.key)
        self.assertEqual(response_params['oauth_token_secret'][0], access_token.secret)
        self.assertEqual(access_token.user.username, 'jane')
        # Step 5: HMAC-SHA1-sign a request with the access token and fetch
        # the protected resource successfully.
        parameters = self._make_protected_access_parameters(access_token)
        oauth_request = oauth.Request.from_token_and_callback(access_token,
            http_url='http://testserver/oauth/photo/',
            parameters=parameters)
        signature_method = oauth.SignatureMethod_HMAC_SHA1()
        signature = signature_method.sign(oauth_request, self.consumer, access_token)
        parameters['oauth_signature'] = signature
        response = self.c.get("/oauth/photo/", parameters)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.content, 'Protected Resource access!')
        # Step 6: after the user revokes (deletes) the access token, the same
        # credentials no longer grant access.
        access_token.delete()
        # Note that an "Invalid signature" error will be raised here if the
        # token is not revoked by Jane because we reuse a previously used one.
        parameters['oauth_signature'] = signature
        parameters['oauth_nonce'] = 'yetanotheraccessscopenonce'
        response = self.c.get(self.scope.url, parameters)
        self.assertEqual(response.status_code, 401)
        self.assertTrue(response.content.startswith('Invalid access token:'))
| true | true |
f7f53cf8e09154955a709d43ceaab987a1984569 | 3,858 | py | Python | u1_2d/s_nthmc_l64_b4_t2_lr5e-4.py | nftqcd/nthmc | 010c70e297c904219e9d8a04cc20b9c75a4b61e5 | [
"MIT"
] | 2 | 2021-07-29T19:09:30.000Z | 2022-01-17T21:13:40.000Z | u1_2d/s_nthmc_l64_b4_t2_lr5e-4.py | nftqcd/nthmc | 010c70e297c904219e9d8a04cc20b9c75a4b61e5 | [
"MIT"
] | null | null | null | u1_2d/s_nthmc_l64_b4_t2_lr5e-4.py | nftqcd/nthmc | 010c70e297c904219e9d8a04cc20b9c75a4b61e5 | [
"MIT"
] | null | null | null | import tensorflow as tf
import tensorflow.keras as tk
import numpy
import nthmc, ftr, evolve, forcetrain
trajLength = 4.0
nstep = 8
conf = nthmc.Conf(nbatch=32, nepoch=4, nstepEpoch=1024, nstepMixing=128, initDt=trajLength/nstep, stepPerTraj=nstep, trainDt=False, nthr=10, nthrIop=1, seed=7*11*13*7)
op0 = (((1,2,-1,-2), (1,-2,-1,2)),
((1,1,2,-1,-1,-2), (1,1,-2,-1,-1,2), (1,2,-1,-1,-2,1), (1,-2,-1,-1,2,1)))
# requires different coefficient bounds:
# (1,2,-1,-2,1,-2,-1,2)
# (1,2,-1,-2,1,2,-1,-2)
# (1,-2,-1,2,1,-2,-1,2)
op1 = (((2,-1,-2,1), (2,1,-2,-1)),
((2,2,-1,-2,-2,1), (2,2,1,-2,-2,-1), (2,-1,-2,-2,1,2), (2,1,-2,-2,-1,2)))
fixedP = (1,2,-1,-2)
fixedR0 = (2,2,1,-2,-2,-1)
fixedR1 = (1,1,2,-1,-1,-2)
convP0 = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,2), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
convP1 = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (2,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
convR = lambda pad: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
), pad)
conv = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
tk.layers.Conv2D(2, (3,3), activation=None, kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
transform = lambda: ftr.TransformChain([
ftr.GenericStoutSmear(((0,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
])
nthmc.setup(conf)
rng = tf.random.Generator.from_seed(conf.seed)
actionFun = lambda: nthmc.U1d2(beta=7.0, beta0=3.0, size=(64,64), transform=transform(), nbatch=conf.nbatch, rng=rng.split()[0])
mcmcFun = lambda action: nthmc.Metropolis(conf, evolve.Omelyan2MN(conf, action))
x0 = tf.constant(numpy.load('configs/s_hmc_l64_b4/conf.b4.0.n128.l64_64.08192.npy')[0:32], dtype=tf.float64)
forcetrain.runInfer(conf, actionFun, mcmcFun, x0=x0, saveFile='configs/s_nthmc_l64_b4_t2_lr5e-4/conf', transformWeights='weights/t_force_b25_l64_b4_t2_lr5e-4')
| 65.389831 | 167 | 0.637377 | import tensorflow as tf
import tensorflow.keras as tk
import numpy
import nthmc, ftr, evolve, forcetrain
trajLength = 4.0
nstep = 8
conf = nthmc.Conf(nbatch=32, nepoch=4, nstepEpoch=1024, nstepMixing=128, initDt=trajLength/nstep, stepPerTraj=nstep, trainDt=False, nthr=10, nthrIop=1, seed=7*11*13*7)
op0 = (((1,2,-1,-2), (1,-2,-1,2)),
((1,1,2,-1,-1,-2), (1,1,-2,-1,-1,2), (1,2,-1,-1,-2,1), (1,-2,-1,-1,2,1)))
op1 = (((2,-1,-2,1), (2,1,-2,-1)),
((2,2,-1,-2,-2,1), (2,2,1,-2,-2,-1), (2,-1,-2,-2,1,2), (2,1,-2,-2,-1,2)))
fixedP = (1,2,-1,-2)
fixedR0 = (2,2,1,-2,-2,-1)
fixedR1 = (1,1,2,-1,-1,-2)
convP0 = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,2), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
convP1 = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (2,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
convR = lambda pad: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
), pad)
conv = lambda: ftr.PeriodicConv((
tk.layers.Conv2D(4, (3,3), activation='gelu', kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
tk.layers.Conv2D(2, (3,3), activation=None, kernel_initializer=tk.initializers.RandomNormal(), bias_initializer=tk.initializers.RandomNormal()),
))
transform = lambda: ftr.TransformChain([
ftr.GenericStoutSmear(((0,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op0, [(fixedP, convP0()), (fixedR0, convR((1,2)))], conv()),
ftr.GenericStoutSmear(((0,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,0),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((0,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
ftr.GenericStoutSmear(((1,1),(2,2)), op1, [(fixedP, convP1()), (fixedR1, convR((2,1)))], conv()),
])
nthmc.setup(conf)
rng = tf.random.Generator.from_seed(conf.seed)
actionFun = lambda: nthmc.U1d2(beta=7.0, beta0=3.0, size=(64,64), transform=transform(), nbatch=conf.nbatch, rng=rng.split()[0])
mcmcFun = lambda action: nthmc.Metropolis(conf, evolve.Omelyan2MN(conf, action))
x0 = tf.constant(numpy.load('configs/s_hmc_l64_b4/conf.b4.0.n128.l64_64.08192.npy')[0:32], dtype=tf.float64)
forcetrain.runInfer(conf, actionFun, mcmcFun, x0=x0, saveFile='configs/s_nthmc_l64_b4_t2_lr5e-4/conf', transformWeights='weights/t_force_b25_l64_b4_t2_lr5e-4')
| true | true |
f7f53e9a0a79079c0eeda880ae01e5cdba350c47 | 5,134 | py | Python | django/forms/utils.py | Bashar/django | e520a73eeea6b185b719901ab9985ecef00e5664 | [
"BSD-3-Clause"
] | 1 | 2015-01-07T10:29:03.000Z | 2015-01-07T10:29:03.000Z | django/forms/utils.py | Bashar/django | e520a73eeea6b185b719901ab9985ecef00e5664 | [
"BSD-3-Clause"
] | null | null | null | django/forms/utils.py | Bashar/django | e520a73eeea6b185b719901ab9985ecef00e5664 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals
import json
import sys
try:
from collections import UserList
except ImportError: # Python 2
from UserList import UserList
from django.conf import settings
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html, format_html_join, escape
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils import six
# Import ValidationError so that it can be imported from this
# module to maintain backwards compatibility.
from django.core.exceptions import ValidationError
def flatatt(attrs):
    """
    Convert a dictionary of attributes to a single string.

    The returned string will contain a leading space followed by key="value",
    XML-style pairs. A value of ``True`` renders the bare attribute name and a
    value of ``False`` omits the attribute entirely. It is assumed that the
    keys do not need to be XML-escaped. If the passed dictionary is empty,
    then return an empty string. The result is passed through 'mark_safe'.
    """
    key_value_attrs = []
    boolean_attrs = []
    # Build the two attribute lists without mutating the caller's dict: the
    # previous implementation deleted boolean keys from `attrs` in place,
    # which corrupted attribute dicts that are reused across renderings.
    for attr, value in attrs.items():
        if value is True:
            boolean_attrs.append((attr,))
        elif value is not False:
            # `False` drops the attribute; anything else renders key="value".
            key_value_attrs.append((attr, value))
    return (
        format_html_join('', ' {0}="{1}"', sorted(key_value_attrs)) +
        format_html_join('', ' {0}', sorted(boolean_attrs))
    )
@python_2_unicode_compatible
class ErrorDict(dict):
    """
    A collection of errors that knows how to display itself in various formats.

    The dictionary keys are the field names, and the values are the errors.
    """
    def as_data(self):
        # Map every field to the underlying ValidationError list.
        return {field: error.as_data() for field, error in self.items()}

    def as_json(self, escape_html=False):
        return json.dumps({field: error.get_json_data(escape_html) for field, error in self.items()})

    def as_ul(self):
        if not self:
            return ''
        items = ((field, force_text(error)) for field, error in self.items())
        return format_html(
            '<ul class="errorlist">{0}</ul>',
            format_html_join('', '<li>{0}{1}</li>', items)
        )

    def as_text(self):
        lines = []
        for field, errors in self.items():
            lines.append('* %s' % field)
            lines.append('\n'.join(' * %s' % error for error in errors))
        return '\n'.join(lines)

    def __str__(self):
        return self.as_ul()
@python_2_unicode_compatible
class ErrorList(UserList, list):
    """
    A collection of errors that knows how to display itself in various formats.
    """
    def as_data(self):
        return ValidationError(self.data).error_list

    def get_json_data(self, escape_html=False):
        collected = []
        for error in self.as_data():
            # Each ValidationError iterates to its messages; take the first.
            message = list(error)[0]
            collected.append({
                'message': escape(message) if escape_html else message,
                'code': error.code or '',
            })
        return collected

    def as_json(self, escape_html=False):
        return json.dumps(self.get_json_data(escape_html))

    def as_ul(self):
        if not self.data:
            return ''
        items = ((force_text(error),) for error in self)
        return format_html(
            '<ul class="errorlist">{0}</ul>',
            format_html_join('', '<li>{0}</li>', items)
        )

    def as_text(self):
        return '\n'.join('* %s' % error for error in self)

    def __str__(self):
        return self.as_ul()

    def __repr__(self):
        return repr(list(self))

    def __contains__(self, item):
        # Compare against the rendered messages, not the raw stored objects.
        return item in list(self)

    def __eq__(self, other):
        return list(self) == other

    def __ne__(self, other):
        return list(self) != other

    def __getitem__(self, i):
        error = self.data[i]
        if isinstance(error, ValidationError):
            return list(error)[0]
        return force_text(error)
# Utilities for time zone support in DateTimeField et al.
def from_current_timezone(value):
    """
    When time zone support is enabled, convert naive datetimes
    entered in the current time zone to aware datetimes.

    Aware datetimes (and ``None``) pass through unchanged.
    """
    if settings.USE_TZ and value is not None and timezone.is_naive(value):
        current_timezone = timezone.get_current_timezone()
        try:
            return timezone.make_aware(value, current_timezone)
        except Exception:
            # make_aware() can fail for wall-clock times that are ambiguous
            # or non-existent around a DST transition; surface that as a
            # form ValidationError rather than a server error.
            message = _(
                '%(datetime)s couldn\'t be interpreted '
                'in time zone %(current_timezone)s; it '
                'may be ambiguous or it may not exist.'
            )
            params = {'datetime': value, 'current_timezone': current_timezone}
            # six.reraise() keeps the original traceback attached to the new
            # ValidationError on both Python 2 and Python 3.
            six.reraise(ValidationError, ValidationError(
                message,
                code='ambiguous_timezone',
                params=params,
            ), sys.exc_info()[2])
    return value
def to_current_timezone(value):
    """
    When time zone support is enabled, convert aware datetimes
    to naive datetimes in the current time zone for display.

    Naive datetimes (and ``None``) pass through unchanged.
    """
    if settings.USE_TZ and value is not None and timezone.is_aware(value):
        current_timezone = timezone.get_current_timezone()
        return timezone.make_naive(value, current_timezone)
    return value
| 30.742515 | 98 | 0.626412 | from __future__ import unicode_literals
import json
import sys
try:
from collections import UserList
except ImportError:
from UserList import UserList
from django.conf import settings
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.html import format_html, format_html_join, escape
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.utils import six
from django.core.exceptions import ValidationError
def flatatt(attrs):
boolean_attrs = []
for attr, value in list(attrs.items()):
if value is True:
boolean_attrs.append((attr,))
del attrs[attr]
elif value is False:
del attrs[attr]
return (
format_html_join('', ' {0}="{1}"', sorted(attrs.items())) +
format_html_join('', ' {0}', sorted(boolean_attrs))
)
@python_2_unicode_compatible
class ErrorDict(dict):
def as_data(self):
return {f: e.as_data() for f, e in self.items()}
def as_json(self, escape_html=False):
return json.dumps({f: e.get_json_data(escape_html) for f, e in self.items()})
def as_ul(self):
if not self:
return ''
return format_html(
'<ul class="errorlist">{0}</ul>',
format_html_join('', '<li>{0}{1}</li>', ((k, force_text(v)) for k, v in self.items()))
)
def as_text(self):
output = []
for field, errors in self.items():
output.append('* %s' % field)
output.append('\n'.join(' * %s' % e for e in errors))
return '\n'.join(output)
def __str__(self):
return self.as_ul()
@python_2_unicode_compatible
class ErrorList(UserList, list):
def as_data(self):
return ValidationError(self.data).error_list
def get_json_data(self, escape_html=False):
errors = []
for error in self.as_data():
message = list(error)[0]
errors.append({
'message': escape(message) if escape_html else message,
'code': error.code or '',
})
return errors
def as_json(self, escape_html=False):
return json.dumps(self.get_json_data(escape_html))
def as_ul(self):
if not self.data:
return ''
return format_html(
'<ul class="errorlist">{0}</ul>',
format_html_join('', '<li>{0}</li>', ((force_text(e),) for e in self))
)
def as_text(self):
return '\n'.join('* %s' % e for e in self)
def __str__(self):
return self.as_ul()
def __repr__(self):
return repr(list(self))
def __contains__(self, item):
return item in list(self)
def __eq__(self, other):
return list(self) == other
def __ne__(self, other):
return list(self) != other
def __getitem__(self, i):
error = self.data[i]
if isinstance(error, ValidationError):
return list(error)[0]
return force_text(error)
def from_current_timezone(value):
if settings.USE_TZ and value is not None and timezone.is_naive(value):
current_timezone = timezone.get_current_timezone()
try:
return timezone.make_aware(value, current_timezone)
except Exception:
message = _(
'%(datetime)s couldn\'t be interpreted '
'in time zone %(current_timezone)s; it '
'may be ambiguous or it may not exist.'
)
params = {'datetime': value, 'current_timezone': current_timezone}
six.reraise(ValidationError, ValidationError(
message,
code='ambiguous_timezone',
params=params,
), sys.exc_info()[2])
return value
def to_current_timezone(value):
if settings.USE_TZ and value is not None and timezone.is_aware(value):
current_timezone = timezone.get_current_timezone()
return timezone.make_naive(value, current_timezone)
return value
| true | true |
f7f53f2e2e060c6aaeb272c81acbd4d562ea3e78 | 10,147 | py | Python | tests/cases/py_client/additional_properties/client.py | Parquery/swagger-to | 95cde991ea2602ef625630e69ac9f8501bd89f4d | [
"MIT"
] | 38 | 2018-08-06T15:11:10.000Z | 2022-02-13T22:43:00.000Z | tests/cases/py_client/additional_properties/client.py | Parquery/swagger-to | 95cde991ea2602ef625630e69ac9f8501bd89f4d | [
"MIT"
] | 42 | 2018-08-07T08:25:07.000Z | 2021-11-28T19:32:48.000Z | tests/cases/py_client/additional_properties/client.py | Parquery/swagger-to | 95cde991ea2602ef625630e69ac9f8501bd89f4d | [
"MIT"
] | 16 | 2019-02-26T12:39:43.000Z | 2022-01-29T06:38:41.000Z | #!/usr/bin/env python3
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
"""Implements the client for test."""
# pylint: skip-file
# pydocstyle: add-ignore=D105,D107,D401
import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional, cast
import requests
import requests.auth
def from_obj(obj: Any, expected: List[type], path: str = '') -> Any:
    """
    Checks and converts the given obj along the expected types.

    :param obj: to be converted
    :param expected: list of types representing the (nested) structure
    :param path: to the object used for debugging
    :return: the converted object
    """
    if not expected:
        raise ValueError("`expected` is empty, but at least one type needs to be specified.")
    exp = expected[0]
    # NOTE(review): typing.Any in `expected` is not special-cased and falls
    # through to the trailing ValueError — confirm upstream generator intent.
    if exp == float:
        # Ints are accepted where a float is expected and promoted.
        if isinstance(obj, int):
            return float(obj)
        if isinstance(obj, float):
            return obj
        raise ValueError(
            'Expected object of type int or float at {!r}, but got {}.'.format(path, type(obj)))
    if exp in [bool, int, str, list, dict] and not isinstance(obj, exp):
        raise ValueError(
            'Expected object of type {} at {!r}, but got {}.'.format(exp, path, type(obj)))
    if exp in [bool, int, float, str]:
        return obj
    if exp == list:
        return [
            from_obj(item, expected=expected[1:], path='{}[{}]'.format(path, index))
            for index, item in enumerate(obj)
        ]
    if exp == dict:
        converted = {}
        for key, value in obj.items():
            if not isinstance(key, str):
                raise ValueError(
                    'Expected a key of type str at path {!r}, got: {}'.format(path, type(key)))
            converted[key] = from_obj(value, expected=expected[1:], path='{}[{!r}]'.format(path, key))
        return converted
    if exp == AnyTypeValuesContainerInProperty:
        return any_type_values_container_in_property_from_obj(obj, path=path)
    raise ValueError("Unexpected `expected` type: {}".format(exp))
def to_jsonable(obj: Any, expected: List[type], path: str = "") -> Any:
    """
    Checks and converts the given object along the expected types to a JSON-able representation.

    :param obj: to be converted
    :param expected: list of types representing the (nested) structure
    :param path: path to the object used for debugging
    :return: JSON-able representation of the object
    """
    if not expected:
        raise ValueError("`expected` is empty, but at least one type needs to be specified.")
    exp = expected[0]
    # Unlike from_obj, no int-to-float promotion happens here: the object
    # must already be an instance of the expected type.
    if not isinstance(obj, exp):
        raise ValueError('Expected object of type {} at path {!r}, but got {}.'.format(
            exp, path, type(obj)))
    if exp in (bool, int, float, str):
        return obj
    if exp == list:
        return [
            to_jsonable(item, expected=expected[1:], path='{}[{}]'.format(path, index))
            for index, item in enumerate(obj)
        ]
    if exp == dict:
        converted = {}
        for key, value in obj.items():
            if not isinstance(key, str):
                raise ValueError(
                    'Expected a key of type str at path {!r}, got: {}'.format(path, type(key)))
            converted[key] = to_jsonable(
                value,
                expected=expected[1:],
                path='{}[{!r}]'.format(path, key))
        return converted
    if exp == AnyTypeValuesContainerInProperty:
        return any_type_values_container_in_property_to_jsonable(obj, path=path)
    raise ValueError("Unexpected `expected` type: {}".format(exp))
class AnyTypeValuesContainerInProperty:
    """Container whose ``array`` and ``mapping`` properties hold values of arbitrary type."""
    def __init__(
            self,
            array: List[Any],
            mapping: Dict[str, Any]) -> None:
        """Initializes with the given values."""
        # List of arbitrarily-typed values.
        self.array = array
        # Mapping of string keys to arbitrarily-typed values.
        self.mapping = mapping

    def to_jsonable(self) -> MutableMapping[str, Any]:
        """
        Dispatches the conversion to any_type_values_container_in_property_to_jsonable.

        :return: JSON-able representation
        """
        return any_type_values_container_in_property_to_jsonable(self)
def new_any_type_values_container_in_property() -> AnyTypeValuesContainerInProperty:
    """Generates an instance of AnyTypeValuesContainerInProperty with default (empty) values."""
    return AnyTypeValuesContainerInProperty(array=[], mapping={})
def any_type_values_container_in_property_from_obj(obj: Any, path: str = "") -> AnyTypeValuesContainerInProperty:
    """
    Generates an instance of AnyTypeValuesContainerInProperty from a dictionary object.

    :param obj: a JSON-ed dictionary object representing an instance of AnyTypeValuesContainerInProperty
    :param path: path to the object used for debugging
    :return: parsed instance of AnyTypeValuesContainerInProperty
    """
    if not isinstance(obj, dict):
        raise ValueError('Expected a dict at path {}, but got: {}'.format(path, type(obj)))
    for key in obj:
        if not isinstance(key, str):
            raise ValueError(
                'Expected a key of type str at path {}, but got: {}'.format(path, type(key)))
    # Convert both properties and construct the instance in one step.
    return AnyTypeValuesContainerInProperty(
        array=from_obj(obj['array'], expected=[list, Any], path=path + '.array'),
        mapping=from_obj(obj['mapping'], expected=[dict, Any], path=path + '.mapping'))
def any_type_values_container_in_property_to_jsonable(
        any_type_values_container_in_property: AnyTypeValuesContainerInProperty,
        path: str = "") -> MutableMapping[str, Any]:
    """
    Generates a JSON-able mapping from an instance of AnyTypeValuesContainerInProperty.

    :param any_type_values_container_in_property: instance of AnyTypeValuesContainerInProperty to be JSON-ized
    :param path: path to the any_type_values_container_in_property used for debugging
    :return: a JSON-able representation
    """
    return {
        'array': to_jsonable(
            any_type_values_container_in_property.array,
            expected=[list, Any],
            path='{}.array'.format(path)),
        'mapping': to_jsonable(
            any_type_values_container_in_property.mapping,
            expected=[dict, Any],
            path='{}.mapping'.format(path)),
    }
class RemoteCaller:
    """Executes the remote calls to the server."""

    def __init__(
            self,
            url_prefix: str,
            auth: Optional[requests.auth.AuthBase] = None,
            session: Optional[requests.Session] = None) -> None:
        """
        Initializes the caller.

        :param url_prefix: common prefix of all endpoint URLs
        :param auth: authentication applied when a session is created here
        :param session: HTTP session to reuse; created on demand if None
        """
        self.url_prefix = url_prefix
        self.auth = auth
        self.session = session
        if not self.session:
            self.session = requests.Session()
            self.session.auth = self.auth

    def _post_json(self, path: str, data: Any) -> Any:
        """
        POST the JSON-able ``data`` to ``url_prefix + path`` and decode the response.

        Shared by all endpoint methods to avoid repeating the identical
        request/close/raise-for-status/parse sequence.

        :param path: endpoint path starting with '/'
        :param data: JSON-able request body
        :return: decoded JSON response body
        :raises requests.HTTPError: on a non-2xx response
        """
        resp = self.session.request(
            method='post',
            url=self.url_prefix + path,
            json=data,
        )
        with contextlib.closing(resp):
            resp.raise_for_status()
            return resp.json()

    def get_foo(
            self,
            body: List[Any]) -> List[Any]:
        """
        Send a post request to /foo.

        :param body: JSON-able list payload

        :return: response
        """
        data = to_jsonable(body, expected=[list, Any])
        return from_obj(
            obj=self._post_json('/foo', data),
            expected=[list, Any])

    def get_bar(
            self,
            body: Dict[str, Any]) -> Dict[str, Any]:
        """
        Send a post request to /bar.

        :param body: JSON-able mapping payload

        :return: response
        """
        data = to_jsonable(body, expected=[dict, Any])
        return from_obj(
            obj=self._post_json('/bar', data),
            expected=[dict, Any])

    def get_baz(
            self,
            body: 'AnyTypeValuesContainerInProperty') -> 'AnyTypeValuesContainerInProperty':
        """
        Send a post request to /baz.

        :param body: payload to be JSON-ized

        :return: response
        """
        data = to_jsonable(body, expected=[AnyTypeValuesContainerInProperty])
        return from_obj(
            obj=self._post_json('/baz', data),
            expected=[AnyTypeValuesContainerInProperty])

    def get_qux(
            self,
            body: Any) -> Any:
        """
        Send a post request to /qux.

        :param body: arbitrary JSON-able payload

        :return: response
        """
        # NOTE(review): from_obj/to_jsonable do not special-case typing.Any
        # in `expected`; confirm generator intent for any-typed bodies.
        data = to_jsonable(body, expected=[Any])
        return from_obj(
            obj=self._post_json('/qux', data),
            expected=[Any])
# Automatically generated file by swagger_to. DO NOT EDIT OR APPEND ANYTHING!
| 28.502809 | 113 | 0.587169 |
import contextlib
import json
from typing import Any, BinaryIO, Dict, List, MutableMapping, Optional, cast
import requests
import requests.auth
def from_obj(obj: Any, expected: List[type], path: str = '') -> Any:
if not expected:
raise ValueError("`expected` is empty, but at least one type needs to be specified.")
exp = expected[0]
if exp == float:
if isinstance(obj, int):
return float(obj)
if isinstance(obj, float):
return obj
raise ValueError(
'Expected object of type int or float at {!r}, but got {}.'.format(path, type(obj)))
if exp in [bool, int, str, list, dict]:
if not isinstance(obj, exp):
raise ValueError(
'Expected object of type {} at {!r}, but got {}.'.format(exp, path, type(obj)))
if exp in [bool, int, float, str]:
return obj
if exp == list:
lst = []
for i, value in enumerate(obj):
lst.append(
from_obj(value, expected=expected[1:], path='{}[{}]'.format(path, i)))
return lst
if exp == dict:
adict = dict()
for key, value in obj.items():
if not isinstance(key, str):
raise ValueError(
'Expected a key of type str at path {!r}, got: {}'.format(path, type(key)))
adict[key] = from_obj(value, expected=expected[1:], path='{}[{!r}]'.format(path, key))
return adict
if exp == AnyTypeValuesContainerInProperty:
return any_type_values_container_in_property_from_obj(obj, path=path)
raise ValueError("Unexpected `expected` type: {}".format(exp))
def to_jsonable(obj: Any, expected: List[type], path: str = "") -> Any:
if not expected:
raise ValueError("`expected` is empty, but at least one type needs to be specified.")
exp = expected[0]
if not isinstance(obj, exp):
raise ValueError('Expected object of type {} at path {!r}, but got {}.'.format(
exp, path, type(obj)))
if exp == bool:
assert isinstance(obj, bool)
return obj
if exp == int:
assert isinstance(obj, int)
return obj
if exp == float:
assert isinstance(obj, float)
return obj
if exp == str:
assert isinstance(obj, str)
return obj
if exp == list:
assert isinstance(obj, list)
lst = []
for i, value in enumerate(obj):
lst.append(
to_jsonable(value, expected=expected[1:], path='{}[{}]'.format(path, i)))
return lst
if exp == dict:
assert isinstance(obj, dict)
adict = dict()
for key, value in obj.items():
if not isinstance(key, str):
raise ValueError(
'Expected a key of type str at path {!r}, got: {}'.format(path, type(key)))
adict[key] = to_jsonable(
value,
expected=expected[1:],
path='{}[{!r}]'.format(path, key))
return adict
if exp == AnyTypeValuesContainerInProperty:
assert isinstance(obj, AnyTypeValuesContainerInProperty)
return any_type_values_container_in_property_to_jsonable(obj, path=path)
raise ValueError("Unexpected `expected` type: {}".format(exp))
class AnyTypeValuesContainerInProperty:
def __init__(
self,
array: List[Any],
mapping: Dict[str, Any]) -> None:
self.array = array
self.mapping = mapping
def to_jsonable(self) -> MutableMapping[str, Any]:
return any_type_values_container_in_property_to_jsonable(self)
def new_any_type_values_container_in_property() -> AnyTypeValuesContainerInProperty:
return AnyTypeValuesContainerInProperty(
array=[],
mapping=dict())
def any_type_values_container_in_property_from_obj(obj: Any, path: str = "") -> AnyTypeValuesContainerInProperty:
if not isinstance(obj, dict):
raise ValueError('Expected a dict at path {}, but got: {}'.format(path, type(obj)))
for key in obj:
if not isinstance(key, str):
raise ValueError(
'Expected a key of type str at path {}, but got: {}'.format(path, type(key)))
array_from_obj = from_obj(
obj['array'],
expected=[list, Any],
path=path + '.array')
mapping_from_obj = from_obj(
obj['mapping'],
expected=[dict, Any],
path=path + '.mapping')
return AnyTypeValuesContainerInProperty(
array=array_from_obj,
mapping=mapping_from_obj)
def any_type_values_container_in_property_to_jsonable(
any_type_values_container_in_property: AnyTypeValuesContainerInProperty,
path: str = "") -> MutableMapping[str, Any]:
res = dict()
res['array'] = to_jsonable(
any_type_values_container_in_property.array,
expected=[list, Any],
path='{}.array'.format(path))
res['mapping'] = to_jsonable(
any_type_values_container_in_property.mapping,
expected=[dict, Any],
path='{}.mapping'.format(path))
return res
class RemoteCaller:
    """Execute remote calls to the server over HTTP."""

    def __init__(
            self,
            url_prefix: str,
            auth: Optional[requests.auth.AuthBase] = None,
            session: Optional[requests.Session] = None) -> None:
        """
        :param url_prefix: base URL that every endpoint path is appended to
        :param auth: credentials attached to a session created by this object
        :param session: pre-configured HTTP session; created lazily if omitted
        """
        self.url_prefix = url_prefix
        self.auth = auth
        self.session = session
        if not self.session:
            # NOTE(review): auth is attached only to a session created here;
            # a caller-supplied session is presumed already configured -- confirm.
            self.session = requests.Session()
            self.session.auth = self.auth

    def get_foo(
            self,
            body: List[Any]) -> List[Any]:
        """POST ``body`` to /foo and parse the response as a free-form list."""
        url = self.url_prefix + '/foo'
        data = to_jsonable(
            body,
            expected=[list, Any])
        resp = self.session.request(
            method='post',
            url=url,
            json=data,
        )
        with contextlib.closing(resp):
            # Fail fast on non-2xx statuses before parsing the body.
            resp.raise_for_status()
            return from_obj(
                obj=resp.json(),
                expected=[list, Any])

    def get_bar(
            self,
            body: Dict[str, Any]) -> Dict[str, Any]:
        """POST ``body`` to /bar and parse the response as a free-form mapping."""
        url = self.url_prefix + '/bar'
        data = to_jsonable(
            body,
            expected=[dict, Any])
        resp = self.session.request(
            method='post',
            url=url,
            json=data,
        )
        with contextlib.closing(resp):
            resp.raise_for_status()
            return from_obj(
                obj=resp.json(),
                expected=[dict, Any])

    def get_baz(
            self,
            body: 'AnyTypeValuesContainerInProperty') -> 'AnyTypeValuesContainerInProperty':
        """POST ``body`` to /baz and parse the response as AnyTypeValuesContainerInProperty."""
        url = self.url_prefix + '/baz'
        data = to_jsonable(
            body,
            expected=[AnyTypeValuesContainerInProperty])
        resp = self.session.request(
            method='post',
            url=url,
            json=data,
        )
        with contextlib.closing(resp):
            resp.raise_for_status()
            return from_obj(
                obj=resp.json(),
                expected=[AnyTypeValuesContainerInProperty])

    def get_qux(
            self,
            body: Any) -> Any:
        """POST ``body`` to /qux and parse the response as an arbitrary value."""
        url = self.url_prefix + '/qux'
        data = to_jsonable(
            body,
            expected=[Any])
        resp = self.session.request(
            method='post',
            url=url,
            json=data,
        )
        with contextlib.closing(resp):
            resp.raise_for_status()
            return from_obj(
                obj=resp.json(),
                expected=[Any])
| true | true |
f7f53f650dcfbc710ba362ad11e333adcde0e4e9 | 2,061 | py | Python | src/property_app/database/types.py | almostprod/property-app | 2e9dc6c64e7fd91d287fc95e513fa3ab1079fa54 | [
"Apache-2.0"
] | 2 | 2020-03-03T16:52:31.000Z | 2020-03-17T21:35:30.000Z | src/property_app/database/types.py | amcclosky/property-app | 9afb0210739955ff19bfcb477acdbd07521ce851 | [
"Apache-2.0"
] | 1 | 2021-05-11T16:54:56.000Z | 2021-05-11T16:54:56.000Z | src/property_app/database/types.py | amcclosky/property-app | 9afb0210739955ff19bfcb477acdbd07521ce851 | [
"Apache-2.0"
] | 1 | 2020-05-04T06:39:35.000Z | 2020-05-04T06:39:35.000Z | __all__ = ["JSON", "Enum", "PendulumType"]
from datetime import datetime
import pendulum
import sqlalchemy as sa
from sqlalchemy import Enum as _Enum
from sqlalchemy.types import TypeDecorator as _TypeDecorator
from sqlalchemy.dialects.postgresql import JSONB as _JSONB
from sqlalchemy_utils.types.scalar_coercible import ScalarCoercible as _ScalarCoercible
class JSON(_TypeDecorator):  # noqa
    """JSONB column type that stores Python ``None`` as SQL NULL by default."""

    impl = _JSONB

    def __init__(self, *args, **kwargs):
        # Default to none_as_null=True unless the caller chose explicitly.
        kwargs.setdefault("none_as_null", True)
        super().__init__(*args, **kwargs)
class Enum(_TypeDecorator):  # noqa
    """Enum column stored as a plain VARCHAR (no native enum, no CHECK constraint)."""

    impl = _Enum

    def __init__(self, *args, **kwargs):
        kwargs.setdefault("native_enum", False)
        kwargs.setdefault("create_constraint", False)
        # Persist each enum member's .value rather than its .name.
        kwargs.setdefault("values_callable", lambda x: [e.value for e in x])
        super().__init__(*args, **kwargs)
class PendulumType(_TypeDecorator, _ScalarCoercible):
    """TIMESTAMP column type that coerces values to/from pendulum datetimes.

    Values are normalized to UTC before being written to the database and
    are returned to Python as pendulum instances.
    """

    impl = sa.TIMESTAMP

    def __init__(self, *args, **kwargs):
        # Default to a timezone-aware TIMESTAMP unless explicitly disabled.
        if "timezone" not in kwargs:
            kwargs["timezone"] = True
        super().__init__(*args, **kwargs)

    def process_bind_param(self, value, dialect):
        # Outgoing (Python -> DB): convert to UTC; strip tzinfo when the
        # underlying column is naive.
        if value:
            utc_val = self._coerce(value).in_tz("UTC")
            return utc_val if self.impl.timezone else utc_val.naive()
        return value

    def process_result_value(self, value, dialect):
        # Incoming (DB -> Python): wrap plain datetimes in a pendulum instance.
        if value:
            return pendulum.instance(value)
        return value

    def process_literal_param(self, value, dialect):
        return str(value)

    def _coerce(self, value):
        # Accept None, parseable strings, stdlib datetimes, or pendulum objects.
        if value is None:
            return None
        elif isinstance(value, str):
            value = pendulum.parse(value, strict=False)
        elif isinstance(value, datetime):
            value = pendulum.instance(value)
        return value

    @property
    def python_type(self):
        return self.impl.type.python_type
| 25.444444 | 87 | 0.644833 | __all__ = ["JSON", "Enum", "PendulumType"]
from datetime import datetime
import pendulum
import sqlalchemy as sa
from sqlalchemy import Enum as _Enum
from sqlalchemy.types import TypeDecorator as _TypeDecorator
from sqlalchemy.dialects.postgresql import JSONB as _JSONB
from sqlalchemy_utils.types.scalar_coercible import ScalarCoercible as _ScalarCoercible
class JSON(_TypeDecorator):
    """JSONB column type that stores Python ``None`` as SQL NULL by default."""

    impl = _JSONB

    def __init__(self, *args, **kwargs):
        # Default to none_as_null=True unless the caller chose explicitly.
        if "none_as_null" not in kwargs:
            kwargs["none_as_null"] = True
        super().__init__(*args, **kwargs)
class Enum(_TypeDecorator):
    """Enum column stored as a plain VARCHAR (no native enum, no CHECK constraint)."""

    impl = _Enum

    def __init__(self, *args, **kwargs):
        if "native_enum" not in kwargs:
            kwargs["native_enum"] = False
        if "create_constraint" not in kwargs:
            kwargs["create_constraint"] = False
        if "values_callable" not in kwargs:
            # Persist each enum member's .value rather than its .name.
            kwargs["values_callable"] = lambda x: [e.value for e in x]
        super().__init__(*args, **kwargs)
class PendulumType(_TypeDecorator, _ScalarCoercible):
    """TIMESTAMP column type that coerces values to/from pendulum datetimes.

    Values are normalized to UTC before being written to the database and
    are returned to Python as pendulum instances.
    """

    impl = sa.TIMESTAMP

    def __init__(self, *args, **kwargs):
        # Default to a timezone-aware TIMESTAMP unless explicitly disabled.
        if "timezone" not in kwargs:
            kwargs["timezone"] = True
        super().__init__(*args, **kwargs)

    def process_bind_param(self, value, dialect):
        # Outgoing (Python -> DB): convert to UTC; strip tzinfo when the
        # underlying column is naive.
        if value:
            utc_val = self._coerce(value).in_tz("UTC")
            return utc_val if self.impl.timezone else utc_val.naive()
        return value

    def process_result_value(self, value, dialect):
        # Incoming (DB -> Python): wrap plain datetimes in a pendulum instance.
        if value:
            return pendulum.instance(value)
        return value

    def process_literal_param(self, value, dialect):
        return str(value)

    def _coerce(self, value):
        # Accept None, parseable strings, stdlib datetimes, or pendulum objects.
        if value is None:
            return None
        elif isinstance(value, str):
            value = pendulum.parse(value, strict=False)
        elif isinstance(value, datetime):
            value = pendulum.instance(value)
        return value

    @property
    def python_type(self):
        return self.impl.type.python_type
| true | true |
f7f53f6b03374d1e9d565a2959e40987c1c55cf8 | 9,980 | py | Python | core/argParser.py | AmberLJC/FedScale | 45dfda70820e071ea36b0741837419311d9bddaf | [
"Apache-2.0"
] | 1 | 2022-03-28T22:05:31.000Z | 2022-03-28T22:05:31.000Z | core/argParser.py | AmberLJC/FedScale | 45dfda70820e071ea36b0741837419311d9bddaf | [
"Apache-2.0"
] | null | null | null | core/argParser.py | AmberLJC/FedScale | 45dfda70820e071ea36b0741837419311d9bddaf | [
"Apache-2.0"
] | null | null | null | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--job_name', type=str, default='kuiper_job')
parser.add_argument('--log_path', type=str, default='../', help="default path is ../log")
# The basic configuration of the cluster
parser.add_argument('--ps_ip', type=str, default='127.0.0.1')
parser.add_argument('--ps_port', type=str, default='29501')
parser.add_argument('--manager_port', type=int, default='9005')
parser.add_argument('--this_rank', type=int, default=1)
parser.add_argument('--num_executors', type=int, default=4)
parser.add_argument('--executor_configs', type=str, default='') # seperated by ;
parser.add_argument('--total_worker', type=int, default=0)
parser.add_argument('--data_map_file', type=str, default=None)
parser.add_argument('--use_cuda', type=bool, default=True)
parser.add_argument('--cuda_device', type=str, default=None)
parser.add_argument('--time_stamp', type=str, default='logs')
parser.add_argument('--task', type=str, default='cv')
parser.add_argument('--pacer_delta', type=float, default=5)
parser.add_argument('--pacer_step', type=int, default=20)
parser.add_argument('--exploration_alpha', type=float, default=0.3)
parser.add_argument('--exploration_factor', type=float, default=0.9)
parser.add_argument('--exploration_decay', type=float, default=0.98)
parser.add_argument('--sample_window', type=float, default=5.0)
parser.add_argument('--device_avail_file', type=str, default=None)
parser.add_argument('--clock_factor', type=float, default=1.0, help="Refactor the clock time given the profile")
# The configuration of model and dataset
parser.add_argument('--data_dir', type=str, default='~/cifar10/')
parser.add_argument('--device_conf_file', type=str, default='/tmp/client.cfg')
parser.add_argument('--model', type=str, default='shufflenet_v2_x2_0')
parser.add_argument('--data_set', type=str, default='cifar10')
parser.add_argument('--sample_mode', type=str, default='random')
parser.add_argument('--filter_less', type=int, default=32)
# Use an integer default to match type=int (1e15 is a float literal; argparse
# applies `type` only to string values, so a float default would leak through).
parser.add_argument('--filter_more', type=int, default=10**15)
parser.add_argument('--train_uniform', type=bool, default=False)
parser.add_argument('--conf_path', type=str, default='~/dataset/')
parser.add_argument('--overcommitment', type=float, default=1.3)
parser.add_argument('--model_size', type=float, default=65536)
parser.add_argument('--round_threshold', type=float, default=30)
parser.add_argument('--round_penalty', type=float, default=2.0)
parser.add_argument('--clip_bound', type=float, default=0.9)
parser.add_argument('--blacklist_rounds', type=int, default=-1)
parser.add_argument('--blacklist_max_len', type=float, default=0.3)
parser.add_argument('--embedding_file', type=str, default = 'glove.840B.300d.txt')
# The configuration of different hyper-parameters for training
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--local_steps', type=int, default=20)
parser.add_argument('--batch_size', type=int, default=30)
parser.add_argument('--test_bsz', type=int, default=128)
parser.add_argument('--backend', type=str, default="gloo")
parser.add_argument('--upload_epoch', type=int, default=20)
parser.add_argument('--learning_rate', type=float, default=5e-2)
parser.add_argument('--min_learning_rate', type=float, default=5e-5)
parser.add_argument('--input_dim', type=int, default=0)
parser.add_argument('--output_dim', type=int, default=0)
parser.add_argument('--dump_epoch', type=int, default=1e10)
parser.add_argument('--decay_factor', type=float, default=0.98)
parser.add_argument('--decay_epoch', type=float, default=10)
parser.add_argument('--num_loaders', type=int, default=2)
parser.add_argument('--eval_interval', type=int, default=5)
parser.add_argument('--sample_seed', type=int, default=233) #123 #233
parser.add_argument('--test_ratio', type=float, default=0.05)
parser.add_argument('--loss_decay', type=float, default=0.2)
parser.add_argument('--exploration_min', type=float, default=0.3)
parser.add_argument('--cut_off_util', type=float, default=0.05) # 95 percentile
parser.add_argument('--gradient_policy', type=str, default=None)
# for yogi
parser.add_argument('--yogi_eta', type=float, default=3e-3)
parser.add_argument('--yogi_tau', type=float, default=1e-8)
parser.add_argument('--yogi_beta', type=float, default=0.9)
parser.add_argument('--yogi_beta2', type=float, default=0.99)
# for prox
parser.add_argument('--proxy_mu', type=float, default=0.1)
# for detection
parser.add_argument('--cfg_file', type=str, default='./utils/rcnn/cfgs/res101.yml')
parser.add_argument('--test_output_dir', type=str, default='./logs/server')
parser.add_argument('--train_size_file', type=str, default='')
parser.add_argument('--test_size_file', type=str, default='')
parser.add_argument('--data_cache', type=str, default='')
parser.add_argument('--backbone', type=str, default='./resnet50.pth')
# for malicious
parser.add_argument('--malicious_factor', type=int, default=1e15)
# for differential privacy
parser.add_argument('--noise_factor', type=float, default=0.1)
# for albert
parser.add_argument(
"--line_by_line",
action="store_true",
help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
)
parser.add_argument('--clf_block_size', type=int, default=32)
parser.add_argument(
"--mlm", type=bool, default=False, help="Train with masked-language modeling loss instead of language modeling."
)
parser.add_argument(
"--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--block_size",
default=64,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument("--weight_decay", default=0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
# for tag prediction
parser.add_argument("--vocab_token_size", type=int, default=10000, help="For vocab token size")
parser.add_argument("--vocab_tag_size", type=int, default=500, help="For vocab tag size")
# for rl example
parser.add_argument("--epsilon", type=float, default=0.9, help="greedy policy")
parser.add_argument("--gamma", type=float, default=0.9, help="reward discount")
parser.add_argument("--memory_capacity", type=int, default=2000, help="memory capacity")
parser.add_argument("--target_replace_iter", type=int, default=15, help="update frequency")
parser.add_argument("--n_actions", type=int, default=2, help="action number")
parser.add_argument("--n_states", type=int, default=4, help="state number")
# for speech
parser.add_argument("--num_classes", type=int, default=35, help="For number of classes in speech")
# for voice
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/train_manifest.csv')
parser.add_argument('--test-manifest', metavar='DIR',
help='path to test manifest csv', default='data/test_manifest.csv')
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=256, type=int, help='Hidden size of RNNs')
parser.add_argument('--hidden-layers', default=7, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='lstm', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--speed-volume-perturb', dest='speed_volume_perturb', action='store_true',
help='Use random tempo and gain perturbations.')
parser.add_argument('--spec-augment', dest='spec_augment', action='store_true',
help='Use simple spectral augmentation on mel spectograms.')
parser.add_argument('--noise-dir', default=None,
help='Directory to inject noise into audio. If default, noise Inject not added')
parser.add_argument('--noise-prob', default=0.4, help='Probability of noise being added per sample')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
help='Maximum noise levels to sample from. Maximum 1.0', type=float)
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
args = parser.parse_args()
# Number of output classes per dataset; unknown datasets default to 10.
datasetCategories = {
    'Mnist': 10, 'cifar10': 10, "imagenet": 1000, 'emnist': 47,
    'openImg': 596, 'google_speech': 35, 'femnist': 62, 'yelp': 5,
}
# Profiled relative speed w.r.t. Mobilenet.
model_factor = {
    'shufflenet': 0.0644 / 0.0554,
    'albert': 0.335 / 0.0554,
    'resnet': 0.135 / 0.0554,
}
args.num_class = datasetCategories.get(args.data_set, 10)
# Scale the simulated clock by the first matching model's factor.
for model_name, factor in model_factor.items():
    if model_name in args.model:
        args.clock_factor = args.clock_factor * factor
        break
| 50.659898 | 118 | 0.734369 | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--job_name', type=str, default='kuiper_job')
parser.add_argument('--log_path', type=str, default='../', help="default path is ../log")
parser.add_argument('--ps_ip', type=str, default='127.0.0.1')
parser.add_argument('--ps_port', type=str, default='29501')
parser.add_argument('--manager_port', type=int, default='9005')
parser.add_argument('--this_rank', type=int, default=1)
parser.add_argument('--num_executors', type=int, default=4)
parser.add_argument('--executor_configs', type=str, default='')
parser.add_argument('--total_worker', type=int, default=0)
parser.add_argument('--data_map_file', type=str, default=None)
parser.add_argument('--use_cuda', type=bool, default=True)
parser.add_argument('--cuda_device', type=str, default=None)
parser.add_argument('--time_stamp', type=str, default='logs')
parser.add_argument('--task', type=str, default='cv')
parser.add_argument('--pacer_delta', type=float, default=5)
parser.add_argument('--pacer_step', type=int, default=20)
parser.add_argument('--exploration_alpha', type=float, default=0.3)
parser.add_argument('--exploration_factor', type=float, default=0.9)
parser.add_argument('--exploration_decay', type=float, default=0.98)
parser.add_argument('--sample_window', type=float, default=5.0)
parser.add_argument('--device_avail_file', type=str, default=None)
parser.add_argument('--clock_factor', type=float, default=1.0, help="Refactor the clock time given the profile")
parser.add_argument('--data_dir', type=str, default='~/cifar10/')
parser.add_argument('--device_conf_file', type=str, default='/tmp/client.cfg')
parser.add_argument('--model', type=str, default='shufflenet_v2_x2_0')
parser.add_argument('--data_set', type=str, default='cifar10')
parser.add_argument('--sample_mode', type=str, default='random')
parser.add_argument('--filter_less', type=int, default=32)
parser.add_argument('--filter_more', type=int, default=1e15)
parser.add_argument('--train_uniform', type=bool, default=False)
parser.add_argument('--conf_path', type=str, default='~/dataset/')
parser.add_argument('--overcommitment', type=float, default=1.3)
parser.add_argument('--model_size', type=float, default=65536)
parser.add_argument('--round_threshold', type=float, default=30)
parser.add_argument('--round_penalty', type=float, default=2.0)
parser.add_argument('--clip_bound', type=float, default=0.9)
parser.add_argument('--blacklist_rounds', type=int, default=-1)
parser.add_argument('--blacklist_max_len', type=float, default=0.3)
parser.add_argument('--embedding_file', type=str, default = 'glove.840B.300d.txt')
parser.add_argument('--epochs', type=int, default=1000)
parser.add_argument('--local_steps', type=int, default=20)
parser.add_argument('--batch_size', type=int, default=30)
parser.add_argument('--test_bsz', type=int, default=128)
parser.add_argument('--backend', type=str, default="gloo")
parser.add_argument('--upload_epoch', type=int, default=20)
parser.add_argument('--learning_rate', type=float, default=5e-2)
parser.add_argument('--min_learning_rate', type=float, default=5e-5)
parser.add_argument('--input_dim', type=int, default=0)
parser.add_argument('--output_dim', type=int, default=0)
parser.add_argument('--dump_epoch', type=int, default=1e10)
parser.add_argument('--decay_factor', type=float, default=0.98)
parser.add_argument('--decay_epoch', type=float, default=10)
parser.add_argument('--num_loaders', type=int, default=2)
parser.add_argument('--eval_interval', type=int, default=5)
# Fix: comment stripping fused two statements into invalid syntax
# ("...default=233) ser.add_argument(...)"); restore the two add_argument calls.
parser.add_argument('--sample_seed', type=int, default=233)
parser.add_argument('--test_ratio', type=float, default=0.05)
parser.add_argument('--loss_decay', type=float, default=0.2)
parser.add_argument('--exploration_min', type=float, default=0.3)
parser.add_argument('--cut_off_util', type=float, default=0.05)
parser.add_argument('--gradient_policy', type=str, default=None)
parser.add_argument('--yogi_eta', type=float, default=3e-3)
parser.add_argument('--yogi_tau', type=float, default=1e-8)
parser.add_argument('--yogi_beta', type=float, default=0.9)
parser.add_argument('--yogi_beta2', type=float, default=0.99)
parser.add_argument('--proxy_mu', type=float, default=0.1)
parser.add_argument('--cfg_file', type=str, default='./utils/rcnn/cfgs/res101.yml')
parser.add_argument('--test_output_dir', type=str, default='./logs/server')
parser.add_argument('--train_size_file', type=str, default='')
parser.add_argument('--test_size_file', type=str, default='')
parser.add_argument('--data_cache', type=str, default='')
parser.add_argument('--backbone', type=str, default='./resnet50.pth')
parser.add_argument('--malicious_factor', type=int, default=1e15)
parser.add_argument('--noise_factor', type=float, default=0.1)
parser.add_argument(
"--line_by_line",
action="store_true",
help="Whether distinct lines of text in the dataset are to be handled as distinct sequences.",
)
parser.add_argument('--clf_block_size', type=int, default=32)
parser.add_argument(
"--mlm", type=bool, default=False, help="Train with masked-language modeling loss instead of language modeling."
)
parser.add_argument(
"--mlm_probability", type=float, default=0.15, help="Ratio of tokens to mask for masked language modeling loss"
)
parser.add_argument(
"--overwrite_cache", type=bool, default=False, help="Overwrite the cached training and evaluation sets"
)
parser.add_argument(
"--block_size",
default=64,
type=int,
help="Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens).",
)
parser.add_argument("--weight_decay", default=0, type=float, help="Weight decay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--vocab_token_size", type=int, default=10000, help="For vocab token size")
parser.add_argument("--vocab_tag_size", type=int, default=500, help="For vocab tag size")
parser.add_argument("--epsilon", type=float, default=0.9, help="greedy policy")
parser.add_argument("--gamma", type=float, default=0.9, help="reward discount")
parser.add_argument("--memory_capacity", type=int, default=2000, help="memory capacity")
parser.add_argument("--target_replace_iter", type=int, default=15, help="update frequency")
parser.add_argument("--n_actions", type=int, default=2, help="action number")
parser.add_argument("--n_states", type=int, default=4, help="state number")
parser.add_argument("--num_classes", type=int, default=35, help="For number of classes in speech")
parser.add_argument('--train-manifest', metavar='DIR',
help='path to train manifest csv', default='data/train_manifest.csv')
parser.add_argument('--test-manifest', metavar='DIR',
help='path to test manifest csv', default='data/test_manifest.csv')
parser.add_argument('--sample-rate', default=16000, type=int, help='Sample rate')
parser.add_argument('--labels-path', default='labels.json', help='Contains all characters for transcription')
parser.add_argument('--window-size', default=.02, type=float, help='Window size for spectrogram in seconds')
parser.add_argument('--window-stride', default=.01, type=float, help='Window stride for spectrogram in seconds')
parser.add_argument('--window', default='hamming', help='Window type for spectrogram generation')
parser.add_argument('--hidden-size', default=256, type=int, help='Hidden size of RNNs')
parser.add_argument('--hidden-layers', default=7, type=int, help='Number of RNN layers')
parser.add_argument('--rnn-type', default='lstm', help='Type of the RNN. rnn|gru|lstm are supported')
parser.add_argument('--finetune', dest='finetune', action='store_true',
help='Finetune the model from checkpoint "continue_from"')
parser.add_argument('--speed-volume-perturb', dest='speed_volume_perturb', action='store_true',
help='Use random tempo and gain perturbations.')
parser.add_argument('--spec-augment', dest='spec_augment', action='store_true',
help='Use simple spectral augmentation on mel spectograms.')
parser.add_argument('--noise-dir', default=None,
help='Directory to inject noise into audio. If default, noise Inject not added')
parser.add_argument('--noise-prob', default=0.4, help='Probability of noise being added per sample')
parser.add_argument('--noise-min', default=0.0,
help='Minimum noise level to sample from. (1.0 means all noise, not original signal)', type=float)
parser.add_argument('--noise-max', default=0.5,
help='Maximum noise levels to sample from. Maximum 1.0', type=float)
parser.add_argument('--no-bidirectional', dest='bidirectional', action='store_false', default=True,
help='Turn off bi-directional RNNs, introduces lookahead convolution')
args = parser.parse_args()
# Number of output classes per dataset; unknown datasets default to 10.
datasetCategories = {'Mnist': 10, 'cifar10': 10, "imagenet": 1000, 'emnist': 47,
                    'openImg': 596, 'google_speech': 35, 'femnist': 62, 'yelp': 5
                    }
# Profiled relative speed w.r.t. Mobilenet.
model_factor = {'shufflenet': 0.0644/0.0554,
                'albert': 0.335/0.0554,
                'resnet': 0.135/0.0554,
                }
args.num_class = datasetCategories[args.data_set] if args.data_set in datasetCategories else 10
# Scale the simulated clock by the first matching model's factor.
for model_name in model_factor:
    if model_name in args.model:
        args.clock_factor = args.clock_factor * model_factor[model_name]
        break
| true | true |
f7f540737ee2ab40548a794d026d0f314441b9f0 | 443 | py | Python | day05/python/util.py | jaredkwright/AdventOfCode2018 | fe27e994d5241d723971cdfbb99b0d59fe0d8736 | [
"Apache-2.0"
] | null | null | null | day05/python/util.py | jaredkwright/AdventOfCode2018 | fe27e994d5241d723971cdfbb99b0d59fe0d8736 | [
"Apache-2.0"
] | null | null | null | day05/python/util.py | jaredkwright/AdventOfCode2018 | fe27e994d5241d723971cdfbb99b0d59fe0d8736 | [
"Apache-2.0"
] | null | null | null | def is_uppercase_letter(c):
return ord(c) >= 65 or ord(c) <= 90
def is_lowercase_letter(c):
    """Return True if c is an ASCII lowercase letter (a-z).

    Bug fix: the original tested the UPPERCASE range (65-90) joined with
    `or`, which is always true; use the lowercase range with a conjunction.
    """
    return 97 <= ord(c) <= 122
def is_polarized(a, b):
    """Return True if a and b are the same ASCII letter in opposite cases.

    Two units react ("polarize") exactly when their code points differ by
    32, e.g. 'a' (97) vs 'A' (65).

    Fixes: the original guard only rejected the asymmetric case "a is not a
    letter AND b is a letter"; reject whenever either argument is not a
    letter. The swap-and-recurse is replaced by abs().
    """
    if not ((is_lowercase_letter(a) or is_uppercase_letter(a)) and
            (is_lowercase_letter(b) or is_uppercase_letter(b))):
        # Non-letter input can never react.
        return False
    return abs(ord(a) - ord(b)) == 32
def is_uppercase_letter(c):
    """Return True if c is an ASCII uppercase letter (A-Z).

    Bug fix: the original used `or` between the two bounds, which is true
    for every character; range membership requires both bounds to hold.
    """
    return 65 <= ord(c) <= 90
def is_lowercase_letter(c):
    """Return True if c is an ASCII lowercase letter (a-z).

    Bug fix: the original tested the UPPERCASE range (65-90) joined with
    `or`, which is always true; use the lowercase range with a conjunction.
    """
    return 97 <= ord(c) <= 122
def is_polarized(a, b):
    """Return True if a and b are the same ASCII letter in opposite cases.

    Two units react ("polarize") exactly when their code points differ by
    32, e.g. 'a' (97) vs 'A' (65).

    Fixes: the original guard only rejected the asymmetric case "a is not a
    letter AND b is a letter"; reject whenever either argument is not a
    letter. The swap-and-recurse is replaced by abs().
    """
    if not ((is_lowercase_letter(a) or is_uppercase_letter(a)) and
            (is_lowercase_letter(b) or is_uppercase_letter(b))):
        # Non-letter input can never react.
        return False
    return abs(ord(a) - ord(b)) == 32
| true | true |
f7f542233e1bd5f2f48a70af256ceebab1295dae | 2,094 | py | Python | examples/adspygoogle/dfp/v201306/get_custom_targeting_keys_by_statement.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201306/get_custom_targeting_keys_by_statement.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | null | null | null | examples/adspygoogle/dfp/v201306/get_custom_targeting_keys_by_statement.py | cherry-wb/googleads-python-lib | 24a1ecb7c1cca5af3624a3b03ebaa7f5147b4a04 | [
"Apache-2.0"
] | 2 | 2020-04-02T19:00:31.000Z | 2020-08-06T03:28:38.000Z | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example gets all predefined custom targeting keys. The statement
retrieves up to the maximum page size limit of 500. To create custom
targeting keys, run create_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201306')
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'PREDEFINED'
}
}]
filter_statement = {'query': 'WHERE type = :type LIMIT 500',
'values': values}
# Get custom targeting keys by statement.
response = custom_targeting_service.GetCustomTargetingKeysByStatement(
filter_statement)[0]
keys = []
if 'results' in response:
keys = response['results']
# Display results.
if keys:
for key in keys:
print ('Custom targeting key with id \'%s\', name \'%s\', display name '
'\'%s\', and type \'%s\' was found.'
% (key['id'], key['name'], key['displayName'], key['type']))
else:
print 'No keys were found.'
| 32.215385 | 80 | 0.69341 |
"""This example gets all predefined custom targeting keys. The statement
retrieves up to the maximum page size limit of 500. To create custom
targeting keys, run create_custom_targeting_keys_and_values.py."""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
from adspygoogle import DfpClient
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
custom_targeting_service = client.GetService(
'CustomTargetingService', version='v201306')
values = [{
'key': 'type',
'value': {
'xsi_type': 'TextValue',
'value': 'PREDEFINED'
}
}]
filter_statement = {'query': 'WHERE type = :type LIMIT 500',
'values': values}
response = custom_targeting_service.GetCustomTargetingKeysByStatement(
filter_statement)[0]
keys = []
if 'results' in response:
keys = response['results']
if keys:
for key in keys:
print ('Custom targeting key with id \'%s\', name \'%s\', display name '
'\'%s\', and type \'%s\' was found.'
% (key['id'], key['name'], key['displayName'], key['type']))
else:
print 'No keys were found.'
| false | true |
f7f5423260697bd9a94eb7c0636a3d89f592027e | 1,065 | py | Python | tests/basic/for_in.py | nanobowers/py2cr | b7deb8c227cf43ce0e1bf7d638d5b4c00b7e9bdb | [
"MIT"
] | 61 | 2021-10-06T03:29:45.000Z | 2022-02-11T20:42:16.000Z | tests/basic/for_in.py | nanobowers/py2cr | b7deb8c227cf43ce0e1bf7d638d5b4c00b7e9bdb | [
"MIT"
] | 2 | 2021-12-27T03:05:30.000Z | 2021-12-27T18:10:33.000Z | tests/basic/for_in.py | nanobowers/py2cr | b7deb8c227cf43ce0e1bf7d638d5b4c00b7e9bdb | [
"MIT"
] | 2 | 2021-12-27T16:35:46.000Z | 2021-12-28T10:41:49.000Z | from typing import List, Dict
# iterating over a list
print('-- list --')
a = [1,2,3,4,5]
for x in a:
print(x)
# iterating over a tuple
print('-- tuple else case --')
t = ('cats','dogs','squirrels')
for x1 in t:
print(x1)
else:
print('ok')
print('-- tuple else break case --')
for x2 in t:
print(x2)
if x2 == 'squirrels':
break
else:
print('ok')
# iterating over a dictionary
# sort order in python is undefined, so need to sort the results
# explictly before comparing output
# Iterate a dict's keys; collect then sort so the printed order is stable.
print('-- dict keys --')
dct = {'a': 1, 'b': 2, 'c': 3}
keys: List[str] = [key_name for key_name in dct]
keys.sort()
for k in keys:
    print(k)
print('-- dict values --')
values : List[int] = list()
for v in dct.values():
values.append(v)
values.sort()
for v in values:
print(v)
items : Dict[str,int] = dict()
for k, v in dct.items():
items[k] = v
print('-- dict item --')
print(items['a'])
print(items['b'])
print(items['c'])
# iterating over a string
print('-- string --')
aaa = 'defabc'
for x4 in aaa:
print(x4)
| 16.640625 | 64 | 0.598122 | from typing import List, Dict
print('-- list --')
a = [1,2,3,4,5]
for x in a:
print(x)
print('-- tuple else case --')
t = ('cats','dogs','squirrels')
for x1 in t:
print(x1)
else:
print('ok')
print('-- tuple else break case --')
for x2 in t:
print(x2)
if x2 == 'squirrels':
break
else:
print('ok')
print('-- dict keys --')
dct = {'a':1,'b':2,'c':3 }
keys : List[str] = []
for x3 in dct.keys():
keys.append(x3)
keys.sort()
for k in keys:
print(k)
print('-- dict values --')
values : List[int] = list()
for v in dct.values():
values.append(v)
values.sort()
for v in values:
print(v)
items : Dict[str,int] = dict()
for k, v in dct.items():
items[k] = v
print('-- dict item --')
print(items['a'])
print(items['b'])
print(items['c'])
print('-- string --')
aaa = 'defabc'
for x4 in aaa:
print(x4)
| true | true |
f7f5441d878fdc6a64f1efc370d539c613bcae2b | 1,652 | py | Python | irekua_database/admin/models.py | CONABIO-audio/irekua-database | abaf3eb3c5273cdb973c7ac1b921ab2f9759042c | [
"BSD-4-Clause"
] | null | null | null | irekua_database/admin/models.py | CONABIO-audio/irekua-database | abaf3eb3c5273cdb973c7ac1b921ab2f9759042c | [
"BSD-4-Clause"
] | 18 | 2019-10-31T21:41:42.000Z | 2022-03-12T00:03:54.000Z | selia_admin/admin/models.py | CONABIO-audio/selia-admin | 0fe3326d63de7904f86f3040cb613801737880f7 | [
"BSD-4-Clause"
] | 1 | 2021-05-06T19:38:21.000Z | 2021-05-06T19:38:21.000Z | from django.contrib import admin
class CustomModelAdmin(admin.ModelAdmin):
    """Base admin that stamps created_by/modified_by with the request user."""

    date_hierarchy = 'created_on'

    def save_model(self, request, instance, form, change):
        editor = request.user
        instance = form.save(commit=False)

        # Only stamp the creator on new objects (or ones missing it).
        if not (change and instance.created_by):
            instance.created_by = editor
        # The editor is always recorded as the last modifier.
        instance.modified_by = editor

        instance.save()
        form.save_m2m()
        return instance
class ModelAdmin(CustomModelAdmin):
    """Admin configuration for machine-learning model objects."""

    # Allow admin search (and autocomplete lookups) by model name.
    search_fields = ['name']
    # Columns shown on the change-list page.
    list_display = (
        'id',
        'name',
        'annotation_type',
    )
    # Layout of the change form; tuples group fields on one row.
    fields = (
        ('name', 'repository'),
        'description',
        ('annotation_type', 'item_types'),
        ('terms', 'event_types'),
    )
    # Render these relations as autocomplete widgets instead of huge selects.
    autocomplete_fields = [
        'terms',
        'annotation_type',
        'event_types',
        'item_types']
class ModelVersionAdmin(CustomModelAdmin):
    """Admin configuration for versions of a model."""

    # Search by the related model's name.
    search_fields = ['model__name']
    # The version string links to the change form.
    list_display_links = ('version', )
    list_filter = ('created_on', 'created_by')
    # Columns shown on the change-list page.
    list_display = (
        'id',
        'model',
        'version',
        'created_on'
    )
    # Single row with the model and its version string.
    fields = (
        ('model', 'version'),)
    autocomplete_fields = ['model']
class ModelPredictionAdmin(CustomModelAdmin):
    """Admin configuration for predictions produced by a model version."""

    # Columns shown on the change-list page.
    list_display = (
        'id',
        'item',
        'model_version',
        'event_type',
        'certainty',
        'created_on',
        'created_by',
    )
    # Layout of the change form; tuples group fields on one row.
    fields = (
        ('item', 'model_version', 'event_type'),
        'annotation',
        ('certainty', 'labels'))
    # Render these relations as autocomplete widgets instead of huge selects.
    autocomplete_fields = [
        'item',
        'labels',
        'model_version',
        'event_type']
| 22.944444 | 58 | 0.556295 | from django.contrib import admin
class CustomModelAdmin(admin.ModelAdmin):
date_hierarchy = 'created_on'
def save_model(self, request, instance, form, change):
user = request.user
instance = form.save(commit=False)
if not change or not instance.created_by:
instance.created_by = user
instance.modified_by = user
instance.save()
form.save_m2m()
return instance
class ModelAdmin(CustomModelAdmin):
search_fields = ['name']
list_display = (
'id',
'name',
'annotation_type',
)
fields = (
('name', 'repository'),
'description',
('annotation_type', 'item_types'),
('terms', 'event_types'),
)
autocomplete_fields = [
'terms',
'annotation_type',
'event_types',
'item_types']
class ModelVersionAdmin(CustomModelAdmin):
search_fields = ['model__name']
list_display_links = ('version', )
list_filter = ('created_on', 'created_by')
list_display = (
'id',
'model',
'version',
'created_on'
)
fields = (
('model', 'version'),)
autocomplete_fields = ['model']
class ModelPredictionAdmin(CustomModelAdmin):
list_display = (
'id',
'item',
'model_version',
'event_type',
'certainty',
'created_on',
'created_by',
)
fields = (
('item', 'model_version', 'event_type'),
'annotation',
('certainty', 'labels'))
autocomplete_fields = [
'item',
'labels',
'model_version',
'event_type']
| true | true |
f7f544decaa6c54e8d88a2dd1c8c1db481c1336b | 2,951 | py | Python | test/cl/func/conv/testcase.py | pulp-platform/q-eegnet_wolf | f028a727d4ae346b81539ed78ecba5f059c9029f | [
"Apache-2.0"
] | null | null | null | test/cl/func/conv/testcase.py | pulp-platform/q-eegnet_wolf | f028a727d4ae346b81539ed78ecba5f059c9029f | [
"Apache-2.0"
] | null | null | null | test/cl/func/conv/testcase.py | pulp-platform/q-eegnet_wolf | f028a727d4ae346b81539ed78ecba5f059c9029f | [
"Apache-2.0"
] | null | null | null | """
This file will test the convolution implementation
"""
__author__ = "Tibor Schneider"
__email__ = "sctibor@student.ethz.ch"
__version__ = "1.0"
__license__ = "Apache 2.0"
__copyright__ = """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Tibor Schneider, ETH Zurich
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import os
import numpy as np
from test_utils import parse_output, TestLogger
from header_file import HeaderFile, HeaderConstant, HeaderArray
from makefile import Makefile
TESTNAME = "cl::func::conv"
RESULT_FILE = "result.out"
def gen_stimuli(size_a=1125, size_b=64):
    """Generate random int8-range input vectors and their expected output.

    Returns a tuple ``(vec_a, vec_b, expected)`` where ``expected`` is the
    valid-mode convolution of the two vectors.
    """
    vec_a = [random.randint(-128, 127) for _ in range(size_a)]
    vec_b = [random.randint(-128, 127) for _ in range(size_b)]
    expected = list(np.convolve(vec_a, vec_b, mode="valid"))
    return vec_a, vec_b, expected
def test():
    """
    Execute the tests

    Returns: (n_total, n_success)
    """
    logger = TestLogger(TESTNAME)

    sizes = [(155, 16), (1188, 64), (4096, 128)]
    versions = [0, 1, 2, 3]

    for len_a, len_b in sizes:
        for version in versions:
            # Build the makefile for this configuration.
            mkf = Makefile()
            mkf.add_fc_test_source("test.c")
            mkf.add_cl_test_source("cluster.c")
            mkf.add_cl_prog_source("func/conv.c")
            mkf.add_define("CONV_VERSION", version)
            mkf.write()

            # Generate random stimuli and the expected convolution.
            vec_a, vec_b, vec_exp = gen_stimuli(len_a, len_b)

            # Write the stimuli header consumed by the C test.
            header = HeaderFile("test_stimuli.h")
            for const_name, value in (("LENGTH_A", len_a),
                                      ("LENGTH_B", len_b),
                                      ("LENGTH_RES", len(vec_exp))):
                header.add(HeaderConstant(const_name, value))
            for arr_name, dtype, data in (("vecA", "int8_t", vec_a),
                                          ("vecB", "int8_t", vec_b),
                                          ("vecExp", "int32_t", vec_exp)):
                header.add(HeaderArray(arr_name, dtype, data))
            header.write()

            # Compile, run on the target and collect the output.
            os.system("make clean all run > {}".format(RESULT_FILE))
            result = parse_output(RESULT_FILE)

            casename = "V{}, {}x{}".format(version, len_a, len_b)
            logger.show_subcase_result(casename, result)

    # Return summary (total and successful subcases).
    return logger.summary()
| 30.739583 | 75 | 0.645883 |
__author__ = "Tibor Schneider"
__email__ = "sctibor@student.ethz.ch"
__version__ = "1.0"
__license__ = "Apache 2.0"
__copyright__ = """
Copyright (C) 2020 ETH Zurich. All rights reserved.
Author: Tibor Schneider, ETH Zurich
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the License); you may
not use this file except in compliance with the License.
You may obtain a copy of the License at
www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an AS IS BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import os
import numpy as np
from test_utils import parse_output, TestLogger
from header_file import HeaderFile, HeaderConstant, HeaderArray
from makefile import Makefile
TESTNAME = "cl::func::conv"
RESULT_FILE = "result.out"
def gen_stimuli(size_a = 1125, size_b = 64):
vecA = [random.randint(-128, 127) for _ in range(size_a)]
vecB = [random.randint(-128, 127) for _ in range(size_b)]
result = list(np.convolve(vecA, vecB, mode="valid"))
return vecA, vecB, result
def test():
logger = TestLogger(TESTNAME)
for size_a, size_b in [(155, 16), (1188, 64), (4096, 128)]:
for conv_version in [0, 1, 2, 3]:
mkf = Makefile()
mkf.add_fc_test_source("test.c")
mkf.add_cl_test_source("cluster.c")
mkf.add_cl_prog_source("func/conv.c")
mkf.add_define("CONV_VERSION", conv_version)
mkf.write()
vecA, vecB, vecExp = gen_stimuli(size_a, size_b)
header = HeaderFile("test_stimuli.h")
header.add(HeaderConstant("LENGTH_A", size_a))
header.add(HeaderConstant("LENGTH_B", size_b))
header.add(HeaderConstant("LENGTH_RES", len(vecExp)))
header.add(HeaderArray("vecA", "int8_t", vecA))
header.add(HeaderArray("vecB", "int8_t", vecB))
header.add(HeaderArray("vecExp", "int32_t", vecExp))
header.write()
os.system("make clean all run > {}".format(RESULT_FILE))
result = parse_output(RESULT_FILE)
casename = "V{}, {}x{}".format(conv_version, size_a, size_b)
logger.show_subcase_result(casename, result)
return logger.summary()
| true | true |
f7f54521315129e0072a7002d1400b31a711e849 | 2,486 | py | Python | test.py | hoangdzung/DGI | 12203ec30bd3e09770e79da2d967b613b8c8e79d | [
"MIT"
] | null | null | null | test.py | hoangdzung/DGI | 12203ec30bd3e09770e79da2d967b613b8c8e79d | [
"MIT"
] | null | null | null | test.py | hoangdzung/DGI | 12203ec30bd3e09770e79da2d967b613b8c8e79d | [
"MIT"
] | null | null | null | import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch import nn
from torch_geometric.nn import GCNConv
from sklearn.linear_model import LogisticRegression
import numpy as np
from tqdm import tqdm
from gumbel import gumbel_softmax
from utils import process
num_epochs = 100000
lr = 0.001
weight_decay = 0
class GCNet(nn.Module):
    """Single-layer graph-convolutional encoder producing node embeddings."""

    def __init__(self, num_features, num_embedding=128):
        super(GCNet, self).__init__()
        # One cached GCN layer mapping input features to the embedding space.
        self.conv = GCNConv(num_features, num_embedding, cached=True)

    def forward(self, x, edge_index):
        return self.conv(x, edge_index)
# ---- Data loading and preprocessing ----
# process.load_data returns the adjacency matrix, node features, one-hot
# labels and train/val/test index splits for the 'cora' dataset.
adj, features, labels, idx_train, idx_val, idx_test = process.load_data('cora')
#features, _ = process.preprocess_features(features)
features = features.toarray()
#features=np.array(features)
nb_nodes = features.shape[0]
ft_size = features.shape[1]
nb_classes = labels.shape[1]
# Convert one-hot label rows to integer class ids.
labels = np.argmax(labels, 1)
model = GCNet(ft_size)
# Densify the graph tensors; neither is trained, so no gradients needed.
adj = Variable(torch.FloatTensor(adj.toarray()), requires_grad=False)
features = Variable(torch.FloatTensor(features), requires_grad=False)
# Edge list in COO layout (2 x num_edges) built from nonzero adjacency entries.
edge_index = torch.transpose(adj.nonzero(),0,1)
edge_index = edge_index.long()
if torch.cuda.is_available():
    model = model.cuda()
    adj = adj.cuda()
    features = features.cuda()
    edge_index = edge_index.cuda()
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)
smallest_loss = 1e20
embeddings_np = None
best_at = 0  # NOTE(review): never updated below -- looks like dead state
# ---- Training loop: minimize a soft normalized-cut style objective ----
for epoch in tqdm(range(num_epochs)):
    model.train()
    model.zero_grad()
    embeddings = model(features, edge_index)
    # hard=True Gumbel-softmax: presumably a (near) one-hot cluster
    # assignment per node -- TODO confirm against gumbel.py.
    assign_tensor = gumbel_softmax(embeddings, temp=0.1,hard=True)
    assign_tensor_t = torch.transpose(assign_tensor, 0, 1)
    super_adj = assign_tensor_t @ adj @ assign_tensor # A' = S^T*A*S
    # Per-cluster cut ratio: (volume - internal edges) / volume.
    vol = super_adj.sum(1)
    diag = torch.diagonal(super_adj)
    norm_cut = (vol - diag)/(vol+1e-20)
    loss = norm_cut.sum()
    loss.backward()
    nn.utils.clip_grad_norm_(model.parameters(), 2.0)
    optimizer.step()
    # Whenever the loss improves, probe embedding quality with a
    # logistic-regression classifier on the train/test split.
    if loss.item() < smallest_loss:
        smallest_loss = loss.item()
        embeddings_np = embeddings.cpu().detach().numpy()
        X_train = embeddings_np[idx_train]
        Y_train = labels[idx_train]
        X_test = embeddings_np[idx_test]
        Y_test = labels[idx_test]
        clf = LogisticRegression(solver="lbfgs", max_iter=4000)
        clf.fit(X_train, Y_train)
        print(loss.item(), clf.score(X_test, Y_test))
# import pdb; pdb.set_trace()
| 29.595238 | 118 | 0.720032 | import torch
import torch.nn.functional as F
from torch.autograd import Variable
from torch import nn
from torch_geometric.nn import GCNConv
from sklearn.linear_model import LogisticRegression
import numpy as np
from tqdm import tqdm
from gumbel import gumbel_softmax
from utils import process
num_epochs = 100000
lr = 0.001
weight_decay = 0
class GCNet(nn.Module):
def __init__(self, num_features, num_embedding = 128):
super(GCNet, self).__init__()
self.conv = GCNConv(num_features, num_embedding, cached=True)
def forward(self, x, edge_index):
x = self.conv(x, edge_index)
return x
adj, features, labels, idx_train, idx_val, idx_test = process.load_data('cora')
features = features.toarray()
nb_nodes = features.shape[0]
ft_size = features.shape[1]
nb_classes = labels.shape[1]
labels = np.argmax(labels, 1)
model = GCNet(ft_size)
adj = Variable(torch.FloatTensor(adj.toarray()), requires_grad=False)
features = Variable(torch.FloatTensor(features), requires_grad=False)
edge_index = torch.transpose(adj.nonzero(),0,1)
edge_index = edge_index.long()
if torch.cuda.is_available():
model = model.cuda()
adj = adj.cuda()
features = features.cuda()
edge_index = edge_index.cuda()
optimizer = torch.optim.Adam(filter(lambda p : p.requires_grad, model.parameters()), lr=lr, weight_decay=weight_decay)
smallest_loss = 1e20
embeddings_np = None
best_at = 0
for epoch in tqdm(range(num_epochs)):
model.train()
model.zero_grad()
embeddings = model(features, edge_index)
assign_tensor = gumbel_softmax(embeddings, temp=0.1,hard=True)
assign_tensor_t = torch.transpose(assign_tensor, 0, 1)
super_adj = assign_tensor_t @ adj @ assign_tensor
vol = super_adj.sum(1)
diag = torch.diagonal(super_adj)
norm_cut = (vol - diag)/(vol+1e-20)
loss = norm_cut.sum()
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), 2.0)
optimizer.step()
if loss.item() < smallest_loss:
smallest_loss = loss.item()
embeddings_np = embeddings.cpu().detach().numpy()
X_train = embeddings_np[idx_train]
Y_train = labels[idx_train]
X_test = embeddings_np[idx_test]
Y_test = labels[idx_test]
clf = LogisticRegression(solver="lbfgs", max_iter=4000)
clf.fit(X_train, Y_train)
print(loss.item(), clf.score(X_test, Y_test))
# import pdb;pdb.set_trace()a
| true | true |
f7f5463b9e9178777682c4a8a5359d269a6d4668 | 1,893 | py | Python | mypyc/test/test_external.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | 35 | 2016-03-30T09:25:14.000Z | 2022-03-12T10:53:11.000Z | mypyc/test/test_external.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | 36 | 2020-07-27T23:26:53.000Z | 2021-08-02T23:22:37.000Z | mypyc/test/test_external.py | jag426/mypy | 62d3bdf1f5114a669c7499258d7e766c1a6fa640 | [
"PSF-2.0"
] | 6 | 2016-01-29T04:33:27.000Z | 2019-11-03T19:19:43.000Z | """Test cases that run tests as subprocesses."""
from typing import List
import os
import subprocess
import sys
import unittest
base_dir = os.path.join(os.path.dirname(__file__), '..', '..')
class TestExternal(unittest.TestCase):
    """Builds and runs the C unit test suite in subprocesses."""

    # TODO: Get this to work on Windows.
    # (Or don't. It is probably not a good use of time.)
    @unittest.skipIf(sys.platform.startswith("win"), "rt tests don't work on windows")
    def test_c_unit_test(self) -> None:
        """Run C unit tests in a subprocess."""
        # Build Google Test, the C++ framework we use for testing C code.
        # The source code for Google Test is copied to this repository.
        cppflags = []  # type: List[str]
        env = os.environ.copy()
        if sys.platform == 'darwin':
            cppflags += ['-mmacosx-version-min=10.10', '-stdlib=libc++']
        env['CPPFLAGS'] = ' '.join(cppflags)
        subprocess.check_call(
            ['make', 'libgtest.a'],
            env=env,
            cwd=os.path.join(base_dir, 'mypyc', 'external', 'googletest', 'make'))
        # Build Python wrapper for C unit tests.
        env = os.environ.copy()
        env['CPPFLAGS'] = ' '.join(cppflags)
        # check_call raises CalledProcessError on failure and always returns
        # None, so its return value is not assigned (the previous code bound
        # it to an unused `status` variable, which was misleading).
        subprocess.check_call(
            [sys.executable, 'setup.py', 'build_ext', '--inplace'],
            env=env,
            cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))
        # Run C unit tests.
        env = os.environ.copy()
        if 'GTEST_COLOR' not in os.environ:
            env['GTEST_COLOR'] = 'yes'  # Use fancy colors
        status = subprocess.call([sys.executable, '-c',
                                  'import sys, test_capi; sys.exit(test_capi.run_tests())'],
                                 env=env,
                                 cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))
        if status != 0:
            raise AssertionError("make test: C unit test failure")
| 39.4375 | 92 | 0.566825 |
from typing import List
import os
import subprocess
import sys
import unittest
base_dir = os.path.join(os.path.dirname(__file__), '..', '..')
class TestExternal(unittest.TestCase):
@unittest.skipIf(sys.platform.startswith("win"), "rt tests don't work on windows")
def test_c_unit_test(self) -> None:
cppflags = []
env = os.environ.copy()
if sys.platform == 'darwin':
cppflags += ['-mmacosx-version-min=10.10', '-stdlib=libc++']
env['CPPFLAGS'] = ' '.join(cppflags)
subprocess.check_call(
['make', 'libgtest.a'],
env=env,
cwd=os.path.join(base_dir, 'mypyc', 'external', 'googletest', 'make'))
env = os.environ.copy()
env['CPPFLAGS'] = ' '.join(cppflags)
status = subprocess.check_call(
[sys.executable, 'setup.py', 'build_ext', '--inplace'],
env=env,
cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))
env = os.environ.copy()
if 'GTEST_COLOR' not in os.environ:
env['GTEST_COLOR'] = 'yes'
status = subprocess.call([sys.executable, '-c',
'import sys, test_capi; sys.exit(test_capi.run_tests())'],
env=env,
cwd=os.path.join(base_dir, 'mypyc', 'lib-rt'))
if status != 0:
raise AssertionError("make test: C unit test failure")
| true | true |
f7f5469b19b55eac52f8ed4a007ccd58991e077d | 4,634 | py | Python | opencv_utilities/bounding_rect.py | satinder147/opencv-utilities | ec2d0469949924dd89f8159e9c49191f3c43f720 | [
"MIT"
] | 1 | 2020-06-27T10:40:12.000Z | 2020-06-27T10:40:12.000Z | opencv_utilities/bounding_rect.py | satinder147/opencv-utilities | ec2d0469949924dd89f8159e9c49191f3c43f720 | [
"MIT"
] | null | null | null | opencv_utilities/bounding_rect.py | satinder147/opencv-utilities | ec2d0469949924dd89f8159e9c49191f3c43f720 | [
"MIT"
] | null | null | null | import cv2
import numpy as np
import time
def check(scale, text, font, line_width, box_width, box_height, offset, p=1):
    """Return whether `text` rendered at `scale` fits inside the box.

    Greedily wraps `text` at spaces so that each rendered line stays within
    `box_width` pixels.

    :param scale: font scale the binary search is optimising.
    :param p: 1 -> return a bool (does the wrapped text fit vertically?);
              2 -> return the list of wrapped line strings instead.
    :return: bool when p == 1, list of line strings when p == 2, or False
             when a single word is wider than the box at this scale.
    """
    last_word = 0
    prev_line_break = 0
    strings = []
    # Height of the most recently measured line; initialised so that empty
    # input no longer crashes with a TypeError on the final comparison.
    word_height = 0
    for i in range(len(text)):
        if text[i] == ' ':
            last_word = i
        word_width, word_height = cv2.getTextSize(
            text[prev_line_break:i + 1], font, scale, line_width)[0]
        if word_width > box_width:
            # Overflow: break the line at the last space seen.  (A previous
            # `i = last_word` statement here was dead code -- reassigning the
            # loop variable has no effect on a Python `for` loop.)
            if text[prev_line_break:last_word] == " " or last_word + 1 == prev_line_break:
                # A single word cannot fit on one line at this scale.
                return False
            strings.append(text[prev_line_break:last_word])
            prev_line_break = last_word + 1
    strings.append(text[prev_line_break:len(text)])
    if p == 2:
        return strings
    # Total height: one word_height per line plus `offset` between lines.
    return (len(strings) * word_height + (len(strings) - 1) * offset) < box_height
def get_scale(text, font, line_width, box_width, box_height, offset):
    """Binary-search the largest font scale at which `text` fits the box."""
    low, high = 0, 100
    # Integer binary search: `low` always fits, `high` never does.
    while high - low > 1:
        mid = (low + high) // 2
        if check(mid, text, font, line_width, box_width, box_height, offset):
            low = mid
        else:
            high = mid
    # Refine the integer result with successively finer increments
    # (0.1, 0.01, ... down to 1e-5).
    best = low
    step = 0.1
    for _ in range(5):
        while check(best + step, text, font, line_width, box_width, box_height, offset):
            best += step
        step /= 10
    return best
def get_image(x, y, box_width, box_height, image, text, pad_percent_height, pad_percent_width, line_width, align="left",
              font=cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), rect=False):
    """Draw `text` inside the box (x, y, x+box_width, y+box_height) on `image`.

    The font scale is chosen automatically (via `get_scale`) so the wrapped
    text is as large as possible while still fitting the padded box.

    :param x, y: top-left corner of the bounding box.
    :param box_width, box_height: outer size of the bounding box in pixels.
    :param image: image (numpy array) that is drawn on in place.
    :param text: text to render, wrapped at spaces.
    :param pad_percent_height: fraction of box height used as top/bottom padding.
    :param pad_percent_width: fraction of box width used as left/right padding.
    :param line_width: thickness passed to cv2.putText / cv2.getTextSize.
    :param align: 'left', 'center' or 'right' alignment inside the box.
    :param font: OpenCV font face.
    :param color: text color (BGR is assumed -- TODO confirm with callers).
    :param rect: when True, also draw the bounding box outline.
    :return: the image with the text drawn on it.
    """
    if rect:
        cv2.rectangle(image, (x, y), (x+box_width, y+box_height), (255, 0, 0), 1)
    # Shrink the usable box by the requested padding on every side.
    padding = int(box_height*pad_percent_height)
    padding_width = int(pad_percent_width * box_width)
    box_width -= int(2*box_width*pad_percent_width)
    box_height -= int(2*box_height*pad_percent_height)
    # Vertical gap between consecutive lines.
    offset = int(box_height/10)
    # Largest scale at which the wrapped text still fits (binary search).
    ans = get_scale(text, font, line_width, box_width, box_height, offset)
    p = cv2.getTextSize(text, font, ans, line_width)
    # Baseline origin of the first line, just inside the padding.
    x1 = x + padding_width
    y1 = y+p[0][1]+padding
    # Re-run the wrapper at the chosen scale to get the actual line strings.
    strings = check(ans, text, font, line_width, box_width, box_height, offset, p=2)
    for i in range(len(strings)):
        if align == 'left':
            cv2.putText(image, strings[i], (x1, y1), font, ans, color, line_width, cv2.LINE_AA)
        if align == 'center' or align == 'right':
            # Horizontal slack left over on this line at the chosen scale.
            remaining = box_width-cv2.getTextSize(strings[i], font, ans, line_width)[0][0]
            if align == 'center':
                cv2.putText(image, strings[i], (x1+remaining//2, y1), font, ans, color, line_width,cv2.LINE_AA)
            else:
                cv2.putText(image, strings[i], (x1+remaining, y1), font, ans, color, line_width, cv2.LINE_AA)
        # Advance one text height plus the inter-line gap.
        y1 += p[0][1]+offset
    return image
def get_transformed(text, pts_dest, img):
    """Render `text` on a 600x400 canvas and warp it onto `img` at `pts_dest`."""
    canvas_pts = np.array([[0, 0], [600, 0], [600, 400], [0, 400]], dtype=float)
    canvas = np.zeros((400, 600, 3), dtype=np.uint8)
    canvas = get_image(0, 0, 600, 400, canvas, text, 0.05, 0.05, 3, 'center')
    # Homography mapping the flat canvas onto the destination quadrilateral.
    homography, _ = cv2.findHomography(canvas_pts, pts_dest)
    warped = cv2.warpPerspective(canvas, homography, (img.shape[1], img.shape[0]))
    # Mask of the warped text pixels, used to composite text over the image.
    grey = cv2.cvtColor(warped, cv2.COLOR_BGR2GRAY)
    mask = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY)[1]
    text_layer = cv2.bitwise_and(warped, warped, mask=mask)
    background = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(mask))
    return cv2.bitwise_or(background, text_layer)
if __name__ == "__main__":
    # Demo: OpenCV has no built-in way to auto-scale text inside a bounding
    # box; this module binary-searches the optimal scale and also supports
    # text alignment and padding.  Usage is just the box coordinates plus
    # the text itself.
    start = time.time()
    text = '''I am Satinder Singh, engineering student from India.'''
    canvas = np.zeros((480, 640, 3), dtype=np.uint8)
    rendered = get_image(10, 10, 100, 100, canvas, text, 0.05, 0.05, 1)
    cv2.imshow("results", rendered)
    cv2.waitKey(0)
    end = time.time()
    print(end - start)
| 39.271186 | 120 | 0.632715 | import cv2
import numpy as np
import time
def check(scale, text, font, line_width, box_width, box_height, offset, p=1):
last_word = 0
prev_line_break = 0
strings = []
word_height = None
for i in range(len(text)):
if text[i] == ' ':
last_word = i
word_width, word_height = cv2.getTextSize(text[prev_line_break:i+1], font, scale, line_width)[0]
if word_width > box_width:
i = last_word
if text[prev_line_break:last_word] == " " or last_word+1 == prev_line_break:
return False
strings.append(text[prev_line_break:last_word])
prev_line_break = last_word+1
strings.append(text[prev_line_break:len(text)])
if p == 2:
return strings
if (len(strings) * word_height + (len(strings) - 1) * offset) < box_height:
return True
else:
return False
def get_scale(text, font, line_width, box_width, box_height, offset):
lo = 0
hi = 100
while hi-lo > 1:
mid = lo+(hi-lo)//2
if check(mid, text, font, line_width, box_width,
box_height, offset):
lo = mid
else:
hi = mid
increment = 0.1
precision = 5
ans = lo
for _ in range(precision):
while check(ans+increment, text, font, line_width, box_width, box_height, offset):
ans += increment
increment /= 10
return ans
def get_image(x, y, box_width, box_height, image, text, pad_percent_height, pad_percent_width, line_width, align="left",
font=cv2.FONT_HERSHEY_SIMPLEX, color=(255, 0, 0), rect=False):
if rect:
cv2.rectangle(image, (x, y), (x+box_width, y+box_height), (255, 0, 0), 1)
padding = int(box_height*pad_percent_height)
padding_width = int(pad_percent_width * box_width)
box_width -= int(2*box_width*pad_percent_width)
box_height -= int(2*box_height*pad_percent_height)
offset = int(box_height/10)
ans = get_scale(text, font, line_width, box_width, box_height, offset)
p = cv2.getTextSize(text, font, ans, line_width)
x1 = x + padding_width
y1 = y+p[0][1]+padding
strings = check(ans, text, font, line_width, box_width, box_height, offset, p=2)
for i in range(len(strings)):
if align == 'left':
cv2.putText(image, strings[i], (x1, y1), font, ans, color, line_width, cv2.LINE_AA)
if align == 'center' or align == 'right':
remaining = box_width-cv2.getTextSize(strings[i], font, ans, line_width)[0][0]
if align == 'center':
cv2.putText(image, strings[i], (x1+remaining//2, y1), font, ans, color, line_width,cv2.LINE_AA)
else:
cv2.putText(image, strings[i], (x1+remaining, y1), font, ans, color, line_width, cv2.LINE_AA)
y1 += p[0][1]+offset
return image
def get_transformed(text, pts_dest, img):
pts_src = np.array([[0, 0], [600, 0], [600, 400], [0, 400]], dtype=float)
src = np.zeros((400, 600, 3), dtype=np.uint8)
src = get_image(0, 0, 600, 400, src, text, 0.05, 0.05, 3, 'center')
h, status = cv2.findHomography(pts_src, pts_dest)
im_temp = cv2.warpPerspective(src, h, (img.shape[1], img.shape[0]))
grey = cv2.cvtColor(im_temp, cv2.COLOR_BGR2GRAY)
mask = cv2.threshold(grey, 0, 255, cv2.THRESH_BINARY)[1]
only_text = cv2.bitwise_and(im_temp, im_temp, mask=mask)
only_back = cv2.bitwise_and(img, img, mask=cv2.bitwise_not(mask))
result = cv2.bitwise_or(only_back, only_text)
return result
if __name__ == "__main__":
st = time.time()
text = '''I am Satinder Singh, engineering student from India.'''
img = np.zeros((480, 640, 3), dtype=np.uint8)
result = get_image(10, 10, 100, 100, img, text, 0.05, 0.05, 1)
cv2.imshow("results", result)
cv2.waitKey(0)
en = time.time()
print(en-st)
| true | true |
f7f5475d10b337d14bc0306cff41c388917bc77d | 2,091 | py | Python | tests/models/linear/test_lasso.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/linear/test_lasso.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | null | null | null | tests/models/linear/test_lasso.py | harmsm/epistasis | 741b25b3e28015aeeba8d4efc94af1e1d811cd63 | [
"Unlicense"
] | 2 | 2020-04-02T00:58:24.000Z | 2021-11-16T13:30:30.000Z | # External imports
import unittest
import pytest
import numpy as np
from gpmap import GenotypePhenotypeMap
# Module to test
import epistasis
from epistasis.models.linear import EpistasisLasso
@pytest.fixture
def gpm(test_data):
    """Build a GenotypePhenotypeMap from the first test-data record."""
    record = test_data[0]
    return GenotypePhenotypeMap(genotype=record["genotype"],
                                phenotype=record["phenotype"],
                                wildtype=record["wildtype"])
class TestEpistasisLasso(object):
    """Smoke tests for the EpistasisLasso linear model on a small map."""

    # Maximum order of epistatic interactions to fit.
    order = 3

    def test_init(self, gpm):
        """The model stores the order and model type it was built with."""
        model = EpistasisLasso(order=self.order, model_type="local")
        model.add_gpm(gpm)
        # Checks
        assert model.order == self.order
        assert model.model_type == "local"

    def test_fit(self, gpm):
        """Fitting exposes coefficients and an epistasis map."""
        model = EpistasisLasso(order=self.order, model_type="local")
        model.add_gpm(gpm)
        model.fit()
        # Checks
        assert hasattr(model, "coef_")
        assert hasattr(model, "epistasis")

    def test_predict(self, gpm):
        """Prediction after a fit returns a result."""
        model = EpistasisLasso(order=self.order, model_type="local")
        model.add_gpm(gpm)
        model.fit()
        # BUG FIX: the result used to be bound to an unused variable with no
        # assertion, so a broken predict() would not fail this test.
        assert model.predict() is not None

    def test_score(self, gpm):
        """The fit score lies in [0, 1]."""
        model = EpistasisLasso(order=self.order, model_type="local", alpha=0.01)
        model.add_gpm(gpm)
        model.fit()
        score = model.score()
        # Tests
        assert score >= 0
        assert score <= 1

    def test_hypothesis(self, gpm):
        """hypothesis() evaluates the model at its own coefficients."""
        model = EpistasisLasso(order=self.order, model_type="local")
        model.add_gpm(gpm)
        model.fit()
        # BUG FIX: this test used to end with a bare `True` expression, which
        # asserts nothing; check that the call actually produced a result.
        assert model.hypothesis(thetas=model.coef_) is not None

    def test_lnlikelihood(self, gpm):
        """The log-likelihood of the fit is a float array/scalar."""
        model = EpistasisLasso(order=self.order, model_type="local")
        model.add_gpm(gpm)
        model.fit()
        # Calculate lnlikelihood
        lnlike = model.lnlikelihood()
        assert lnlike.dtype == float
| 24.313953 | 80 | 0.601626 |
import unittest
import pytest
import numpy as np
from gpmap import GenotypePhenotypeMap
import epistasis
from epistasis.models.linear import EpistasisLasso
@pytest.fixture
def gpm(test_data):
d = test_data[0]
return GenotypePhenotypeMap(genotype=d["genotype"],
phenotype=d["phenotype"],
wildtype=d["wildtype"])
class TestEpistasisLasso(object):
order = 3
def test_init(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local")
model.add_gpm(gpm)
check1 = model.order
check2 = model.model_type
assert check1 == self.order
assert check2 == "local"
def test_fit(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local")
model.add_gpm(gpm)
model.fit()
check2 = hasattr(model, "coef_")
check3 = hasattr(model, "epistasis")
assert check2 is True
assert check3 is True
def test_predict(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local")
model.add_gpm(gpm)
model.fit()
check1 = model.predict()
def test_score(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local", alpha=0.01)
model.add_gpm(gpm)
model.fit()
score = model.score()
assert score >= 0
assert score <= 1
def test_hypothesis(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local")
model.add_gpm(gpm)
model.fit()
check1 = model.hypothesis(thetas=model.coef_)
True
def test_lnlikelihood(self, gpm):
model = EpistasisLasso(order=self.order, model_type="local")
model.add_gpm(gpm)
model.fit()
lnlike = model.lnlikelihood()
assert lnlike.dtype == float
| true | true |
f7f547cc651c3cf1a292f0663e6d4052c3ff7ed9 | 6,922 | py | Python | src/flask/views.py | jlshix/flask-cn | 1e0dc9181ae626e7069c4020f62b0355cd82f5d2 | [
"BSD-3-Clause"
] | 1 | 2021-11-20T08:05:38.000Z | 2021-11-20T08:05:38.000Z | src/flask/views.py | jlshix/flask-cn | 1e0dc9181ae626e7069c4020f62b0355cd82f5d2 | [
"BSD-3-Clause"
] | null | null | null | src/flask/views.py | jlshix/flask-cn | 1e0dc9181ae626e7069c4020f62b0355cd82f5d2 | [
"BSD-3-Clause"
] | 1 | 2021-05-12T13:06:34.000Z | 2021-05-12T13:06:34.000Z | # -*- coding: utf-8 -*-
"""
flask.views
~~~~~~~~~~~
This module provides class-based views inspired by the ones in Django.
此模块提供基于类的视图, 灵感来自 Django.
:copyright: 2010 Pallets
:license: BSD-3-Clause
"""
from ._compat import with_metaclass
from .globals import request
#: Lowercase HTTP method names that :class:`MethodViewType` scans for as
#: handler functions on a :class:`MethodView` subclass.
http_method_funcs = frozenset(
    ["get", "post", "head", "options", "delete", "put", "trace", "patch"]
)
class View(object):
    """Alternative way to use view functions.  A subclass has to implement
    :meth:`dispatch_request`, which is called with the view arguments from
    the URL routing system.  If :attr:`methods` is provided, the methods do
    not have to be passed to the :meth:`~flask.Flask.add_url_rule` method
    explicitly::

        class MyView(View):
            methods = ['GET']

            def dispatch_request(self, name):
                return 'Hello %s!' % name

        app.add_url_rule('/hello/<name>', view_func=MyView.as_view('myview'))

    When you want to decorate a pluggable view you will have to either do
    that when the view function is created (by wrapping the return value of
    :meth:`as_view`) or you can use the :attr:`decorators` attribute::

        class SecretView(View):
            methods = ['GET']
            decorators = [superuser_required]

            def dispatch_request(self):
                ...

    The decorators stored in the decorators list are applied one after
    another when the view function is created.  Note that you can *not* use
    the class based decorators since those would decorate the view class and
    not the generated view function!
    """

    #: A list of HTTP methods this view can handle.
    methods = None

    #: Setting this disables or force-enables the automatic OPTIONS handling.
    provide_automatic_options = None

    #: The canonical way to decorate class-based views is to decorate the
    #: return value of as_view().  However, since this moves parts of the
    #: logic from the class declaration to the place where it's hooked into
    #: the routing system, you can instead place one or more decorators in
    #: this list, and whenever the view function is created the result is
    #: automatically decorated.
    #:
    #: .. versionadded:: 0.8
    decorators = ()

    def dispatch_request(self):
        """Subclasses have to override this method to implement the actual
        view function code.  This method is called with all the arguments
        from the URL rule.
        """
        raise NotImplementedError()

    @classmethod
    def as_view(cls, name, *class_args, **class_kwargs):
        """Converts the class into an actual view function that can be used
        with the routing system.  Internally this generates a function on
        the fly which will instantiate the :class:`View` on each request and
        call the :meth:`dispatch_request` method on it.

        The arguments passed to :meth:`as_view` are forwarded to the
        constructor of the class.
        """

        def view(*args, **kwargs):
            self = view.view_class(*class_args, **class_kwargs)
            return self.dispatch_request(*args, **kwargs)

        if cls.decorators:
            view.__name__ = name
            view.__module__ = cls.__module__
            for decorator in cls.decorators:
                view = decorator(view)

        # We attach the view class to the view function for two reasons:
        # first of all it allows us to easily figure out what class-based
        # view this thing came from, secondly it's also used for
        # instantiating the view class so you can actually replace it with
        # something else for testing purposes and debugging.
        view.view_class = cls
        view.__name__ = name
        view.__doc__ = cls.__doc__
        view.__module__ = cls.__module__
        view.methods = cls.methods
        view.provide_automatic_options = cls.provide_automatic_options
        return view
class MethodViewType(type):
    """Metaclass for :class:`MethodView` that determines what HTTP methods
    the view defines.

    At class-creation time it collects the HTTP verbs for which the class
    (or its bases) provides handler methods and stores them, uppercased,
    in the ``methods`` attribute.
    """
    def __init__(cls, name, bases, d):
        super(MethodViewType, cls).__init__(name, bases, d)
        if "methods" not in d:
            # Start from the methods inherited from base classes, then add
            # one entry per lowercase handler (get/post/...) found on cls.
            methods = set()
            for base in bases:
                if getattr(base, "methods", None):
                    methods.update(base.methods)
            for key in http_method_funcs:
                if hasattr(cls, key):
                    methods.add(key.upper())
            # If we have no method at all in there we don't want to add a
            # method list. This is for instance the case for the base class
            # or another subclass of a base method view that does not introduce
            # new methods.
            if methods:
                cls.methods = methods
class MethodView(with_metaclass(MethodViewType, View)):
    """A class-based view that dispatches request methods to the corresponding
    class methods. For example, if you implement a ``get`` method, it will be
    used to handle ``GET`` requests. ::

        class CounterAPI(MethodView):
            def get(self):
                return session.get('counter', 0)
            def post(self):
                session['counter'] = session.get('counter', 0) + 1
                return 'OK'
        app.add_url_rule('/counter', view_func=CounterAPI.as_view('counter'))
    """
    def dispatch_request(self, *args, **kwargs):
        # Look up the handler named after the HTTP verb (get/post/...).
        meth = getattr(self, request.method.lower(), None)
        # If the request method is HEAD and we don't have a handler for it
        # retry with GET.
        if meth is None and request.method == "HEAD":
            meth = getattr(self, "get", None)
        assert meth is not None, "Unimplemented method %r" % request.method
        return meth(*args, **kwargs)
| 33.765854 | 79 | 0.635077 |
from ._compat import with_metaclass
from .globals import request
http_method_funcs = frozenset(
["get", "post", "head", "options", "delete", "put", "trace", "patch"]
)
class View(object):
methods = None
provide_automatic_options = None
#: into the routing system.
#
# 装饰基于类的视图的规范方法是装饰 `as_view()` 的返回值. 但这样会将一部分逻辑
# 从类的声明移到挂接到路由系统的地方.
#:
#: You can place one or more decorators in this list and whenever the
#: view function is created the result is automatically decorated.
#
# 你可以在这个列表中放置一到多个装饰器, 当视图函数创建时, 会自动装饰结果.
#:
#: .. versionadded:: 0.8
decorators = ()
def dispatch_request(self):
raise NotImplementedError()
@classmethod
def as_view(cls, name, *class_args, **class_kwargs):
def view(*args, **kwargs):
self = view.view_class(*class_args, **class_kwargs)
return self.dispatch_request(*args, **kwargs)
if cls.decorators:
view.__name__ = name
view.__module__ = cls.__module__
for decorator in cls.decorators:
view = decorator(view)
# We attach the view class to the view function for two reasons:
# first of all it allows us to easily figure out what class-based
# view this thing came from, secondly it's also used for instantiating
view.view_class = cls
view.__name__ = name
view.__doc__ = cls.__doc__
view.__module__ = cls.__module__
view.methods = cls.methods
view.provide_automatic_options = cls.provide_automatic_options
return view
class MethodViewType(type):
def __init__(cls, name, bases, d):
super(MethodViewType, cls).__init__(name, bases, d)
if "methods" not in d:
methods = set()
for base in bases:
if getattr(base, "methods", None):
methods.update(base.methods)
for key in http_method_funcs:
if hasattr(cls, key):
methods.add(key.upper())
# method list. This is for instance the case for the base class
# or another subclass of a base method view that does not introduce
# new methods.
#
# 如果这里没有任何请求方法, 我们不想添加一个请求方法列表. 例如, 对于不引入
# 新方法的基本方法视图的基类或另一个子类, 就是这种情况.
if methods:
cls.methods = methods
class MethodView(with_metaclass(MethodViewType, View)):
def dispatch_request(self, *args, **kwargs):
meth = getattr(self, request.method.lower(), None)
# If the request method is HEAD and we don't have a handler for it
if meth is None and request.method == "HEAD":
meth = getattr(self, "get", None)
assert meth is not None, "Unimplemented method %r" % request.method
return meth(*args, **kwargs)
| true | true |
f7f547fb4f073d0d0e07223c847a08a58893bbcd | 2,353 | py | Python | bin/split_dataset.py | FirstHandScientist/genhmm | 95954794a48c40486c9df4644a654c541866df4c | [
"MIT"
] | 8 | 2020-04-27T07:14:09.000Z | 2022-01-09T11:10:06.000Z | bin/split_dataset.py | FirstHandScientist/genhmm | 95954794a48c40486c9df4644a654c541866df4c | [
"MIT"
] | 1 | 2021-10-20T11:23:06.000Z | 2021-10-20T11:23:06.000Z | bin/split_dataset.py | FirstHandScientist/genhmm | 95954794a48c40486c9df4644a654c541866df4c | [
"MIT"
] | 2 | 2020-02-13T13:47:16.000Z | 2020-12-16T23:56:06.000Z | from parse import parse
import sys
import argparse
import os
import pickle as pkl
from gm_hmm.src.utils import read_classmap,to_phoneme_level,flip, phn61_to_phn39,remove_label, getsubset, normalize
from functools import partial
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Split a test dataset according to an existing class map.")
    parser.add_argument('-input', metavar="<Input dataset to split>", type=str)
    parser.add_argument('-classmap', metavar="<class_map.json file>", type=str)
    parser.add_argument('-totclass', metavar="<Total number of classes>", type=int)
    args = parser.parse_args()
    fname_dtest = args.input
    # The test-set file name encodes mode/features/noise-type/SNR,
    # e.g. "test.39.white.10db.pkl"; the matching train set shares nfeats.
    mode, nfeats, ntype, snr = parse("{}.{}.{}.{:d}db.pkl", os.path.basename(fname_dtest))
    fname_dtrain = os.path.join(os.path.dirname(fname_dtest), "train.{}.pkl".format(nfeats))
    cmap = read_classmap(os.path.dirname(args.classmap))
    te_DATA, te_keys, te_lengths, phn2int_61, te_PHN = pkl.load(open(fname_dtest, "rb"))
    tr_DATA, tr_keys, tr_lengths, tr_PHN = pkl.load(open(fname_dtrain, "rb"))
    # Convert utterance-level sequences into phoneme-level segments.
    data_te, label_te = to_phoneme_level(te_DATA)
    data_tr, label_tr = to_phoneme_level(tr_DATA)
    phn2int = phn2int_61
    if args.totclass == 39:
        # Fold the 61-symbol phoneme label set down to 39 classes.
        f = partial(phn61_to_phn39, int2phn_61=flip(phn2int_61), data_folder=os.path.dirname(fname_dtest))
        label_tr, phn2int_39 = f(label_tr)
        label_te, _ = f(label_te, phn2int_39=phn2int_39)
        data_te, label_te = remove_label(data_te, label_te, phn2int_39)
        # NOTE(review): the line below repeats the previous call verbatim;
        # presumably redundant (idempotent) -- confirm and drop if so.
        data_te, label_te = remove_label(data_te, label_te, phn2int_39)
        phn2int_39.pop('-', None)
        phn2int = phn2int_39
    # Keep only the phoneme classes listed in the class map.
    iphn = [phn2int[v] for k, v in cmap.items()]
    xtrain, ytrain = getsubset(data_tr, label_tr, iphn)
    xtest, ytest = getsubset(data_te, label_te, iphn)
    xtrain, xtest = normalize(xtrain, xtest)
    # Assert length (If we add an already existing phoneme,
    # the dictionary size will not be len(classmap) + len(class2phn)
    test_outfiles = [fname_dtest.replace(".pkl", "_" + str(i+1) + ".pkl") for i in range(len(iphn))]
    # Create only the classes that are left
    for i, ic in zip(map(int, cmap.keys()), iphn):
        assert (not os.path.isfile(test_outfiles[i]))
        xtest_c = xtest[ytest == ic]
        pkl.dump(xtest_c, open(test_outfiles[i], "wb"))
    sys.exit(0)
| 40.568966 | 115 | 0.693158 | from parse import parse
import sys
import argparse
import os
import pickle as pkl
from gm_hmm.src.utils import read_classmap,to_phoneme_level,flip, phn61_to_phn39,remove_label, getsubset, normalize
from functools import partial
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Split a test dataset according to an existing class map.")
parser.add_argument('-input', metavar="<Input dataset to split>", type=str)
parser.add_argument('-classmap', metavar="<class_map.json file>", type=str)
parser.add_argument('-totclass', metavar="<Total number of classes>", type=int)
args = parser.parse_args()
fname_dtest = args.input
mode, nfeats, ntype, snr = parse("{}.{}.{}.{:d}db.pkl", os.path.basename(fname_dtest))
fname_dtrain = os.path.join(os.path.dirname(fname_dtest), "train.{}.pkl".format(nfeats))
cmap = read_classmap(os.path.dirname(args.classmap))
te_DATA, te_keys, te_lengths, phn2int_61, te_PHN = pkl.load(open(fname_dtest, "rb"))
tr_DATA, tr_keys, tr_lengths, tr_PHN = pkl.load(open(fname_dtrain, "rb"))
data_te, label_te = to_phoneme_level(te_DATA)
data_tr, label_tr = to_phoneme_level(tr_DATA)
phn2int = phn2int_61
if args.totclass == 39:
f = partial(phn61_to_phn39, int2phn_61=flip(phn2int_61), data_folder=os.path.dirname(fname_dtest))
label_tr, phn2int_39 = f(label_tr)
label_te, _ = f(label_te, phn2int_39=phn2int_39)
data_te, label_te = remove_label(data_te, label_te, phn2int_39)
data_te, label_te = remove_label(data_te, label_te, phn2int_39)
phn2int_39.pop('-', None)
phn2int = phn2int_39
iphn = [phn2int[v] for k, v in cmap.items()]
xtrain, ytrain = getsubset(data_tr, label_tr, iphn)
xtest, ytest = getsubset(data_te, label_te, iphn)
xtrain, xtest = normalize(xtrain, xtest)
test_outfiles = [fname_dtest.replace(".pkl", "_" + str(i+1) + ".pkl") for i in range(len(iphn))]
for i, ic in zip(map(int, cmap.keys()), iphn):
assert (not os.path.isfile(test_outfiles[i]))
xtest_c = xtest[ytest == ic]
pkl.dump(xtest_c, open(test_outfiles[i], "wb"))
sys.exit(0)
| true | true |
f7f5490bab7300b2279beff1b1bf6b3c33cfce6e | 1,750 | py | Python | ccdb/projects/models.py | thermokarst/ccdb-api | 01d76d75ffaaa9949991cdc3ac43b9ae388ad2a6 | [
"MIT"
] | null | null | null | ccdb/projects/models.py | thermokarst/ccdb-api | 01d76d75ffaaa9949991cdc3ac43b9ae388ad2a6 | [
"MIT"
] | 24 | 2017-01-09T12:51:13.000Z | 2018-04-30T17:40:27.000Z | ccdb/projects/models.py | thermokarst/ccdb-api | 01d76d75ffaaa9949991cdc3ac43b9ae388ad2a6 | [
"MIT"
] | null | null | null | from django.db import models
class Project(models.Model):
    """A research project identified by a name and a short code."""
    name = models.CharField(max_length=100)
    code = models.CharField(max_length=10, blank=True)
    # Institutional protocol number (free-form text).
    iacuc_number = models.CharField(max_length=25, blank=True)
    description = models.CharField(max_length=255, blank=True)
    # Manual ordering key; list views sort by this (see Meta.ordering).
    sort_order = models.IntegerField(blank=True, null=True)
    def __str__(self):
        return self.name
    class Meta:
        unique_together = ('name', 'code')
        ordering = ['sort_order']
class Grant(models.Model):
    """A funding grant; may fund several projects (many-to-many)."""
    title = models.CharField(max_length=200)
    code = models.CharField(max_length=10, blank=True)
    description = models.CharField(max_length=255, blank=True)
    # Reverse accessor: project.grants
    projects = models.ManyToManyField(Project, related_name='grants')
    # Manual ordering key; list views sort by this (see Meta.ordering).
    sort_order = models.IntegerField(blank=True, null=True)
    def __str__(self):
        return self.title
    class Meta:
        unique_together = ('title', 'code',)
        ordering = ['sort_order']
class GrantReport(models.Model):
    """A report filed for a grant, with an optional file attachment."""
    # Deleting a grant deletes its reports (CASCADE); reverse accessor:
    # grant.reports
    grant = models.ForeignKey(Grant, related_name='reports',
                              on_delete=models.CASCADE)
    title = models.CharField(max_length=200)
    report_type = models.CharField(max_length=50, blank=True)
    description = models.CharField(max_length=255, blank=True)
    due_date = models.DateField(blank=True, null=True)
    submitted_date = models.DateField(blank=True, null=True)
    # Uploads are sharded by date under the projects/grants media folder.
    attachment = models.FileField(
        upload_to='projects/grants/grant_report_attachments/%Y/%m/%d',
        blank=True, null=True)
    # Manual ordering key; list views sort by this (see Meta.ordering).
    sort_order = models.IntegerField(blank=True, null=True)
    def __str__(self):
        return self.title
    class Meta:
        unique_together = ('grant', 'title', 'due_date',)
        ordering = ['sort_order']
| 33.018868 | 70 | 0.681714 | from django.db import models
class Project(models.Model):
name = models.CharField(max_length=100)
code = models.CharField(max_length=10, blank=True)
iacuc_number = models.CharField(max_length=25, blank=True)
description = models.CharField(max_length=255, blank=True)
sort_order = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.name
class Meta:
unique_together = ('name', 'code')
ordering = ['sort_order']
class Grant(models.Model):
title = models.CharField(max_length=200)
code = models.CharField(max_length=10, blank=True)
description = models.CharField(max_length=255, blank=True)
projects = models.ManyToManyField(Project, related_name='grants')
sort_order = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.title
class Meta:
unique_together = ('title', 'code',)
ordering = ['sort_order']
class GrantReport(models.Model):
grant = models.ForeignKey(Grant, related_name='reports',
on_delete=models.CASCADE)
title = models.CharField(max_length=200)
report_type = models.CharField(max_length=50, blank=True)
description = models.CharField(max_length=255, blank=True)
due_date = models.DateField(blank=True, null=True)
submitted_date = models.DateField(blank=True, null=True)
attachment = models.FileField(
upload_to='projects/grants/grant_report_attachments/%Y/%m/%d',
blank=True, null=True)
sort_order = models.IntegerField(blank=True, null=True)
def __str__(self):
return self.title
class Meta:
unique_together = ('grant', 'title', 'due_date',)
ordering = ['sort_order']
| true | true |
f7f5492934185b0b9704408a6dcd8f22add22710 | 3,306 | py | Python | pokemon/convert.py | RichieBzzzt/pokemon | 2028d12efb5036adf617cef5a1db920f629ade8a | [
"MIT"
] | 52 | 2018-02-09T08:05:22.000Z | 2022-02-22T15:41:24.000Z | pokemon/convert.py | RichieBzzzt/pokemon | 2028d12efb5036adf617cef5a1db920f629ade8a | [
"MIT"
] | 7 | 2018-01-21T17:29:19.000Z | 2021-02-21T05:18:27.000Z | pokemon/convert.py | RichieBzzzt/pokemon | 2028d12efb5036adf617cef5a1db920f629ade8a | [
"MIT"
] | 12 | 2019-02-20T21:29:21.000Z | 2022-01-28T15:31:27.000Z | """
Copyright (c) 2016-2020 Vanessa Sochat
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from PIL import Image
"""Credit goes to https://www.hackerearth.com/notes/beautiful-python-a-simple-ascii-art-generator-from-images/
The one modification I added was to scale the new_width by 2, because text characters tend to be thinner than they
are wide, and the current method produced images that were (generally) too tall!
"""
ASCII_CHARS = ["#", "?", "%", ".", "S", "+", ".", "*", ":", ",", "@"]
def scale_image(image, new_width):
    """Return *image* resized to *new_width* columns, keeping aspect ratio.

    The horizontal dimension is doubled because text characters are
    roughly twice as tall as they are wide, so the rendered ASCII art
    keeps the picture's proportions.
    """
    width, height = image.size
    scaled_height = int(new_width * (height / float(width)))
    return image.resize((new_width * 2, scaled_height))
def convert_to_grayscale(image):
    """Return a grayscale copy of *image* ("L" = 8-bit single channel)."""
    return image.convert("L")
def map_pixels_to_ascii_chars(image, range_width=25):
    """Map every pixel of a grayscale image to one ASCII character.

    The 0-255 luminance range is split into buckets of *range_width*
    (eleven buckets for the default width of 25); each bucket indexes
    one entry of ``ASCII_CHARS``. Returns the characters joined into a
    single flat string.
    """
    chars = (ASCII_CHARS[value // range_width] for value in image.getdata())
    return "".join(chars)
def convert_image_to_ascii(image, new_width):
    """Render *image* as ASCII art, one row per line.

    Rows are ``2 * new_width`` characters long because scale_image()
    doubles the horizontal resolution to compensate for character shape.
    """
    grayscale = convert_to_grayscale(scale_image(image, new_width))
    chars = map_pixels_to_ascii_chars(grayscale)
    row_len = new_width * 2
    rows = [chars[start:start + row_len]
            for start in range(0, len(chars), row_len)]
    return "\n".join(rows)
def handle_image_conversion(image_filepath, new_width=100):
    """Open the image at *image_filepath*, print and return its ASCII art.

    Returns the ASCII string on success, or ``None`` if the file cannot
    be opened (an explanatory message is printed instead).
    """
    try:
        image = Image.open(image_filepath)
    except Exception as e:
        # Bug fix: the original used a bare ``except:`` and then referenced
        # an undefined name ``e``, raising NameError on any open failure.
        # Bind the exception and avoid catching SystemExit/KeyboardInterrupt.
        print(
            "Unable to open image file {image_filepath}.".format(
                image_filepath=image_filepath
            )
        )
        print(e)
        return
    image_ascii = convert_image_to_ascii(image, new_width)
    print(image_ascii)
    return image_ascii
# CLI entry point: ``python convert.py <image-path>``.
if __name__ == "__main__":
    import sys
    image_file_path = sys.argv[1]
    handle_image_conversion(image_file_path)
| 34.082474 | 114 | 0.722021 |
from PIL import Image
ASCII_CHARS = ["#", "?", "%", ".", "S", "+", ".", "*", ":", ",", "@"]
def scale_image(image, new_width):
(original_width, original_height) = image.size
aspect_ratio = original_height / float(original_width)
new_height = int(aspect_ratio * new_width)
new_image = image.resize((new_width * 2, new_height))
return new_image
def convert_to_grayscale(image):
return image.convert("L")
def map_pixels_to_ascii_chars(image, range_width=25):
pixels_in_image = list(image.getdata())
pixels_to_chars = [
ASCII_CHARS[pixel_value // range_width] for pixel_value in pixels_in_image
]
return "".join(pixels_to_chars)
def convert_image_to_ascii(image, new_width):
image = scale_image(image, new_width)
image = convert_to_grayscale(image)
pixels_to_chars = map_pixels_to_ascii_chars(image)
len_pixels_to_chars = len(pixels_to_chars)
image_ascii = [
pixels_to_chars[index : index + new_width * 2]
for index in range(0, len_pixels_to_chars, new_width * 2)
]
return "\n".join(image_ascii)
def handle_image_conversion(image_filepath, new_width=100):
image = None
try:
image = Image.open(image_filepath)
except:
print(
"Unable to open image file {image_filepath}.".format(
image_filepath=image_filepath
)
)
print(e)
return
image_ascii = convert_image_to_ascii(image, new_width)
print(image_ascii)
return image_ascii
if __name__ == "__main__":
import sys
image_file_path = sys.argv[1]
handle_image_conversion(image_file_path)
| true | true |
f7f549b6a2122e7fab50819ef7f19f90ca772c47 | 33,744 | py | Python | tests/unit/task/processing/test_plot.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 263 | 2015-04-26T16:05:34.000Z | 2022-02-28T11:17:07.000Z | tests/unit/task/processing/test_plot.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 19 | 2015-04-23T11:53:10.000Z | 2019-02-20T11:23:09.000Z | tests/unit/task/processing/test_plot.py | lolwww/rally | fcb1fb6c608e29dd62549cf6b3cec2e90529932f | [
"Apache-2.0"
] | 287 | 2015-04-23T11:28:03.000Z | 2021-09-16T13:05:53.000Z | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
from unittest import mock
import ddt
from rally.task.processing import plot
from tests.unit import test
PLOT = "rally.task.processing.plot."
@ddt.ddt
class PlotTestCase(test.TestCase):
    """Tests for the module-level helper functions of
    rally.task.processing.plot (workload/hook processing, source
    generation, and report/trends rendering).
    """
    @mock.patch(PLOT + "charts")
    def test__process_workload(self, mock_charts):
        """_process_workload() assembles chart data for one workload."""
        for mock_ins, ret in [
                (mock_charts.MainStatsTable, "main_stats"),
                (mock_charts.MainStackedAreaChart, "main_stacked"),
                (mock_charts.AtomicStackedAreaChart, "atomic_stacked"),
                (mock_charts.OutputStackedAreaDeprecatedChart,
                 "output_stacked"),
                (mock_charts.LoadProfileChart, "load_profile"),
                (mock_charts.MainHistogramChart, "main_histogram"),
                (mock_charts.AtomicHistogramChart, "atomic_histogram"),
                (mock_charts.AtomicAvgChart, "atomic_avg")]:
            setattr(mock_ins.return_value.render, "return_value", ret)
        iterations = [
            {"timestamp": i + 2, "error": [],
             "duration": i + 5, "idle_duration": i,
             "output": {"additive": [], "complete": []},
             "atomic_actions": {"foo_action": i + 10}} for i in range(10)]
        workload = {
            "data": iterations,
            "sla_results": {"sla": {}}, "pass_sla": True,
            "position": 0,
            "name": "Foo.bar", "description": "Description!!",
            "runner_type": "constant",
            "runner": {},
            "statistics": {"atomics": {
                "foo_action": {"max_duration": 19, "min_duration": 10}}},
            "full_duration": 40, "load_duration": 32,
            "total_iteration_count": 10,
            "max_duration": 14, "min_duration": 5,
            "start_time": 2,
            "created_at": "xxx_time",
            "hooks": []}
        result = plot._process_workload(
            workload, "!!!CONF!!!", 1)
        self.assertEqual(
            {"cls": "Foo", "met": "bar", "pos": "1",
             "name": "bar [2]", "description": "Description!!",
             "runner": "constant", "config": json.dumps("!!!CONF!!!",
                                                        indent=2),
             "created_at": "xxx_time",
             "full_duration": 40, "load_duration": 32, "hooks": [],
             "atomic": {"histogram": "atomic_histogram",
                        "iter": "atomic_stacked", "pie": "atomic_avg"},
             "iterations": {"histogram": "main_histogram",
                            "iter": "main_stacked",
                            "pie": [("success", 10), ("errors", 0)]},
             "iterations_count": 10, "errors": [],
             "load_profile": "load_profile",
             "additive_output": [],
             "complete_output": [[], [], [], [], [], [], [], [], [], []],
             "has_output": False,
             "output_errors": [],
             "sla": {}, "sla_success": True, "table": "main_stats"},
            result)
    @ddt.data(
        {"hooks": [], "expected": []},
        {"hooks": [
            {
                "config": {
                    "description": "Foo",
                    "action": ("sys_call", "foo cmd"),
                    "trigger": ("event", {"at": [2, 5], "unit": "iteration"})},
                "results": [
                    {
                        "status": "success",
                        "started_at": 1475589987.433399,
                        "finished_at": 1475589987.525735,
                        "triggered_by": {"event_type": "iteration",
                                         "value": 2},
                        "output": {
                            "additive": [
                                {"chart_plugin": "StatsTable",
                                 "title": "Foo table",
                                 "data": [["A", 158], ["B", 177]]}],
                            "complete": []}},
                    {
                        "status": "success",
                        "started_at": 1475589993.432734,
                        "finished_at": 1475589993.457818,
                        "triggered_by": {"event_type": "iteration",
                                         "value": 5},
                        "output": {
                            "additive": [
                                {"chart_plugin": "StatsTable",
                                 "title": "Foo table",
                                 "data": [["A", 243], ["B", 179]]}],
                            "complete": []}}],
                "summary": {"success": 2}},
            {
                "config": {
                    "action": ("sys_call", "bar cmd"),
                    "trigger": ("event", {"at": [1, 2, 4], "unit": "time"})},
                "results": [
                    {
                        "status": "success",
                        "started_at": 1475589988.434244,
                        "finished_at": 1475589988.437791,
                        "triggered_by": {"event_type": "time", "value": 1},
                        "output": {
                            "additive": [],
                            "complete": [
                                {"chart_plugin": "Pie",
                                 "title": "Bar Pie",
                                 "data": [["F", 4], ["G", 2]]}]}},
                    {
                        "status": "success",
                        "started_at": 1475589989.433964,
                        "finished_at": 1475589989.437589,
                        "triggered_by": {"event_type": "time", "value": 2},
                        "output": {
                            "additive": [],
                            "complete": [
                                {"chart_plugin": "Pie",
                                 "title": "Bar Pie",
                                 "data": [["F", 42], ["G", 24]]}]}}],
                "summary": {"success": 2}}],
         "expected": [
            {"additive": [
                {"data": {"cols": ["Action", "Min (sec)", "Median (sec)",
                                   "90%ile (sec)", "95%ile (sec)",
                                   "Max (sec)", "Avg (sec)", "Count"],
                          "rows": [["A", 158.0, 200.5, 234.5, 238.75, 243.0,
                                    200.5, 2],
                                   ["B", 177.0, 178.0, 178.8, 178.9, 179.0,
                                    178.0, 2]],
                          "styles": {1: "rich"}},
                 "axis_label": "", "description": "", "label": "",
                 "title": "Foo table", "widget": "Table"}],
             "complete": [], "desc": "Foo", "name": "sys_call"},
            {"additive": [],
             "complete": [
                {"charts": [{"data": [["F", 4], ["G", 2]],
                             "title": "Bar Pie", "widget": "Pie"}],
                 "finished_at": "2016-10-04 14:06:28",
                 "started_at": "2016-10-04 14:06:28",
                 "status": "success",
                 "triggered_by": "time: 1"},
                {"charts": [{"data": [["F", 42], ["G", 24]],
                             "title": "Bar Pie", "widget": "Pie"}],
                 "finished_at": "2016-10-04 14:06:29",
                 "started_at": "2016-10-04 14:06:29",
                 "status": "success",
                 "triggered_by": "time: 2"}],
             "desc": "",
             "name": "sys_call"}]})
    @ddt.unpack
    def test__process_hooks(self, hooks, expected):
        """_process_hooks() renders hook results into report-ready dicts."""
        self.assertEqual(expected, plot._process_hooks(hooks))
    @mock.patch(PLOT + "_process_workload")
    def test__process_workloads(self, mock__process_workload):
        """_process_workloads() renumbers workloads that share a name."""
        workloads = [{"id": i, "uuid": "uuid-%s" % i, "task_uuid": "task-uuid",
                      "subtask_uuid": "subtask-uuid",
                      "name": "Foo.bar_%s" % i,
                      "description": "Make something useful (or not).",
                      "position": i,
                      "runner_type": "constant",
                      "runner": {"times": 3},
                      "contexts": {"users": {}},
                      "sla": {"failure_rate": {"max": 0}},
                      "args": {"key1": "value1"},
                      "hooks": [{"config": {
                          "action": ("foo", {}),
                          "trigger": ("xxx", {})}}],
                      "sla_results": {"sla": []},
                      "start_time": "2997.23.12",
                      "load_duration": 42,
                      "full_duration": 37,
                      "min_duration": 1, "max_duration": 2,
                      "total_iteration_count": 7, "failed_iteration_count": 2,
                      "pass_sla": True} for i in (1, 2, 3, 1)]
        mock__process_workload.side_effect = lambda a, b, c: (
            {"cls": "%s_cls" % a["name"],
             "name": str(c),
             "met": "dummy",
             "pos": str(c)})
        p_workloads = plot._process_workloads(workloads)
        self.assertEqual([
            {"cls": "Foo.bar_1_cls", "met": "dummy", "name": "0", "pos": "0"},
            {"cls": "Foo.bar_1_cls", "met": "dummy", "name": "1", "pos": "1"},
            {"cls": "Foo.bar_2_cls", "met": "dummy", "name": "0", "pos": "0"},
            {"cls": "Foo.bar_3_cls", "met": "dummy", "name": "0", "pos": "0"}],
            p_workloads)
    def test__make_source(self):
        """_make_source() merges task configs into one combined source."""
        tasks = [{"title": "task title",
                  "uuid": "task1",
                  "description": "task description",
                  "subtasks": [
                      {"title": "subtask title",
                       "description": "subtask description",
                       "workloads": [
                           {"name": "workload1.1",
                            "uuid": "w1.1",
                            "description": "Be or not to be",
                            "args": {},
                            "contexts": {"key": "context"},
                            "sla": {"key": "sla"},
                            "runner_type": "crunner",
                            "runner": {},
                            "hooks": []}
                       ]}
                  ]}]
        esource = json.dumps(collections.OrderedDict([
            ("version", 2),
            ("title", "task title"),
            ("description", "task description"),
            ("subtasks",
             [collections.OrderedDict([
                 ("title", "subtask title"),
                 ("description", "subtask description"),
                 ("workloads", [
                     collections.OrderedDict(
                         [("scenario", {"workload1.1": {}}),
                          ("description", "Be or not to be"),
                          ("contexts", {"key": "context"}),
                          ("runner", {"crunner": {}}),
                          ("hooks", []),
                          ("sla", {"key": "sla"})])])])])]),
            indent=2)
        self.assertEqual(esource, plot._make_source(tasks))
        tasks.append({"title": "task title",
                      "uuid": "task2",
                      "description": "task description",
                      "subtasks": [
                          {"title": "subtask title2",
                           "description": "subtask description2",
                           "workloads": [
                               {"name": "workload2.1",
                                "uuid": "w2.1",
                                "description": "Be or not to be",
                                "args": {},
                                "contexts": {"key": "context"},
                                "sla": {"key": "sla"},
                                "runner_type": "crunner",
                                "runner": {},
                                "hooks": []}
                           ]},
                          {"title": "subtask title3",
                           "description": "subtask description3",
                           "workloads": [
                               {"name": "workload2.2",
                                "uuid": "w2.2",
                                "description": "Be or not to be3",
                                "args": {},
                                "contexts": {"key": "context"},
                                "sla": {"key": "sla"},
                                "runner_type": "crunner",
                                "runner": {},
                                "hooks": []}
                           ]}
                      ]})
        esource = json.dumps(collections.OrderedDict([
            ("version", 2),
            ("title", "A combined task."),
            ("description",
             "The task contains subtasks from a multiple number of tasks."),
            ("subtasks",
             [
                 collections.OrderedDict([
                     ("title", "subtask title"),
                     ("description",
                      "subtask description\n[Task UUID: task1]"),
                     ("workloads", [
                         collections.OrderedDict(
                             [("scenario", {"workload1.1": {}}),
                              ("description", "Be or not to be"),
                              ("contexts", {"key": "context"}),
                              ("runner", {"crunner": {}}),
                              ("hooks", []),
                              ("sla", {"key": "sla"})])])]),
                 collections.OrderedDict([
                     ("title", "subtask title2"),
                     ("description",
                      "subtask description2\n[Task UUID: task2]"),
                     ("workloads", [
                         collections.OrderedDict([
                             ("scenario", {"workload2.1": {}}),
                             ("description", "Be or not to be"),
                             ("contexts", {"key": "context"}),
                             ("runner", {"crunner": {}}),
                             ("hooks", []),
                             ("sla", {"key": "sla"})])])]),
                 collections.OrderedDict([
                     ("title", "subtask title3"),
                     ("description",
                      "subtask description3\n[Task UUID: task2]"),
                     ("workloads", [
                         collections.OrderedDict([
                             ("scenario", {"workload2.2": {}}),
                             ("description", "Be or not to be3"),
                             ("contexts", {"key": "context"}),
                             ("runner", {"crunner": {}}),
                             ("hooks", []),
                             ("sla", {"key": "sla"})])])])])]),
            indent=2)
        self.assertEqual(esource, plot._make_source(tasks))
    @ddt.data({},
              {"include_libs": True},
              {"include_libs": False})
    @ddt.unpack
    @mock.patch(PLOT + "_make_source")
    @mock.patch(PLOT + "_process_workloads")
    @mock.patch(PLOT + "ui_utils.get_template")
    @mock.patch("rally.common.version.version_string", return_value="42.0")
    def test_plot(self, mock_version_string, mock_get_template,
                  mock__process_workloads, mock__make_source, **ddt_kwargs):
        """plot() renders the report template with processed workloads."""
        task_dict = {"title": "", "description": "",
                     "subtasks": [{"title": "", "description": "",
                                   "workloads": ["foo", "bar"]}]}
        mock__make_source.return_value = "source"
        mock__process_workloads.return_value = "scenarios"
        mock_get_template.return_value.render.return_value = "tasks_html"
        html = plot.plot([task_dict], **ddt_kwargs)
        self.assertEqual("tasks_html", html)
        mock_get_template.assert_called_once_with("task/report.html")
        mock__process_workloads.assert_called_once_with(["foo", "bar"])
        if "include_libs" in ddt_kwargs:
            mock_get_template.return_value.render.assert_called_once_with(
                version="42.0", data="\"scenarios\"",
                source="\"source\"",
                include_libs=ddt_kwargs["include_libs"])
        else:
            mock_get_template.return_value.render.assert_called_once_with(
                version="42.0", data="\"scenarios\"",
                source="\"source\"", include_libs=False)
    @mock.patch(PLOT + "objects.Task")
    @mock.patch(PLOT + "Trends")
    @mock.patch(PLOT + "ui_utils.get_template")
    @mock.patch("rally.common.version.version_string", return_value="42.0")
    def test_trends(self, mock_version_string, mock_get_template, mock_trends,
                    mock_task):
        """trends() feeds workloads into Trends and renders the template."""
        task_dict = {"uuid": "task--uu--iiii-dd",
                     "subtasks": [{"workloads": ["foo", "bar"]}]}
        trends = mock.Mock()
        trends.get_data.return_value = ["foo", "bar"]
        mock_trends.return_value = trends
        template = mock.Mock()
        template.render.return_value = "trends html"
        mock_get_template.return_value = template
        result = plot.trends([task_dict])
        self.assertEqual("trends html", result)
        self.assertEqual(
            [mock.call("task--uu--iiii-dd", "foo"),
             mock.call("task--uu--iiii-dd", "bar")],
            trends.add_result.mock_calls)
        mock_get_template.assert_called_once_with("task/trends.html")
        template.render.assert_called_once_with(version="42.0",
                                                data="[\"foo\", \"bar\"]",
                                                include_libs=False)
@ddt.ddt
class TrendsTestCase(test.TestCase):
    def test___init__(self):
        """A fresh Trends object starts with an empty data map."""
        trends = plot.Trends()
        self.assertEqual({}, trends._data)
        self.assertRaises(TypeError, plot.Trends, 42)
    @ddt.data({"args": [None], "result": "None"},
              {"args": [""], "result": ""},
              {"args": [" str value "], "result": "str value"},
              {"args": [" 42 "], "result": "42"},
              {"args": ["42"], "result": "42"},
              {"args": [42], "result": "42"},
              {"args": [42.00], "result": "42.0"},
              {"args": [[3.2, 1, " foo ", None]], "result": "1,3.2,None,foo"},
              {"args": [(" def", "abc", [22, 33])], "result": "22,33,abc,def"},
              {"args": [{}], "result": ""},
              {"args": [{1: 2, "a": " b c "}], "result": "1:2|a:b c"},
              {"args": [{"foo": "bar", (1, 2): [5, 4, 3]}],
               "result": "1,2:3,4,5|foo:bar"},
              {"args": [1, 2], "raises": TypeError},
              {"args": [set()], "raises": TypeError})
    @ddt.unpack
    def test__to_str(self, args, result=None, raises=None):
        """_to_str() flattens arbitrary values into a canonical string."""
        trends = plot.Trends()
        if raises:
            self.assertRaises(raises, trends._to_str, *args)
        else:
            self.assertEqual(result, trends._to_str(*args))
    @mock.patch(PLOT + "hashlib")
    def test__make_hash(self, mock_hashlib):
        """_make_hash() md5-hashes the utf8-encoded canonical string."""
        mock_hashlib.md5.return_value.hexdigest.return_value = "md5_digest"
        trends = plot.Trends()
        trends._to_str = mock.Mock()
        trends._to_str.return_value.encode.return_value = "foo_str"
        self.assertEqual("md5_digest", trends._make_hash("foo_obj"))
        trends._to_str.assert_called_once_with("foo_obj")
        trends._to_str.return_value.encode.assert_called_once_with("utf8")
        mock_hashlib.md5.assert_called_once_with("foo_str")
    def _make_result(self, salt, sla_success=True, with_na=False):
        """Build a fake workload-result dict, salted for uniqueness.

        With ``with_na=True`` every duration statistic is the string
        "n/a" instead of a number.
        """
        if with_na:
            atomic = {"a": "n/a", "b": "n/a"}
            stats = {
                "atomics": [
                    {"name": "a", "display_name": "a",
                     "data": {"min": "n/a", "median": "n/a",
                              "90%ile": "n/a", "95%ile": "n/a", "max": "n/a",
                              "avg": "n/a", "success": "n/a", "count": 4}},
                    {"name": "b", "display_name": "b",
                     "data": {"min": "n/a", "median": "n/a",
                              "90%ile": "n/a", "95%ile": "n/a", "max": "n/a",
                              "avg": "n/a", "success": "n/a", "count": 4}}
                ],
                "total": {"name": "total", "display_name": "total",
                          "data": {"min": "n/a", "median": "n/a",
                                   "90%ile": "n/a", "95%ile": "n/a",
                                   "max": "n/a", "avg": "n/a",
                                   "success": "n/a", "count": 4}}
            }
        else:
            stats = {
                "atomics": [
                    {"name": "a", "display_name": "a",
                     "data": {"min": 0.7, "median": 0.85, "90%ile": 0.9,
                              "95%ile": 0.87, "max": 1.25, "avg": 0.67,
                              "success": "100.0%", "count": 4}},
                    {"name": "b", "display_name": "b",
                     "data": {"min": 0.5, "median": 0.75, "90%ile": 0.85,
                              "95%ile": 0.9, "max": 1.1, "avg": 0.58,
                              "success": "100.0%", "count": 4}}],
                "total": {"name": "total", "display_name": "total",
                          "data": {"min": 1.2, "median": 1.55,
                                   "90%ile": 1.7, "95%ile": 1.8, "max": 1.5,
                                   "avg": 0.8, "success": "100.0%", "count": 4
                                   }}}
            atomic = {"a": 123, "b": 456}
        return {
            "name": "Scenario.name_%d" % salt,
            "args": {}, "contexts": {}, "runner_type": "foo", "runner": {},
            "hooks": {},
            "pass_sla": sla_success,
            "sla": [{"success": sla_success}],
            "total_iteration_count": 4,
            "start_time": 123456.789 + salt,
            "statistics": {
                "atomics": atomic,
                "durations": stats},
            "data": ["<iter-0>", "<iter-1>", "<iter-2>", "<iter-3>"]}
def _sort_trends(self, trends_result):
for idx in range(len(trends_result)):
trends_result[idx]["durations"].sort()
for a_idx in range(len(trends_result[idx]["actions"])):
trends_result[idx]["actions"][a_idx]["durations"].sort()
return trends_result
    @mock.patch(PLOT + "json.dumps")
    @mock.patch(PLOT + "objects.Workload.to_task")
    def test_add_result_and_get_data(self, mock_workload_to_task, mock_dumps):
        """Results from two different workloads yield two trend entries."""
        # Make json.dumps a pass-through so expected data can be compared as
        # plain Python structures instead of JSON strings.
        mock_dumps.side_effect = lambda x, **j: x
        workload_cfg = [
            {
                "description": "foo", "name": "Name1",
                "subtasks": [{"description": "descr"}]},
            {
                "description": "foo", "name": "Name2",
                "subtasks": [{"description": "descr"}]}]
        mock_workload_to_task.side_effect = workload_cfg
        trends = plot.Trends()
        for i in 0, 1:
            trends.add_result(
                "task_uuid_%s" % i, self._make_result(i))
        actual = self._sort_trends(trends.get_data())
        # add_result() rewrites each workload description to reference the
        # task it came from, so patch the expected configs to match.
        workload_cfg[0]["description"] = (
            "Task(s) with the workload: task_uuid_1")
        workload_cfg[1]["description"] = (
            "Task(s) with the workload: task_uuid_2")
        # Durations below are alphabetically ordered because _sort_trends()
        # sorted them above.
        expected = [
            {"actions": [{"durations": [("90%ile", [(123456789, 0.9)]),
                                        ("95%ile", [(123456789, 0.87)]),
                                        ("avg", [(123456789, 0.67)]),
                                        ("max", [(123456789, 1.25)]),
                                        ("median", [(123456789, 0.85)]),
                                        ("min", [(123456789, 0.7)])],
                          "name": "a",
                          "success": [("success", [(123456789, 100.0)])]},
                         {"durations": [("90%ile", [(123456789, 0.85)]),
                                        ("95%ile", [(123456789, 0.9)]),
                                        ("avg", [(123456789, 0.58)]),
                                        ("max", [(123456789, 1.1)]),
                                        ("median", [(123456789, 0.75)]),
                                        ("min", [(123456789, 0.5)])],
                          "name": "b",
                          "success": [("success", [(123456789, 100.0)])]}],
             "cls": "Scenario",
             "config": workload_cfg[0],
             "durations": [("90%ile", [(123456789, 1.7)]),
                           ("95%ile", [(123456789, 1.8)]),
                           ("avg", [(123456789, 0.8)]),
                           ("max", [(123456789, 1.5)]),
                           ("median", [(123456789, 1.55)]),
                           ("min", [(123456789, 1.2)])],
             "length": 1,
             "met": "name_0",
             "name": "Scenario.name_0",
             "sla_failures": 0,
             "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
             "success": [("success", [(123456789, 100.0)])]},
            {"actions": [{"durations": [("90%ile", [(123457789, 0.9)]),
                                        ("95%ile", [(123457789, 0.87)]),
                                        ("avg", [(123457789, 0.67)]),
                                        ("max", [(123457789, 1.25)]),
                                        ("median", [(123457789, 0.85)]),
                                        ("min", [(123457789, 0.7)])],
                          "name": "a",
                          "success": [("success", [(123457789, 100.0)])]},
                         {"durations": [("90%ile", [(123457789, 0.85)]),
                                        ("95%ile", [(123457789, 0.9)]),
                                        ("avg", [(123457789, 0.58)]),
                                        ("max", [(123457789, 1.1)]),
                                        ("median", [(123457789, 0.75)]),
                                        ("min", [(123457789, 0.5)])],
                          "name": "b",
                          "success": [("success", [(123457789, 100.0)])]}],
             "cls": "Scenario",
             "config": workload_cfg[1],
             "durations": [("90%ile", [(123457789, 1.7)]),
                           ("95%ile", [(123457789, 1.8)]),
                           ("avg", [(123457789, 0.8)]),
                           ("max", [(123457789, 1.5)]),
                           ("median", [(123457789, 1.55)]),
                           ("min", [(123457789, 1.2)])],
             "length": 1,
             "met": "name_1",
             "name": "Scenario.name_1",
             "sla_failures": 0,
             "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
             "success": [("success", [(123457789, 100.0)])]}]
        self.assertEqual(expected, actual)
    @mock.patch(PLOT + "json.dumps")
    @mock.patch(PLOT + "objects.Workload.to_task")
    def test_add_result_once_and_get_data(self, mock_workload_to_task,
                                          mock_dumps):
        """A single result with a failed SLA is counted as one sla_failure."""
        # Pass-through json.dumps so expected data stays a plain structure.
        mock_dumps.side_effect = lambda x, **j: x
        workload_cfg = {"description": "foo",
                        "subtasks": [{"description": "descr"}]}
        mock_workload_to_task.return_value = workload_cfg
        trends = plot.Trends()
        trends.add_result(
            "task_uuid",
            self._make_result(42, sla_success=False))
        actual = self._sort_trends(trends.get_data())
        # add_result() rewrites the description to reference the task.
        workload_cfg["description"] = "Task(s) with the workload: task_uuid"
        expected = [
            {"actions": [{"durations": [("90%ile", [(123498789, 0.9)]),
                                        ("95%ile", [(123498789, 0.87)]),
                                        ("avg", [(123498789, 0.67)]),
                                        ("max", [(123498789, 1.25)]),
                                        ("median", [(123498789, 0.85)]),
                                        ("min", [(123498789, 0.7)])],
                          "name": "a",
                          "success": [("success", [(123498789, 100.0)])]},
                         {"durations": [("90%ile", [(123498789, 0.85)]),
                                        ("95%ile", [(123498789, 0.9)]),
                                        ("avg", [(123498789, 0.58)]),
                                        ("max", [(123498789, 1.1)]),
                                        ("median", [(123498789, 0.75)]),
                                        ("min", [(123498789, 0.5)])],
                          "name": "b",
                          "success": [("success", [(123498789, 100.0)])]}],
             "cls": "Scenario",
             "config": workload_cfg,
             "durations": [("90%ile", [(123498789, 1.7)]),
                           ("95%ile", [(123498789, 1.8)]),
                           ("avg", [(123498789, 0.8)]),
                           ("max", [(123498789, 1.5)]),
                           ("median", [(123498789, 1.55)]),
                           ("min", [(123498789, 1.2)])],
             "length": 1,
             "met": "name_42",
             "name": "Scenario.name_42",
             "sla_failures": 1,
             "stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
             "success": [("success", [(123498789, 100.0)])]}]
        self.assertEqual(expected, actual)
    @mock.patch(PLOT + "json.dumps")
    @mock.patch(PLOT + "objects.Workload.to_task")
    def test_add_result_with_na_and_get_data(self, mock_workload_to_task,
                                             mock_dumps):
        """"n/a" statistics propagate as-is; stat aggregates become None."""
        # Pass-through json.dumps so expected data stays a plain structure.
        mock_dumps.side_effect = lambda x, **j: x
        workload_cfg = {"description": "foo",
                        "subtasks": [{"description": "descr"}]}
        mock_workload_to_task.return_value = workload_cfg
        trends = plot.Trends()
        trends.add_result(
            "task_uuid",
            self._make_result(42, sla_success=False, with_na=True))
        actual = self._sort_trends(trends.get_data())
        # add_result() rewrites the description to reference the task.
        workload_cfg["description"] = "Task(s) with the workload: task_uuid"
        expected = [
            {"actions": [{"durations": [("90%ile", [(123498789, "n/a")]),
                                        ("95%ile", [(123498789, "n/a")]),
                                        ("avg", [(123498789, "n/a")]),
                                        ("max", [(123498789, "n/a")]),
                                        ("median", [(123498789, "n/a")]),
                                        ("min", [(123498789, "n/a")])],
                          "name": "a",
                          "success": [("success", [(123498789, 0)])]},
                         {"durations": [("90%ile", [(123498789, "n/a")]),
                                        ("95%ile", [(123498789, "n/a")]),
                                        ("avg", [(123498789, "n/a")]),
                                        ("max", [(123498789, "n/a")]),
                                        ("median", [(123498789, "n/a")]),
                                        ("min", [(123498789, "n/a")])],
                          "name": "b",
                          "success": [("success", [(123498789, 0)])]}],
             "cls": "Scenario",
             "config": workload_cfg,
             "durations": [("90%ile", [(123498789, "n/a")]),
                           ("95%ile", [(123498789, "n/a")]),
                           ("avg", [(123498789, "n/a")]),
                           ("max", [(123498789, "n/a")]),
                           ("median", [(123498789, "n/a")]),
                           ("min", [(123498789, "n/a")])],
             "length": 1,
             "met": "name_42",
             "name": "Scenario.name_42",
             "sla_failures": 1,
             "stat": {"avg": None, "max": None, "min": None},
             "success": [("success", [(123498789, 0)])]}]
        self.assertEqual(expected, actual)
def test_get_data_no_results_added(self):
trends = plot.Trends()
self.assertEqual([], trends.get_data())
    def test_obtaining_workload_description(self):
        """Description handling for a single vs. conflicting results."""
        trends = plot.Trends()
        workload_1 = self._make_result(42)
        workload_1["name"] = "Dummy.dummy"
        workload_1["description"] = "foo!!!"
        trends.add_result("task_uuid", workload_1)
        data = trends.get_data()
        self.assertEqual(1, len(data))
        cfg = json.loads(data[0]["config"])
        # Single result: the workload's own description is kept verbatim.
        self.assertEqual("foo!!!",
                         cfg["subtasks"][0]["description"])
        workload_2 = self._make_result(42)
        workload_2["name"] = "Dummy.dummy"
        workload_2["description"] = "bar!!!"
        trends.add_result("task_uuid", workload_2)
        data = trends.get_data()
        self.assertEqual(1, len(data))
        cfg = json.loads(data[0]["config"])
        # Conflicting descriptions: Trends falls back to a generic text
        # (presumably the Dummy.dummy scenario's default docstring — the
        # source of this string isn't visible in this file).
        self.assertEqual("Do nothing and sleep for the given number of "
                         "seconds (0 by default).",
                         cfg["subtasks"][0]["description"])
| 47.459916 | 79 | 0.401701 |
import collections
import json
from unittest import mock
import ddt
from rally.task.processing import plot
from tests.unit import test
PLOT = "rally.task.processing.plot."
@ddt.ddt
class PlotTestCase(test.TestCase):
@mock.patch(PLOT + "charts")
def test__process_workload(self, mock_charts):
for mock_ins, ret in [
(mock_charts.MainStatsTable, "main_stats"),
(mock_charts.MainStackedAreaChart, "main_stacked"),
(mock_charts.AtomicStackedAreaChart, "atomic_stacked"),
(mock_charts.OutputStackedAreaDeprecatedChart,
"output_stacked"),
(mock_charts.LoadProfileChart, "load_profile"),
(mock_charts.MainHistogramChart, "main_histogram"),
(mock_charts.AtomicHistogramChart, "atomic_histogram"),
(mock_charts.AtomicAvgChart, "atomic_avg")]:
setattr(mock_ins.return_value.render, "return_value", ret)
iterations = [
{"timestamp": i + 2, "error": [],
"duration": i + 5, "idle_duration": i,
"output": {"additive": [], "complete": []},
"atomic_actions": {"foo_action": i + 10}} for i in range(10)]
workload = {
"data": iterations,
"sla_results": {"sla": {}}, "pass_sla": True,
"position": 0,
"name": "Foo.bar", "description": "Description!!",
"runner_type": "constant",
"runner": {},
"statistics": {"atomics": {
"foo_action": {"max_duration": 19, "min_duration": 10}}},
"full_duration": 40, "load_duration": 32,
"total_iteration_count": 10,
"max_duration": 14, "min_duration": 5,
"start_time": 2,
"created_at": "xxx_time",
"hooks": []}
result = plot._process_workload(
workload, "!!!CONF!!!", 1)
self.assertEqual(
{"cls": "Foo", "met": "bar", "pos": "1",
"name": "bar [2]", "description": "Description!!",
"runner": "constant", "config": json.dumps("!!!CONF!!!",
indent=2),
"created_at": "xxx_time",
"full_duration": 40, "load_duration": 32, "hooks": [],
"atomic": {"histogram": "atomic_histogram",
"iter": "atomic_stacked", "pie": "atomic_avg"},
"iterations": {"histogram": "main_histogram",
"iter": "main_stacked",
"pie": [("success", 10), ("errors", 0)]},
"iterations_count": 10, "errors": [],
"load_profile": "load_profile",
"additive_output": [],
"complete_output": [[], [], [], [], [], [], [], [], [], []],
"has_output": False,
"output_errors": [],
"sla": {}, "sla_success": True, "table": "main_stats"},
result)
@ddt.data(
{"hooks": [], "expected": []},
{"hooks": [
{
"config": {
"description": "Foo",
"action": ("sys_call", "foo cmd"),
"trigger": ("event", {"at": [2, 5], "unit": "iteration"})},
"results": [
{
"status": "success",
"started_at": 1475589987.433399,
"finished_at": 1475589987.525735,
"triggered_by": {"event_type": "iteration",
"value": 2},
"output": {
"additive": [
{"chart_plugin": "StatsTable",
"title": "Foo table",
"data": [["A", 158], ["B", 177]]}],
"complete": []}},
{
"status": "success",
"started_at": 1475589993.432734,
"finished_at": 1475589993.457818,
"triggered_by": {"event_type": "iteration",
"value": 5},
"output": {
"additive": [
{"chart_plugin": "StatsTable",
"title": "Foo table",
"data": [["A", 243], ["B", 179]]}],
"complete": []}}],
"summary": {"success": 2}},
{
"config": {
"action": ("sys_call", "bar cmd"),
"trigger": ("event", {"at": [1, 2, 4], "unit": "time"})},
"results": [
{
"status": "success",
"started_at": 1475589988.434244,
"finished_at": 1475589988.437791,
"triggered_by": {"event_type": "time", "value": 1},
"output": {
"additive": [],
"complete": [
{"chart_plugin": "Pie",
"title": "Bar Pie",
"data": [["F", 4], ["G", 2]]}]}},
{
"status": "success",
"started_at": 1475589989.433964,
"finished_at": 1475589989.437589,
"triggered_by": {"event_type": "time", "value": 2},
"output": {
"additive": [],
"complete": [
{"chart_plugin": "Pie",
"title": "Bar Pie",
"data": [["F", 42], ["G", 24]]}]}}],
"summary": {"success": 2}}],
"expected": [
{"additive": [
{"data": {"cols": ["Action", "Min (sec)", "Median (sec)",
"90%ile (sec)", "95%ile (sec)",
"Max (sec)", "Avg (sec)", "Count"],
"rows": [["A", 158.0, 200.5, 234.5, 238.75, 243.0,
200.5, 2],
["B", 177.0, 178.0, 178.8, 178.9, 179.0,
178.0, 2]],
"styles": {1: "rich"}},
"axis_label": "", "description": "", "label": "",
"title": "Foo table", "widget": "Table"}],
"complete": [], "desc": "Foo", "name": "sys_call"},
{"additive": [],
"complete": [
{"charts": [{"data": [["F", 4], ["G", 2]],
"title": "Bar Pie", "widget": "Pie"}],
"finished_at": "2016-10-04 14:06:28",
"started_at": "2016-10-04 14:06:28",
"status": "success",
"triggered_by": "time: 1"},
{"charts": [{"data": [["F", 42], ["G", 24]],
"title": "Bar Pie", "widget": "Pie"}],
"finished_at": "2016-10-04 14:06:29",
"started_at": "2016-10-04 14:06:29",
"status": "success",
"triggered_by": "time: 2"}],
"desc": "",
"name": "sys_call"}]})
@ddt.unpack
def test__process_hooks(self, hooks, expected):
self.assertEqual(expected, plot._process_hooks(hooks))
@mock.patch(PLOT + "_process_workload")
def test__process_workloads(self, mock__process_workload):
workloads = [{"id": i, "uuid": "uuid-%s" % i, "task_uuid": "task-uuid",
"subtask_uuid": "subtask-uuid",
"name": "Foo.bar_%s" % i,
"description": "Make something useful (or not).",
"position": i,
"runner_type": "constant",
"runner": {"times": 3},
"contexts": {"users": {}},
"sla": {"failure_rate": {"max": 0}},
"args": {"key1": "value1"},
"hooks": [{"config": {
"action": ("foo", {}),
"trigger": ("xxx", {})}}],
"sla_results": {"sla": []},
"start_time": "2997.23.12",
"load_duration": 42,
"full_duration": 37,
"min_duration": 1, "max_duration": 2,
"total_iteration_count": 7, "failed_iteration_count": 2,
"pass_sla": True} for i in (1, 2, 3, 1)]
mock__process_workload.side_effect = lambda a, b, c: (
{"cls": "%s_cls" % a["name"],
"name": str(c),
"met": "dummy",
"pos": str(c)})
p_workloads = plot._process_workloads(workloads)
self.assertEqual([
{"cls": "Foo.bar_1_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "Foo.bar_1_cls", "met": "dummy", "name": "1", "pos": "1"},
{"cls": "Foo.bar_2_cls", "met": "dummy", "name": "0", "pos": "0"},
{"cls": "Foo.bar_3_cls", "met": "dummy", "name": "0", "pos": "0"}],
p_workloads)
def test__make_source(self):
tasks = [{"title": "task title",
"uuid": "task1",
"description": "task description",
"subtasks": [
{"title": "subtask title",
"description": "subtask description",
"workloads": [
{"name": "workload1.1",
"uuid": "w1.1",
"description": "Be or not to be",
"args": {},
"contexts": {"key": "context"},
"sla": {"key": "sla"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]}
]}]
esource = json.dumps(collections.OrderedDict([
("version", 2),
("title", "task title"),
("description", "task description"),
("subtasks",
[collections.OrderedDict([
("title", "subtask title"),
("description", "subtask description"),
("workloads", [
collections.OrderedDict(
[("scenario", {"workload1.1": {}}),
("description", "Be or not to be"),
("contexts", {"key": "context"}),
("runner", {"crunner": {}}),
("hooks", []),
("sla", {"key": "sla"})])])])])]),
indent=2)
self.assertEqual(esource, plot._make_source(tasks))
tasks.append({"title": "task title",
"uuid": "task2",
"description": "task description",
"subtasks": [
{"title": "subtask title2",
"description": "subtask description2",
"workloads": [
{"name": "workload2.1",
"uuid": "w2.1",
"description": "Be or not to be",
"args": {},
"contexts": {"key": "context"},
"sla": {"key": "sla"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]},
{"title": "subtask title3",
"description": "subtask description3",
"workloads": [
{"name": "workload2.2",
"uuid": "w2.2",
"description": "Be or not to be3",
"args": {},
"contexts": {"key": "context"},
"sla": {"key": "sla"},
"runner_type": "crunner",
"runner": {},
"hooks": []}
]}
]})
esource = json.dumps(collections.OrderedDict([
("version", 2),
("title", "A combined task."),
("description",
"The task contains subtasks from a multiple number of tasks."),
("subtasks",
[
collections.OrderedDict([
("title", "subtask title"),
("description",
"subtask description\n[Task UUID: task1]"),
("workloads", [
collections.OrderedDict(
[("scenario", {"workload1.1": {}}),
("description", "Be or not to be"),
("contexts", {"key": "context"}),
("runner", {"crunner": {}}),
("hooks", []),
("sla", {"key": "sla"})])])]),
collections.OrderedDict([
("title", "subtask title2"),
("description",
"subtask description2\n[Task UUID: task2]"),
("workloads", [
collections.OrderedDict([
("scenario", {"workload2.1": {}}),
("description", "Be or not to be"),
("contexts", {"key": "context"}),
("runner", {"crunner": {}}),
("hooks", []),
("sla", {"key": "sla"})])])]),
collections.OrderedDict([
("title", "subtask title3"),
("description",
"subtask description3\n[Task UUID: task2]"),
("workloads", [
collections.OrderedDict([
("scenario", {"workload2.2": {}}),
("description", "Be or not to be3"),
("contexts", {"key": "context"}),
("runner", {"crunner": {}}),
("hooks", []),
("sla", {"key": "sla"})])])])])]),
indent=2)
self.assertEqual(esource, plot._make_source(tasks))
@ddt.data({},
{"include_libs": True},
{"include_libs": False})
@ddt.unpack
@mock.patch(PLOT + "_make_source")
@mock.patch(PLOT + "_process_workloads")
@mock.patch(PLOT + "ui_utils.get_template")
@mock.patch("rally.common.version.version_string", return_value="42.0")
def test_plot(self, mock_version_string, mock_get_template,
mock__process_workloads, mock__make_source, **ddt_kwargs):
task_dict = {"title": "", "description": "",
"subtasks": [{"title": "", "description": "",
"workloads": ["foo", "bar"]}]}
mock__make_source.return_value = "source"
mock__process_workloads.return_value = "scenarios"
mock_get_template.return_value.render.return_value = "tasks_html"
html = plot.plot([task_dict], **ddt_kwargs)
self.assertEqual("tasks_html", html)
mock_get_template.assert_called_once_with("task/report.html")
mock__process_workloads.assert_called_once_with(["foo", "bar"])
if "include_libs" in ddt_kwargs:
mock_get_template.return_value.render.assert_called_once_with(
version="42.0", data="\"scenarios\"",
source="\"source\"",
include_libs=ddt_kwargs["include_libs"])
else:
mock_get_template.return_value.render.assert_called_once_with(
version="42.0", data="\"scenarios\"",
source="\"source\"", include_libs=False)
@mock.patch(PLOT + "objects.Task")
@mock.patch(PLOT + "Trends")
@mock.patch(PLOT + "ui_utils.get_template")
@mock.patch("rally.common.version.version_string", return_value="42.0")
def test_trends(self, mock_version_string, mock_get_template, mock_trends,
mock_task):
task_dict = {"uuid": "task--uu--iiii-dd",
"subtasks": [{"workloads": ["foo", "bar"]}]}
trends = mock.Mock()
trends.get_data.return_value = ["foo", "bar"]
mock_trends.return_value = trends
template = mock.Mock()
template.render.return_value = "trends html"
mock_get_template.return_value = template
result = plot.trends([task_dict])
self.assertEqual("trends html", result)
self.assertEqual(
[mock.call("task--uu--iiii-dd", "foo"),
mock.call("task--uu--iiii-dd", "bar")],
trends.add_result.mock_calls)
mock_get_template.assert_called_once_with("task/trends.html")
template.render.assert_called_once_with(version="42.0",
data="[\"foo\", \"bar\"]",
include_libs=False)
@ddt.ddt
class TrendsTestCase(test.TestCase):
def test___init__(self):
trends = plot.Trends()
self.assertEqual({}, trends._data)
self.assertRaises(TypeError, plot.Trends, 42)
@ddt.data({"args": [None], "result": "None"},
{"args": [""], "result": ""},
{"args": [" str value "], "result": "str value"},
{"args": [" 42 "], "result": "42"},
{"args": ["42"], "result": "42"},
{"args": [42], "result": "42"},
{"args": [42.00], "result": "42.0"},
{"args": [[3.2, 1, " foo ", None]], "result": "1,3.2,None,foo"},
{"args": [(" def", "abc", [22, 33])], "result": "22,33,abc,def"},
{"args": [{}], "result": ""},
{"args": [{1: 2, "a": " b c "}], "result": "1:2|a:b c"},
{"args": [{"foo": "bar", (1, 2): [5, 4, 3]}],
"result": "1,2:3,4,5|foo:bar"},
{"args": [1, 2], "raises": TypeError},
{"args": [set()], "raises": TypeError})
@ddt.unpack
def test__to_str(self, args, result=None, raises=None):
trends = plot.Trends()
if raises:
self.assertRaises(raises, trends._to_str, *args)
else:
self.assertEqual(result, trends._to_str(*args))
@mock.patch(PLOT + "hashlib")
def test__make_hash(self, mock_hashlib):
mock_hashlib.md5.return_value.hexdigest.return_value = "md5_digest"
trends = plot.Trends()
trends._to_str = mock.Mock()
trends._to_str.return_value.encode.return_value = "foo_str"
self.assertEqual("md5_digest", trends._make_hash("foo_obj"))
trends._to_str.assert_called_once_with("foo_obj")
trends._to_str.return_value.encode.assert_called_once_with("utf8")
mock_hashlib.md5.assert_called_once_with("foo_str")
def _make_result(self, salt, sla_success=True, with_na=False):
if with_na:
atomic = {"a": "n/a", "b": "n/a"}
stats = {
"atomics": [
{"name": "a", "display_name": "a",
"data": {"min": "n/a", "median": "n/a",
"90%ile": "n/a", "95%ile": "n/a", "max": "n/a",
"avg": "n/a", "success": "n/a", "count": 4}},
{"name": "b", "display_name": "b",
"data": {"min": "n/a", "median": "n/a",
"90%ile": "n/a", "95%ile": "n/a", "max": "n/a",
"avg": "n/a", "success": "n/a", "count": 4}}
],
"total": {"name": "total", "display_name": "total",
"data": {"min": "n/a", "median": "n/a",
"90%ile": "n/a", "95%ile": "n/a",
"max": "n/a", "avg": "n/a",
"success": "n/a", "count": 4}}
}
else:
stats = {
"atomics": [
{"name": "a", "display_name": "a",
"data": {"min": 0.7, "median": 0.85, "90%ile": 0.9,
"95%ile": 0.87, "max": 1.25, "avg": 0.67,
"success": "100.0%", "count": 4}},
{"name": "b", "display_name": "b",
"data": {"min": 0.5, "median": 0.75, "90%ile": 0.85,
"95%ile": 0.9, "max": 1.1, "avg": 0.58,
"success": "100.0%", "count": 4}}],
"total": {"name": "total", "display_name": "total",
"data": {"min": 1.2, "median": 1.55,
"90%ile": 1.7, "95%ile": 1.8, "max": 1.5,
"avg": 0.8, "success": "100.0%", "count": 4
}}}
atomic = {"a": 123, "b": 456}
return {
"name": "Scenario.name_%d" % salt,
"args": {}, "contexts": {}, "runner_type": "foo", "runner": {},
"hooks": {},
"pass_sla": sla_success,
"sla": [{"success": sla_success}],
"total_iteration_count": 4,
"start_time": 123456.789 + salt,
"statistics": {
"atomics": atomic,
"durations": stats},
"data": ["<iter-0>", "<iter-1>", "<iter-2>", "<iter-3>"]}
def _sort_trends(self, trends_result):
for idx in range(len(trends_result)):
trends_result[idx]["durations"].sort()
for a_idx in range(len(trends_result[idx]["actions"])):
trends_result[idx]["actions"][a_idx]["durations"].sort()
return trends_result
@mock.patch(PLOT + "json.dumps")
@mock.patch(PLOT + "objects.Workload.to_task")
def test_add_result_and_get_data(self, mock_workload_to_task, mock_dumps):
mock_dumps.side_effect = lambda x, **j: x
workload_cfg = [
{
"description": "foo", "name": "Name1",
"subtasks": [{"description": "descr"}]},
{
"description": "foo", "name": "Name2",
"subtasks": [{"description": "descr"}]}]
mock_workload_to_task.side_effect = workload_cfg
trends = plot.Trends()
for i in 0, 1:
trends.add_result(
"task_uuid_%s" % i, self._make_result(i))
actual = self._sort_trends(trends.get_data())
workload_cfg[0]["description"] = (
"Task(s) with the workload: task_uuid_1")
workload_cfg[1]["description"] = (
"Task(s) with the workload: task_uuid_2")
expected = [
{"actions": [{"durations": [("90%ile", [(123456789, 0.9)]),
("95%ile", [(123456789, 0.87)]),
("avg", [(123456789, 0.67)]),
("max", [(123456789, 1.25)]),
("median", [(123456789, 0.85)]),
("min", [(123456789, 0.7)])],
"name": "a",
"success": [("success", [(123456789, 100.0)])]},
{"durations": [("90%ile", [(123456789, 0.85)]),
("95%ile", [(123456789, 0.9)]),
("avg", [(123456789, 0.58)]),
("max", [(123456789, 1.1)]),
("median", [(123456789, 0.75)]),
("min", [(123456789, 0.5)])],
"name": "b",
"success": [("success", [(123456789, 100.0)])]}],
"cls": "Scenario",
"config": workload_cfg[0],
"durations": [("90%ile", [(123456789, 1.7)]),
("95%ile", [(123456789, 1.8)]),
("avg", [(123456789, 0.8)]),
("max", [(123456789, 1.5)]),
("median", [(123456789, 1.55)]),
("min", [(123456789, 1.2)])],
"length": 1,
"met": "name_0",
"name": "Scenario.name_0",
"sla_failures": 0,
"stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
"success": [("success", [(123456789, 100.0)])]},
{"actions": [{"durations": [("90%ile", [(123457789, 0.9)]),
("95%ile", [(123457789, 0.87)]),
("avg", [(123457789, 0.67)]),
("max", [(123457789, 1.25)]),
("median", [(123457789, 0.85)]),
("min", [(123457789, 0.7)])],
"name": "a",
"success": [("success", [(123457789, 100.0)])]},
{"durations": [("90%ile", [(123457789, 0.85)]),
("95%ile", [(123457789, 0.9)]),
("avg", [(123457789, 0.58)]),
("max", [(123457789, 1.1)]),
("median", [(123457789, 0.75)]),
("min", [(123457789, 0.5)])],
"name": "b",
"success": [("success", [(123457789, 100.0)])]}],
"cls": "Scenario",
"config": workload_cfg[1],
"durations": [("90%ile", [(123457789, 1.7)]),
("95%ile", [(123457789, 1.8)]),
("avg", [(123457789, 0.8)]),
("max", [(123457789, 1.5)]),
("median", [(123457789, 1.55)]),
("min", [(123457789, 1.2)])],
"length": 1,
"met": "name_1",
"name": "Scenario.name_1",
"sla_failures": 0,
"stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
"success": [("success", [(123457789, 100.0)])]}]
self.assertEqual(expected, actual)
@mock.patch(PLOT + "json.dumps")
@mock.patch(PLOT + "objects.Workload.to_task")
def test_add_result_once_and_get_data(self, mock_workload_to_task,
mock_dumps):
mock_dumps.side_effect = lambda x, **j: x
workload_cfg = {"description": "foo",
"subtasks": [{"description": "descr"}]}
mock_workload_to_task.return_value = workload_cfg
trends = plot.Trends()
trends.add_result(
"task_uuid",
self._make_result(42, sla_success=False))
actual = self._sort_trends(trends.get_data())
workload_cfg["description"] = "Task(s) with the workload: task_uuid"
expected = [
{"actions": [{"durations": [("90%ile", [(123498789, 0.9)]),
("95%ile", [(123498789, 0.87)]),
("avg", [(123498789, 0.67)]),
("max", [(123498789, 1.25)]),
("median", [(123498789, 0.85)]),
("min", [(123498789, 0.7)])],
"name": "a",
"success": [("success", [(123498789, 100.0)])]},
{"durations": [("90%ile", [(123498789, 0.85)]),
("95%ile", [(123498789, 0.9)]),
("avg", [(123498789, 0.58)]),
("max", [(123498789, 1.1)]),
("median", [(123498789, 0.75)]),
("min", [(123498789, 0.5)])],
"name": "b",
"success": [("success", [(123498789, 100.0)])]}],
"cls": "Scenario",
"config": workload_cfg,
"durations": [("90%ile", [(123498789, 1.7)]),
("95%ile", [(123498789, 1.8)]),
("avg", [(123498789, 0.8)]),
("max", [(123498789, 1.5)]),
("median", [(123498789, 1.55)]),
("min", [(123498789, 1.2)])],
"length": 1,
"met": "name_42",
"name": "Scenario.name_42",
"sla_failures": 1,
"stat": {"avg": 1.425, "max": 1.8, "min": 0.8},
"success": [("success", [(123498789, 100.0)])]}]
self.assertEqual(expected, actual)
@mock.patch(PLOT + "json.dumps")
@mock.patch(PLOT + "objects.Workload.to_task")
def test_add_result_with_na_and_get_data(self, mock_workload_to_task,
mock_dumps):
mock_dumps.side_effect = lambda x, **j: x
workload_cfg = {"description": "foo",
"subtasks": [{"description": "descr"}]}
mock_workload_to_task.return_value = workload_cfg
trends = plot.Trends()
trends.add_result(
"task_uuid",
self._make_result(42, sla_success=False, with_na=True))
actual = self._sort_trends(trends.get_data())
workload_cfg["description"] = "Task(s) with the workload: task_uuid"
expected = [
{"actions": [{"durations": [("90%ile", [(123498789, "n/a")]),
("95%ile", [(123498789, "n/a")]),
("avg", [(123498789, "n/a")]),
("max", [(123498789, "n/a")]),
("median", [(123498789, "n/a")]),
("min", [(123498789, "n/a")])],
"name": "a",
"success": [("success", [(123498789, 0)])]},
{"durations": [("90%ile", [(123498789, "n/a")]),
("95%ile", [(123498789, "n/a")]),
("avg", [(123498789, "n/a")]),
("max", [(123498789, "n/a")]),
("median", [(123498789, "n/a")]),
("min", [(123498789, "n/a")])],
"name": "b",
"success": [("success", [(123498789, 0)])]}],
"cls": "Scenario",
"config": workload_cfg,
"durations": [("90%ile", [(123498789, "n/a")]),
("95%ile", [(123498789, "n/a")]),
("avg", [(123498789, "n/a")]),
("max", [(123498789, "n/a")]),
("median", [(123498789, "n/a")]),
("min", [(123498789, "n/a")])],
"length": 1,
"met": "name_42",
"name": "Scenario.name_42",
"sla_failures": 1,
"stat": {"avg": None, "max": None, "min": None},
"success": [("success", [(123498789, 0)])]}]
self.assertEqual(expected, actual)
def test_get_data_no_results_added(self):
trends = plot.Trends()
self.assertEqual([], trends.get_data())
def test_obtaining_workload_description(self):
trends = plot.Trends()
workload_1 = self._make_result(42)
workload_1["name"] = "Dummy.dummy"
workload_1["description"] = "foo!!!"
trends.add_result("task_uuid", workload_1)
data = trends.get_data()
self.assertEqual(1, len(data))
cfg = json.loads(data[0]["config"])
self.assertEqual("foo!!!",
cfg["subtasks"][0]["description"])
workload_2 = self._make_result(42)
workload_2["name"] = "Dummy.dummy"
workload_2["description"] = "bar!!!"
trends.add_result("task_uuid", workload_2)
data = trends.get_data()
self.assertEqual(1, len(data))
cfg = json.loads(data[0]["config"])
self.assertEqual("Do nothing and sleep for the given number of "
"seconds (0 by default).",
cfg["subtasks"][0]["description"])
| true | true |
f7f549d9195f317ecf74b3cbd4aae7621a639cda | 3,715 | py | Python | code/pdf_split.py | solicia-xu/pbpython | 87a825a6a4b0aa25ef644498a781b222aa11369d | [
"BSD-3-Clause"
] | 1,846 | 2015-05-18T02:04:30.000Z | 2022-03-31T09:49:16.000Z | code/pdf_split.py | solicia-xu/pbpython | 87a825a6a4b0aa25ef644498a781b222aa11369d | [
"BSD-3-Clause"
] | 28 | 2015-12-07T01:57:08.000Z | 2021-08-24T01:21:02.000Z | code/pdf_split.py | solicia-xu/pbpython | 87a825a6a4b0aa25ef644498a781b222aa11369d | [
"BSD-3-Clause"
] | 1,054 | 2015-05-18T06:19:11.000Z | 2022-03-16T06:13:37.000Z | from appJar import gui
from PyPDF2 import PdfFileWriter, PdfFileReader
from pathlib import Path
# Define all the functions needed to process the files
def split_pages(input_file, page_range, out_file):
    """ Take a pdf file and copy a range of pages into a new pdf file

    Args:
        input_file: The source PDF file
        page_range: A string containing a range of pages to copy: 1-3,4
        out_file: File name for the destination PDF
    """
    output = PdfFileWriter()

    # Parse "1-3,4" style input into a flat list of 1-based page numbers.
    # https://stackoverflow.com/questions/5704931/parse-string-of-integer-sets-with-intervals-to-list
    page_ranges = (x.split("-") for x in page_range.split(","))
    range_list = [i for r in page_ranges
                  for i in range(int(r[0]), int(r[-1]) + 1)]

    # Context managers guarantee both file handles are closed (and the
    # output flushed to disk) even if an exception is raised mid-copy;
    # the original code leaked both handles.
    with open(input_file, "rb") as in_fp, open(out_file, "wb") as output_file:
        input_pdf = PdfFileReader(in_fp)
        for p in range_list:
            # Need to subtract 1 because pages are 0 indexed
            try:
                output.addPage(input_pdf.getPage(p - 1))
            except IndexError:
                # Alert the user and stop adding pages
                app.infoBox("Info",
                            "Range exceeded number of pages in input.\n"
                            "File will still be saved.")
                break
        output.write(output_file)

    if app.questionBox("File Save", "Output PDF saved. Do you want to quit?"):
        app.stop()
def validate_inputs(input_file, output_dir, range, file_name):
    """ Verify that the input values provided by the user are valid

    Args:
        input_file: The source PDF file
        output_dir: Directory to store the completed file
        range: File A string containing a range of pages to copy: 1-3,4
        file_name: Output name for the resulting PDF

    Returns:
        True if error and False otherwise
        List of error messages
    """
    # Table of (failure-condition, message) pairs, evaluated in order so the
    # messages come out in the same sequence the form shows the fields.
    checks = [
        (Path(input_file).suffix.upper() != ".PDF",
         "Please select a PDF input file"),
        (len(range) < 1,
         "Please enter a valid page range"),
        (not Path(output_dir).exists(),
         "Please Select a valid output directory"),
        (len(file_name) < 1,
         "Please enter a file name"),
    ]
    error_msgs = [message for failed, message in checks if failed]
    return (len(error_msgs) > 0, error_msgs)
def press(button):
    """ Process a button press

    Args:
        button: The name of the button. Either Process or Quit
    """
    # Anything other than "Process" (i.e. "Quit") shuts the GUI down.
    if button != "Process":
        app.stop()
        return

    src_file = app.getEntry("Input_File")
    dest_dir = app.getEntry("Output_Directory")
    page_range = app.getEntry("Page_Ranges")
    out_file = app.getEntry("Output_name")
    errors, error_msg = validate_inputs(src_file, dest_dir, page_range,
                                        out_file)
    if errors:
        app.errorBox("Error", "\n".join(error_msg), parent=None)
    else:
        split_pages(src_file, page_range, Path(dest_dir, out_file))
# Create the GUI Window (appJar gui; ttk gives native-looking widgets)
app = gui("PDF Splitter", useTtk=True)
app.setTtkTheme("default")
# Uncomment below to see all available themes
# print(app.getTtkThemes())
app.setSize(500, 200)
# Add the interactive components: source file, output directory/name and
# the page-range entry (the names are the keys used by press()).
app.addLabel("Choose Source PDF File")
app.addFileEntry("Input_File")
app.addLabel("Select Output Directory")
app.addDirectoryEntry("Output_Directory")
app.addLabel("Output file name")
app.addEntry("Output_name")
app.addLabel("Page Ranges: 1,3,4-10")
app.addEntry("Page_Ranges")
# link the buttons to the function called press
app.addButtons(["Process", "Quit"], press)
# start the GUI event loop (blocks until app.stop() is called)
app.go()
| 30.45082 | 102 | 0.662988 | from appJar import gui
from PyPDF2 import PdfFileWriter, PdfFileReader
from pathlib import Path
def split_pages(input_file, page_range, out_file):
output = PdfFileWriter()
input_pdf = PdfFileReader(open(input_file, "rb"))
output_file = open(out_file, "wb")
page_ranges = (x.split("-") for x in page_range.split(","))
range_list = [i for r in page_ranges for i in range(int(r[0]), int(r[-1]) + 1)]
for p in range_list:
try:
output.addPage(input_pdf.getPage(p - 1))
except IndexError:
app.infoBox("Info", "Range exceeded number of pages in input.\nFile will still be saved.")
break
output.write(output_file)
if(app.questionBox("File Save", "Output PDF saved. Do you want to quit?")):
app.stop()
def validate_inputs(input_file, output_dir, range, file_name):
errors = False
error_msgs = []
if Path(input_file).suffix.upper() != ".PDF":
errors = True
error_msgs.append("Please select a PDF input file")
if len(range) < 1:
errors = True
error_msgs.append("Please enter a valid page range")
if not(Path(output_dir)).exists():
errors = True
error_msgs.append("Please Select a valid output directory")
if len(file_name) < 1:
errors = True
error_msgs.append("Please enter a file name")
return(errors, error_msgs)
def press(button):
if button == "Process":
src_file = app.getEntry("Input_File")
dest_dir = app.getEntry("Output_Directory")
page_range = app.getEntry("Page_Ranges")
out_file = app.getEntry("Output_name")
errors, error_msg = validate_inputs(src_file, dest_dir, page_range, out_file)
if errors:
app.errorBox("Error", "\n".join(error_msg), parent=None)
else:
split_pages(src_file, page_range, Path(dest_dir, out_file))
else:
app.stop()
app = gui("PDF Splitter", useTtk=True)
app.setTtkTheme("default")
app.setSize(500, 200)
app.addLabel("Choose Source PDF File")
app.addFileEntry("Input_File")
app.addLabel("Select Output Directory")
app.addDirectoryEntry("Output_Directory")
app.addLabel("Output file name")
app.addEntry("Output_name")
app.addLabel("Page Ranges: 1,3,4-10")
app.addEntry("Page_Ranges")
app.addButtons(["Process", "Quit"], press)
app.go()
| true | true |
f7f54a2c1fa5e2ba3408fe227f1878224556afd3 | 34,493 | py | Python | fcs_api_py/main.py | JoaquinChartier/fcs_api_python | 530da72d7fb26a569eb7106dfcb5ad6d320b171f | [
"MIT"
] | null | null | null | fcs_api_py/main.py | JoaquinChartier/fcs_api_python | 530da72d7fb26a569eb7106dfcb5ad6d320b171f | [
"MIT"
] | null | null | null | fcs_api_py/main.py | JoaquinChartier/fcs_api_python | 530da72d7fb26a569eb7106dfcb5ad6d320b171f | [
"MIT"
] | null | null | null | import requests
import json
from . import response_data_types as rdt
class Forex():
def __init__(self, access_key):
"""Initializes with the access key.
access_key string Access key generated in https://fcsapi.com/
"""
self.access_key = access_key
#OK
def currency_latest_price(self, symbols):
"""Print a response with the latest price of a currency, or group of currency (comma separated).
symbols string The symbol of the currency, example: "EUR/USD".
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
id
price
change
chg_per
last_change
symbol
info
server_time
credit_count
"""
self.symbols = symbols
symbols = symbols.upper()
url = "https://fcsapi.com/api-v2/forex/latest?symbol="+symbols+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_latest_price.item_response(
id = i["id"],
price = i["price"],
change = i["change"],
chg_per = i["chg_per"],
last_changed = i["last_changed"],
symbol = i["symbol"]
)
list_r.append(item_r)
#Load info object
info = rdt.r_currency_latest_price.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_currency_latest_price(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def list_symbols(self):
"""List all supported symbols in the API.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
id
decimal
symbol
info
server_time
credit_count
"""
url = "https://fcsapi.com/api-v2/forex/list?type=forex&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_list_symbols.item_response(
id = i["id"],
decimal = i["decimal"],
symbol = i["symbol"]
)
list_r.append(item_r)
#Load info object
info = rdt.r_list_symbols.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_list_symbols(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def currency_details(self, symbols):
"""Get all the details about Forex currencies. Or group of currency (comma separated). Details like its name, country name.
symbols string The symbol or gruop of symbols, example: "EUR".
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
short_name
name
country
code_n
subunit
website
symbol
bank
banknotes
coins
icon
type
symbol_2
banknotes_2
coins_2
info
server_time
credit_count
"""
self.symbols = symbols
symbols = symbols.upper()
url = "https://fcsapi.com/api-v2/forex/profile?symbol="+symbols+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_details.item_response(
short_name = i["short_name"],
name = i["name"],
country = i["country"],
code_n = i["code_n"],
subunit = i["subunit"],
website = i["website"],
symbol = i["symbol"],
bank = i["bank"],
banknotes = i["banknotes"],
coins = i["coins"],
icon = i["icon"],
type = i["type"],
symbol_2 = i["symbol_2"],
banknotes_2 = i["banknotes_2"],
coins_2 = i["coins_2"],
)
list_r.append(item_r)
#Load info object
info = rdt.r_currency_details.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_currency_details(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def currency_converter(self, origin_currency, dest_currency, amount):
"""Convert an amount from Origin to Destination currency.
origin_currency string The Symbol of the original amount. Example: USD.
dest_currency string The Symbol of the converted amount. Example: ARS.
amount int The amount to convert.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
price_1x_origin
price_1x_dest
total
info
server_time
credit_count
"""
self.origin_currency = origin_currency
self.dest_currency = dest_currency
self.amount = amount
origin_currency = origin_currency.upper()
dest_currency = dest_currency.upper()
url = "https://fcsapi.com/api-v2/forex/converter?pair1="+origin_currency+"&pair2="+dest_currency+"&amount="+str(amount)+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
item_r = rdt.r_currency_converter.item_response(
price_1x_origin = res["response"]["price_1x_"+origin_currency],
price_1x_dest = res["response"]["price_1x_"+dest_currency],
total = res["response"]["total"]
)
list_r.append(item_r)
#Load info object
info = rdt.r_currency_converter.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_currency_converter(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def base_currency(self, symbol, type="forex"):
"""
On the base of 1 currency, it will return all quote prices of all available currencies.
symbol string The symbol of the currency.
type string the type of the response, it can be 'forex' or 'crypto'. Default value: 'forex'.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
key (currency) : value (value)
info
base
type
server_time
credit_count
"""
self.symbol = symbol
symbol = symbol.upper()
url = "https://fcsapi.com/api-v2/forex/base_latest?symbol="+symbol+"&type="+type+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the dict manually
dict_r = {}
dict_r = res["response"]
#Load info object
info = rdt.r_base_currency.info(
base=res["info"]["base"],
type=res["info"]["type"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_base_currency(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=dict_r,
info=info
)
return obj
#OK
def currency_cross(self, symbol):
"""
Return all related currencies of required currency.
symbol string The currency to return.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
id
price
change
chg_per
last_changed
type
symbol
info
server_time
credit_count
"""
self.symbol = symbol
symbol = symbol.upper()
url = "https://fcsapi.com/api-v2/forex/cross?symbol="+symbol+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_cross.item_response(
id = i["id"],
price = i["price"],
change = i["change"],
chg_per = i["chg_per"],
last_changed = i["last_changed"],
type = i["type"],
symbol = i["symbol"]
)
list_r.append(item_r)
#Load info object
info = rdt.r_currency_cross.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_currency_cross(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def last_candle(self, symbols, period):
"""
Return the last candle price of a currency (Open,High,Low,Close).
symbol string The currency or group of currencies to return.
period string The period of the data to retrieve, it can be: 1m, 5m, 15m, 30m,1h, 5h, 1d, 1w, month.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
o
h
l
c
t
tm
symbol
info
server_time
credit_count
"""
self.symbols = symbols
self.period = period
symbols = symbols.lower()
url = "https://fcsapi.com/api-v2/forex/candle?symbol="+symbols+"&period="+period+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_last_candle.item_response(
o = i["o"],
h = i["h"],
l = i["l"],
c = i["c"],
t = i["t"],
tm = i["tm"],
symbol = i["symbol"]
)
list_r.append(item_r)
#Load info object
info = rdt.r_last_candle.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_last_candle(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def historical_price(self, symbol, period, from_date, to_date):
"""
Return the historical exchange price data for all supported symbols.
symbol string The currency or group of currencies to return.
period string The period of the data to retrieve, it can be: 1m, 5m, 15m, 30m,1h, 5h, 1d, 1w, month.
from_date string The date from. Format: YYYY-MM-DDThh:mm. Ex(2020-05-08T12:00)
to_date string The date to. Format: YYYY-MM-DDThh:mm. Ex(2020-05-08T12:00)
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
o
h
l
c
t
tm
info
id
decimal
symbol
period
server_time
credit_count
"""
self.symbol = symbol
self.period = period
self.from_date = from_date
self.to_date = to_date
symbol = symbol.upper()
period = period.lower()
from_date = from_date.upper()
to_date = to_date.upper()
url = "https://fcsapi.com/api-v2/forex/history?symbol="+symbol+"&period="+period+"&from="+from_date+"&to="+to_date+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the list manually
list_r = []
for i in res["response"]:
item_r = rdt.r_historical_price.item_response(
o = i["o"],
h = i["h"],
l = i["l"],
c = i["c"],
t = i["t"],
tm = i["tm"],
)
list_r.append(item_r)
#Load info object
info = rdt.r_historical_price.info(
id = res["info"]["id"],
decimal = res["info"]["decimal"],
symbol = res["info"]["symbol"],
period = res["info"]["period"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_historical_price(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
#OK
def pivot_points(self, symbol, period):
"""
Return market indicators for all supported symbols.
symbol string The currency to return.
period string The period of the data to retrieve, it can be: 1m, 5m, 15m, 30m,1h, 5h, 1d, 1w, month.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
oa_summary
pivot_point
classic
pp
R1
R2
R3
S1
S2
S3
fibonacci
pp
R1
R2
R3
S1
S2
S3
camarilla
pp
R1
R2
R3
R4
S1
S2
S3
S4
woodie
pp
R1
R2
S1
S2
demark
high
low
R1
S1
info
id
decimal
symbol
period
disclaimer
update
update_time
server_time
credit_count
"""
self.period = period
self.symbol = symbol
symbol = symbol.upper()
period = period.lower()
url = "https://fcsapi.com/api-v2/forex/pivot_points?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the dict manually
classic = rdt.r_pivot_points.classic(
pp = res["response"]["pivot_point"]["classic"]["pp"],
R1 = res["response"]["pivot_point"]["classic"]["R1"],
R2 = res["response"]["pivot_point"]["classic"]["R2"],
R3 = res["response"]["pivot_point"]["classic"]["R3"],
S1 = res["response"]["pivot_point"]["classic"]["S1"],
S2 = res["response"]["pivot_point"]["classic"]["S2"],
S3 = res["response"]["pivot_point"]["classic"]["S3"]
)
fibonacci = rdt.r_pivot_points.fibonacci(
pp = res["response"]["pivot_point"]["fibonacci"]["pp"],
R1 = res["response"]["pivot_point"]["fibonacci"]["R1"],
R2 = res["response"]["pivot_point"]["fibonacci"]["R2"],
R3 = res["response"]["pivot_point"]["fibonacci"]["R3"],
S1 = res["response"]["pivot_point"]["fibonacci"]["S1"],
S2 = res["response"]["pivot_point"]["fibonacci"]["S2"],
S3 = res["response"]["pivot_point"]["fibonacci"]["S3"]
)
camarilla = rdt.r_pivot_points.camarilla(
pp = res["response"]["pivot_point"]["camarilla"]["pp"],
R1 = res["response"]["pivot_point"]["camarilla"]["R1"],
R2 = res["response"]["pivot_point"]["camarilla"]["R2"],
R3 = res["response"]["pivot_point"]["camarilla"]["R3"],
R4 = res["response"]["pivot_point"]["camarilla"]["R4"],
S1 = res["response"]["pivot_point"]["camarilla"]["S1"],
S2 = res["response"]["pivot_point"]["camarilla"]["S2"],
S3 = res["response"]["pivot_point"]["camarilla"]["S3"],
S4 = res["response"]["pivot_point"]["camarilla"]["S4"]
)
woodie = rdt.r_pivot_points.woodie(
pp = res["response"]["pivot_point"]["woodie"]["pp"],
R1 = res["response"]["pivot_point"]["woodie"]["R1"],
R2 = res["response"]["pivot_point"]["woodie"]["R2"],
S1 = res["response"]["pivot_point"]["woodie"]["S1"],
S2 = res["response"]["pivot_point"]["woodie"]["S2"],
)
demark = rdt.r_pivot_points.demark(
high = res["response"]["pivot_point"]["demark"]["high"],
low = res["response"]["pivot_point"]["demark"]["low"],
R1 = res["response"]["pivot_point"]["demark"]["R1"],
S1 = res["response"]["pivot_point"]["demark"]["S1"],
)
pivot_point = rdt.r_pivot_points.pivot_point(
classic = classic,
fibonacci = fibonacci,
camarilla = camarilla,
woodie = woodie,
demark = demark
)
response = rdt.r_pivot_points.response(
oa_summary = res["response"]["oa_summary"],
pivot_point = pivot_point
)
#Load info object
info = rdt.r_pivot_points.info(
id=res["info"]["id"],
decimal = res["info"]["decimal"],
symbol = res["info"]["symbol"],
period = res["info"]["period"],
disclaimer = res["info"]["disclaimer"],
update=res["info"]["update"],
update_time=res["info"]["update_time"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_pivot_points(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=response,
info=info
)
return obj
#OK
def moving_average(self, symbol, period):
"""
Return moving average for all supported symbols.
symbol string The currency to return.
period string The period of the data to retrieve, it can be: 1m, 5m, 15m, 30m,1h, 5h, 1d, 1w, month.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
oa_summary
count
TotalBuy
Total_Sell
Total_Neutral
maBuy
maSell
ma_avg
SMA
MA5
v
s
MA10
v
s
MA20
v
s
MA50
v
s
MA100
v
s
MA200
v
s
EMA
MA5
v
s
MA10
v
s
MA20
v
s
MA50
v
s
MA100
v
s
MA200
v
s
summary
info
id
decimal
symbol
period
disclaimer
update
update_time
server_time
credit_count
"""
self.period = period
self.symbol = symbol
symbol = symbol.upper()
period = period.lower()
url = "https://fcsapi.com/api-v2/forex/ma_avg?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the dict manually
MA5=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA5"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA5"]["s"])
MA10=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA10"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA10"]["s"])
MA20=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA20"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA20"]["s"])
MA50=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA50"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA50"]["s"])
MA100=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA100"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA100"]["s"])
MA200=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["EMA"]["MA200"]["v"],
s=res["response"]["ma_avg"]["EMA"]["MA200"]["s"])
EMA = rdt.r_moving_average.EMA(
MA5= MA5,
MA10= MA10,
MA20= MA20,
MA50= MA50,
MA100= MA100,
MA200= MA200,
)
MA5=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA5"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA5"]["s"])
MA10=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA10"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA10"]["s"])
MA20=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA20"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA20"]["s"])
MA50=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA50"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA50"]["s"])
MA100=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA100"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA100"]["s"])
MA200=rdt.r_moving_average.v_s(
v=res["response"]["ma_avg"]["SMA"]["MA200"]["v"],
s=res["response"]["ma_avg"]["SMA"]["MA200"]["s"])
SMA = rdt.r_moving_average.SMA(
MA5= MA5,
MA10= MA10,
MA20= MA20,
MA50= MA50,
MA100= MA100,
MA200= MA200,
)
ma_avg = rdt.r_moving_average.ma_avg(
SMA=SMA,
EMA=EMA,
summary=res["response"]["ma_avg"]["summary"]
)
count = rdt.r_moving_average.count(
Total_Buy = res["response"]["count"]["Total_Buy"],
Total_Sell = res["response"]["count"]["Total_Sell"],
Total_Neutral = res["response"]["count"]["Total_Neutral"],
maBuy = res["response"]["count"]["maBuy"],
maSell = res["response"]["count"]["maSell"]
)
response = rdt.r_moving_average.response(
oa_summary = res["response"]["oa_summary"],
count = count,
ma_avg = ma_avg
)
#Load info object
info = rdt.r_moving_average.info(
id=res["info"]["id"],
decimal = res["info"]["decimal"],
symbol = res["info"]["symbol"],
period = res["info"]["period"],
disclaimer = res["info"]["disclaimer"],
update=res["info"]["update"],
update_time=res["info"]["update_time"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_moving_average(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=response,
info=info
)
return obj
#OK
def technical_indicator(self, symbol, period):
"""
Return technicals indicators for all supported symbols.
symbol string The currency to return.
period string The period of the data to retrieve, it can be: 1m, 5m, 15m, 30m,1h, 5h, 1d, 1w, month.
returns A object with the following structure (Use dot(.) to access his childs):
object
status
code
msg
response
oa_summary
count
TotalBuy
Total_Sell
Total_Neutral
tiBuy
tiSell
indicators
RSI14
v
s
STOCH9_6
v
s
STOCHRSI14
v
s
MACD12_26
v
s
WilliamsR
v
s
CCI14
v
s
ATR14
v
s
UltimateOscillator
v
s
summary
info
id
decimal
symbol
period
disclaimer
update
update_time
server_time
credit_count
"""
self.symbol = symbol
self.period = period
symbol.upper()
period.lower()
url = "https://fcsapi.com/api-v2/forex/indicators?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
#Load the dict manually
RSI14=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["RSI14"]["v"],
s=res["response"]["indicators"]["RSI14"]["s"])
STOCH9_6=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["STOCH9_6"]["v"],
s=res["response"]["indicators"]["STOCH9_6"]["s"])
STOCHRSI14=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["STOCHRSI14"]["v"],
s=res["response"]["indicators"]["STOCHRSI14"]["s"])
MACD12_26=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["MACD12_26"]["v"],
s=res["response"]["indicators"]["MACD12_26"]["s"])
WilliamsR=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["WilliamsR"]["v"],
s=res["response"]["indicators"]["WilliamsR"]["s"])
CCI14=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["CCI14"]["v"],
s=res["response"]["indicators"]["CCI14"]["s"])
ATR14=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["ATR14"]["v"],
s=res["response"]["indicators"]["ATR14"]["s"])
UltimateOscillator=rdt.r_technical_indicator.v_s(
v=res["response"]["indicators"]["UltimateOscillator"]["v"],
s=res["response"]["indicators"]["UltimateOscillator"]["s"])
indicators = rdt.r_technical_indicator.indicators(
UltimateOscillator=UltimateOscillator,
ATR14=ATR14,
CCI14=CCI14,
WilliamsR=WilliamsR,
MACD12_26=MACD12_26,
STOCHRSI14=STOCHRSI14,
STOCH9_6=STOCH9_6,
RSI14=RSI14,
summary=res["response"]["indicators"]["summary"]
)
count = rdt.r_technical_indicator.count(
Total_Buy = res["response"]["count"]["Total_Buy"],
Total_Sell = res["response"]["count"]["Total_Sell"],
Total_Neutral = res["response"]["count"]["Total_Neutral"],
tiBuy = res["response"]["count"]["tiBuy"],
tiSell = res["response"]["count"]["tiSell"]
)
response = rdt.r_technical_indicator.response(
oa_summary = res["response"]["oa_summary"],
count = count,
indicators = indicators
)
#Load info object
info = rdt.r_technical_indicator.info(
id=res["info"]["id"],
decimal = res["info"]["decimal"],
symbol = res["info"]["symbol"],
period = res["info"]["period"],
disclaimer = res["info"]["disclaimer"],
update=res["info"]["update"],
update_time=res["info"]["update_time"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
#Load the main object
obj = rdt.r_technical_indicator(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=response,
info=info
)
return obj | 35.413758 | 158 | 0.449396 | import requests
import json
from . import response_data_types as rdt
class Forex():
def __init__(self, access_key):
self.access_key = access_key
def currency_latest_price(self, symbols):
self.symbols = symbols
symbols = symbols.upper()
url = "https://fcsapi.com/api-v2/forex/latest?symbol="+symbols+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_latest_price.item_response(
id = i["id"],
price = i["price"],
change = i["change"],
chg_per = i["chg_per"],
last_changed = i["last_changed"],
symbol = i["symbol"]
)
list_r.append(item_r)
info = rdt.r_currency_latest_price.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_currency_latest_price(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def list_symbols(self):
url = "https://fcsapi.com/api-v2/forex/list?type=forex&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_list_symbols.item_response(
id = i["id"],
decimal = i["decimal"],
symbol = i["symbol"]
)
list_r.append(item_r)
info = rdt.r_list_symbols.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_list_symbols(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def currency_details(self, symbols):
self.symbols = symbols
symbols = symbols.upper()
url = "https://fcsapi.com/api-v2/forex/profile?symbol="+symbols+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_details.item_response(
short_name = i["short_name"],
name = i["name"],
country = i["country"],
code_n = i["code_n"],
subunit = i["subunit"],
website = i["website"],
symbol = i["symbol"],
bank = i["bank"],
banknotes = i["banknotes"],
coins = i["coins"],
icon = i["icon"],
type = i["type"],
symbol_2 = i["symbol_2"],
banknotes_2 = i["banknotes_2"],
coins_2 = i["coins_2"],
)
list_r.append(item_r)
info = rdt.r_currency_details.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_currency_details(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def currency_converter(self, origin_currency, dest_currency, amount):
self.origin_currency = origin_currency
self.dest_currency = dest_currency
self.amount = amount
origin_currency = origin_currency.upper()
dest_currency = dest_currency.upper()
url = "https://fcsapi.com/api-v2/forex/converter?pair1="+origin_currency+"&pair2="+dest_currency+"&amount="+str(amount)+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
item_r = rdt.r_currency_converter.item_response(
price_1x_origin = res["response"]["price_1x_"+origin_currency],
price_1x_dest = res["response"]["price_1x_"+dest_currency],
total = res["response"]["total"]
)
list_r.append(item_r)
info = rdt.r_currency_converter.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_currency_converter(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def base_currency(self, symbol, type="forex"):
self.symbol = symbol
symbol = symbol.upper()
url = "https://fcsapi.com/api-v2/forex/base_latest?symbol="+symbol+"&type="+type+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
dict_r = {}
dict_r = res["response"]
info = rdt.r_base_currency.info(
base=res["info"]["base"],
type=res["info"]["type"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_base_currency(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=dict_r,
info=info
)
return obj
def currency_cross(self, symbol):
self.symbol = symbol
symbol = symbol.upper()
url = "https://fcsapi.com/api-v2/forex/cross?symbol="+symbol+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_currency_cross.item_response(
id = i["id"],
price = i["price"],
change = i["change"],
chg_per = i["chg_per"],
last_changed = i["last_changed"],
type = i["type"],
symbol = i["symbol"]
)
list_r.append(item_r)
info = rdt.r_currency_cross.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_currency_cross(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def last_candle(self, symbols, period):
self.symbols = symbols
self.period = period
symbols = symbols.lower()
url = "https://fcsapi.com/api-v2/forex/candle?symbol="+symbols+"&period="+period+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_last_candle.item_response(
o = i["o"],
h = i["h"],
l = i["l"],
c = i["c"],
t = i["t"],
tm = i["tm"],
symbol = i["symbol"]
)
list_r.append(item_r)
info = rdt.r_last_candle.info(
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_last_candle(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def historical_price(self, symbol, period, from_date, to_date):
self.symbol = symbol
self.period = period
self.from_date = from_date
self.to_date = to_date
symbol = symbol.upper()
period = period.lower()
from_date = from_date.upper()
to_date = to_date.upper()
url = "https://fcsapi.com/api-v2/forex/history?symbol="+symbol+"&period="+period+"&from="+from_date+"&to="+to_date+"&access_key="+self.access_key
payload = {}
headers = {}
response = requests.request("POST", url, headers=headers, data = payload)
res = json.loads(response.text.encode('utf8'))
list_r = []
for i in res["response"]:
item_r = rdt.r_historical_price.item_response(
o = i["o"],
h = i["h"],
l = i["l"],
c = i["c"],
t = i["t"],
tm = i["tm"],
)
list_r.append(item_r)
info = rdt.r_historical_price.info(
id = res["info"]["id"],
decimal = res["info"]["decimal"],
symbol = res["info"]["symbol"],
period = res["info"]["period"],
server_time=res["info"]["server_time"],
credit_count=res["info"]["credit_count"]
)
obj = rdt.r_historical_price(
status=res["status"],
code=res["code"],
msg=res["msg"],
response=list_r,
info=info
)
return obj
def pivot_points(self, symbol, period):
    """Fetch classic/fibonacci/camarilla/woodie/demark pivot points.

    Returns:
        An ``rdt.r_pivot_points`` wrapper around the API response.
    """
    self.period = period
    self.symbol = symbol
    symbol = symbol.upper()
    period = period.lower()
    url = "https://fcsapi.com/api-v2/forex/pivot_points?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
    response = requests.request("POST", url, headers={}, data={})
    res = json.loads(response.text.encode('utf8'))
    pp_src = res["response"]["pivot_point"]

    def pick(section, keys):
        # Select only the documented keys so unexpected API fields
        # cannot leak into the typed constructors.
        return {k: pp_src[section][k] for k in keys}

    classic = rdt.r_pivot_points.classic(
        **pick("classic", ("pp", "R1", "R2", "R3", "S1", "S2", "S3")))
    fibonacci = rdt.r_pivot_points.fibonacci(
        **pick("fibonacci", ("pp", "R1", "R2", "R3", "S1", "S2", "S3")))
    camarilla = rdt.r_pivot_points.camarilla(
        **pick("camarilla", ("pp", "R1", "R2", "R3", "R4", "S1", "S2", "S3", "S4")))
    woodie = rdt.r_pivot_points.woodie(
        **pick("woodie", ("pp", "R1", "R2", "S1", "S2")))
    demark = rdt.r_pivot_points.demark(
        **pick("demark", ("high", "low", "R1", "S1")))
    pivot_point = rdt.r_pivot_points.pivot_point(
        classic=classic, fibonacci=fibonacci, camarilla=camarilla,
        woodie=woodie, demark=demark,
    )
    resp = rdt.r_pivot_points.response(
        oa_summary=res["response"]["oa_summary"],
        pivot_point=pivot_point,
    )
    meta = rdt.r_pivot_points.info(
        id=res["info"]["id"],
        decimal=res["info"]["decimal"],
        symbol=res["info"]["symbol"],
        period=res["info"]["period"],
        disclaimer=res["info"]["disclaimer"],
        update=res["info"]["update"],
        update_time=res["info"]["update_time"],
        server_time=res["info"]["server_time"],
        credit_count=res["info"]["credit_count"],
    )
    return rdt.r_pivot_points(
        status=res["status"], code=res["code"], msg=res["msg"],
        response=resp, info=meta,
    )
def moving_average(self, symbol, period):
    """Fetch EMA/SMA moving averages and the buy/sell summary.

    Returns:
        An ``rdt.r_moving_average`` wrapper around the API response.
    """
    self.period = period
    self.symbol = symbol
    symbol = symbol.upper()
    period = period.lower()
    url = "https://fcsapi.com/api-v2/forex/ma_avg?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
    response = requests.request("POST", url, headers={}, data={})
    res = json.loads(response.text.encode('utf8'))
    ma_src = res["response"]["ma_avg"]
    windows = ("MA5", "MA10", "MA20", "MA50", "MA100", "MA200")

    def readings(kind):
        # Build one v_s value/signal pair per moving-average window.
        return {
            w: rdt.r_moving_average.v_s(
                v=ma_src[kind][w]["v"], s=ma_src[kind][w]["s"])
            for w in windows
        }

    EMA = rdt.r_moving_average.EMA(**readings("EMA"))
    SMA = rdt.r_moving_average.SMA(**readings("SMA"))
    ma_avg = rdt.r_moving_average.ma_avg(
        SMA=SMA, EMA=EMA, summary=ma_src["summary"])
    count = rdt.r_moving_average.count(
        Total_Buy=res["response"]["count"]["Total_Buy"],
        Total_Sell=res["response"]["count"]["Total_Sell"],
        Total_Neutral=res["response"]["count"]["Total_Neutral"],
        maBuy=res["response"]["count"]["maBuy"],
        maSell=res["response"]["count"]["maSell"],
    )
    resp = rdt.r_moving_average.response(
        oa_summary=res["response"]["oa_summary"],
        count=count,
        ma_avg=ma_avg,
    )
    meta = rdt.r_moving_average.info(
        id=res["info"]["id"],
        decimal=res["info"]["decimal"],
        symbol=res["info"]["symbol"],
        period=res["info"]["period"],
        disclaimer=res["info"]["disclaimer"],
        update=res["info"]["update"],
        update_time=res["info"]["update_time"],
        server_time=res["info"]["server_time"],
        credit_count=res["info"]["credit_count"],
    )
    return rdt.r_moving_average(
        status=res["status"], code=res["code"], msg=res["msg"],
        response=resp, info=meta,
    )
def technical_indicator(self, symbol, period):
    """Fetch the technical-indicator panel (RSI, MACD, CCI, ...).

    Returns:
        An ``rdt.r_technical_indicator`` wrapper around the API response.
    """
    self.symbol = symbol
    self.period = period
    # BUG FIX: the original called symbol.upper() / period.lower() and
    # discarded the results (strings are immutable), so the URL was built
    # with the caller's raw casing — unlike every sibling method.
    symbol = symbol.upper()
    period = period.lower()
    url = "https://fcsapi.com/api-v2/forex/indicators?symbol="+symbol+"&period="+period+"&access_key="+self.access_key
    response = requests.request("POST", url, headers={}, data={})
    res = json.loads(response.text.encode('utf8'))
    names = ("RSI14", "STOCH9_6", "STOCHRSI14", "MACD12_26",
             "WilliamsR", "CCI14", "ATR14", "UltimateOscillator")
    # One v_s value/signal pair per indicator.
    readings = {
        name: rdt.r_technical_indicator.v_s(
            v=res["response"]["indicators"][name]["v"],
            s=res["response"]["indicators"][name]["s"])
        for name in names
    }
    indicators = rdt.r_technical_indicator.indicators(
        summary=res["response"]["indicators"]["summary"], **readings)
    count = rdt.r_technical_indicator.count(
        Total_Buy=res["response"]["count"]["Total_Buy"],
        Total_Sell=res["response"]["count"]["Total_Sell"],
        Total_Neutral=res["response"]["count"]["Total_Neutral"],
        tiBuy=res["response"]["count"]["tiBuy"],
        tiSell=res["response"]["count"]["tiSell"],
    )
    resp = rdt.r_technical_indicator.response(
        oa_summary=res["response"]["oa_summary"],
        count=count,
        indicators=indicators,
    )
    meta = rdt.r_technical_indicator.info(
        id=res["info"]["id"],
        decimal=res["info"]["decimal"],
        symbol=res["info"]["symbol"],
        period=res["info"]["period"],
        disclaimer=res["info"]["disclaimer"],
        update=res["info"]["update"],
        update_time=res["info"]["update_time"],
        server_time=res["info"]["server_time"],
        credit_count=res["info"]["credit_count"],
    )
    return rdt.r_technical_indicator(
        status=res["status"], code=res["code"], msg=res["msg"],
        response=resp, info=meta,
    )
f7f54a31739a081a42746b6d728dd60789687c76 | 4,869 | py | Python | debugging/debug_validation.py | dangpzanco/dcase-task1 | 72867cc5b8969d7ec55c5acfd30ebbc3a7246666 | [
"MIT"
] | 1 | 2019-05-23T08:10:59.000Z | 2019-05-23T08:10:59.000Z | debugging/debug_validation.py | dangpzanco/dcase-task1 | 72867cc5b8969d7ec55c5acfd30ebbc3a7246666 | [
"MIT"
] | null | null | null | debugging/debug_validation.py | dangpzanco/dcase-task1 | 72867cc5b8969d7ec55c5acfd30ebbc3a7246666 | [
"MIT"
] | 2 | 2019-07-12T05:26:15.000Z | 2019-11-22T09:15:00.000Z | # Standard libraries
import pathlib
import glob
import platform
import pickle
from datetime import datetime
from pprint import pprint
# Scientific stack
import numpy as np
import numpy.random as rnd
import pandas as pd
# Chunked data
import zarr
# Audio processing
import dcase_util as du
# Pretty progress bar
import tqdm
import preprocessing as prep
# --- Dataset / fold-split setup for validating the CV strategy ---
n_feats = 100
dataset_name = f'numfeats{n_feats}'
# db_path = '/media/zanco/DADOS/zanco/datasets/TUT-urban-acoustic-scenes-2018-development/'
db_path = '/media/zanco/DADOS/zanco/datasets/TAU-urban-acoustic-scenes-2019-development/'
# db_path = 'E:/datasets/TUT-urban-acoustic-scenes-2018-development/'
# db_path = 'E:/datasets/TAU-urban-acoustic-scenes-2019-development/'
# version = '2018'
version = '2019'
preprocessor = prep.DataPreprocessing(db_path=db_path,
                                      version=version,
                                      n_feats=n_feats,
                                      dataset_name=dataset_name,
                                      dataset_folder=f'../saved_features{version}',
                                      audio_preprocess='mid',
                                      feature_type='mel_spectrogram')
# preprocessor.process(overwrite=False)
fold_meta, fold_split = preprocessor.generate_fold_meta(overwrite=False)
train_ids = fold_meta['identifier'][fold_split[0][0]]
valid_ids = fold_meta['identifier'][fold_split[0][1]]
# Leakage check: identifiers shared between train and validation of
# fold 0 — expected to print 0 for a correct group-based split.
c = list(set(train_ids) & set(valid_ids))
print(len(c))
seed = 0
n_splits = 5
# Get consistent results (same folds every time)
rand_state = rnd.get_state() # get current PRNG state
rnd.seed(seed)
# Get training and evaluation example indexes
train_ind = np.where(preprocessor.db_meta['example_type'].values == 'train')[0]
eval_ind = np.where(preprocessor.db_meta['example_type'].values == 'test')[0]
# Split based on labels and identifiers
from sklearn.model_selection import GroupKFold
splitter = GroupKFold(n_splits=n_splits)
# GroupKFold ignores X contents; only its length matters.
X = np.empty([train_ind.size,1])
y = preprocessor.db_meta['scene_label'][train_ind]
ids = preprocessor.db_meta['identifier'][train_ind]
temp_fold_split = list(splitter.split(X=X,y=y,groups=ids))
# Fix indexing: map fold-local positions back to absolute row indexes.
fold_split = [[train_ind[x[0]], train_ind[x[1]]] for x in temp_fold_split]
from sklearn.model_selection import (TimeSeriesSplit, KFold, ShuffleSplit,
StratifiedKFold, GroupShuffleSplit,
GroupKFold, StratifiedShuffleSplit)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
np.random.seed(1338)
# Colormaps used by the fold-visualisation helpers below.
cmap_data = plt.cm.Paired
cmap_group = plt.cm.prism
cmap_cv = plt.cm.coolwarm
n_splits = 5
# Generate the class/group data as small integer codes for plotting.
_, label_index = np.unique(preprocessor.db_meta['scene_label'][train_ind].values, return_inverse=True)
y = label_index.astype('i1')
_, id_index = np.unique(preprocessor.db_meta['identifier'][train_ind].values, return_inverse=True)
groups = id_index.astype(int)
def visualize_groups(classes, groups):
    """Plot each sample's group id and class label as two colour bands."""
    fig, ax = plt.subplots()
    sample_idx = range(len(groups))
    group_band = ax.scatter(sample_idx, [.5] * len(groups), c=groups,
                            marker='_', lw=50, cmap=cmap_group)
    ax.scatter(sample_idx, [3.5] * len(groups), c=classes,
               marker='_', lw=50, cmap=cmap_data)
    ax.set(ylim=[-1, 5], yticks=[.5, 3.5],
           yticklabels=['Data\ngroup', 'Data\nclass'], xlabel="Sample index")
    fig.colorbar(group_band)
# Render the group/class overview for the training split.
visualize_groups(y, groups)
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10):
    """Create a sample plot for indices of a cross-validation object.

    FIXES over the original:
    - it called ``fig.colorbar`` (a *global* figure) once per CV split
      inside the loop, shrinking the axes repeatedly; now the colorbar
      is attached once to the axes' own figure.
    - the class/group bands are drawn once after the loop instead of
      relying on loop-final variable values.
    """
    split_band = None
    for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=group)):
        # Fill in indices with the training/test groups
        indices = np.full(len(X), np.nan)
        indices[tt] = 1
        indices[tr] = 0
        # Visualize the results
        split_band = ax.scatter(range(len(indices)), [ii + .5] * len(indices),
                                c=indices, marker='_', lw=lw, cmap=cmap_cv,
                                vmin=-.2, vmax=1.2)
    if split_band is not None:
        # One colorbar for the whole figure, not one per split.
        ax.figure.colorbar(split_band)
    # Plot the data classes and groups at the end
    ax.scatter(range(len(X)), [ii + 1.5] * len(X),
               c=y, marker='_', lw=lw, cmap=cmap_data)
    ax.scatter(range(len(X)), [ii + 2.5] * len(X),
               c=group, marker='_', lw=lw, cmap=cmap_group)
    # Formatting
    yticklabels = list(range(n_splits)) + ['class', 'group']
    ax.set(yticks=np.arange(n_splits+2) + .5, yticklabels=yticklabels,
           xlabel='Sample index', ylabel="CV iteration",
           ylim=[n_splits+2.2, -.2])
    ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
    return ax
# Draw the GroupKFold train/test membership per split and show it.
fig, ax = plt.subplots()
# cv = KFold(n_splits)
plot_cv_indices(splitter, X, y, groups, ax, n_splits)
plt.show()
exit(0)
| 30.816456 | 102 | 0.659478 |
import pathlib
import glob
import platform
import pickle
from datetime import datetime
from pprint import pprint
import numpy as np
import numpy.random as rnd
import pandas as pd
import zarr
import dcase_util as du
import tqdm
import preprocessing as prep
n_feats = 100
dataset_name = f'numfeats{n_feats}'
db_path = '/media/zanco/DADOS/zanco/datasets/TAU-urban-acoustic-scenes-2019-development/'
version = '2019'
preprocessor = prep.DataPreprocessing(db_path=db_path,
version=version,
n_feats=n_feats,
dataset_name=dataset_name,
dataset_folder=f'../saved_features{version}',
audio_preprocess='mid',
feature_type='mel_spectrogram')
fold_meta, fold_split = preprocessor.generate_fold_meta(overwrite=False)
train_ids = fold_meta['identifier'][fold_split[0][0]]
valid_ids = fold_meta['identifier'][fold_split[0][1]]
c = list(set(train_ids) & set(valid_ids))
print(len(c))
seed = 0
n_splits = 5
rand_state = rnd.get_state()
rnd.seed(seed)
train_ind = np.where(preprocessor.db_meta['example_type'].values == 'train')[0]
eval_ind = np.where(preprocessor.db_meta['example_type'].values == 'test')[0]
from sklearn.model_selection import GroupKFold
splitter = GroupKFold(n_splits=n_splits)
X = np.empty([train_ind.size,1])
y = preprocessor.db_meta['scene_label'][train_ind]
ids = preprocessor.db_meta['identifier'][train_ind]
temp_fold_split = list(splitter.split(X=X,y=y,groups=ids))
fold_split = [[train_ind[x[0]], train_ind[x[1]]] for x in temp_fold_split]
from sklearn.model_selection import (TimeSeriesSplit, KFold, ShuffleSplit,
StratifiedKFold, GroupShuffleSplit,
GroupKFold, StratifiedShuffleSplit)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Patch
np.random.seed(1338)
cmap_data = plt.cm.Paired
cmap_group = plt.cm.prism
cmap_cv = plt.cm.coolwarm
n_splits = 5
_, label_index = np.unique(preprocessor.db_meta['scene_label'][train_ind].values, return_inverse=True)
y = label_index.astype('i1')
_, id_index = np.unique(preprocessor.db_meta['identifier'][train_ind].values, return_inverse=True)
groups = id_index.astype(int)
def visualize_groups(classes, groups):
fig, ax = plt.subplots()
plot = ax.scatter(range(len(groups)), [.5] * len(groups), c=groups, marker='_',
lw=50, cmap=cmap_group)
ax.scatter(range(len(groups)), [3.5] * len(groups), c=classes, marker='_',
lw=50, cmap=cmap_data)
ax.set(ylim=[-1, 5], yticks=[.5, 3.5],
yticklabels=['Data\ngroup', 'Data\nclass'], xlabel="Sample index")
fig.colorbar(plot)
visualize_groups(y, groups)
def plot_cv_indices(cv, X, y, group, ax, n_splits, lw=10):
for ii, (tr, tt) in enumerate(cv.split(X=X, y=y, groups=group)):
indices = np.array([np.nan] * len(X))
indices[tt] = 1
indices[tr] = 0
plot = ax.scatter(range(len(indices)), [ii + .5] * len(indices),
c=indices, marker='_', lw=lw, cmap=cmap_cv,
vmin=-.2, vmax=1.2)
fig.colorbar(plot)
ax.scatter(range(len(X)), [ii + 1.5] * len(X),
c=y, marker='_', lw=lw, cmap=cmap_data)
ax.scatter(range(len(X)), [ii + 2.5] * len(X),
c=group, marker='_', lw=lw, cmap=cmap_group)
yticklabels = list(range(n_splits)) + ['class', 'group']
ax.set(yticks=np.arange(n_splits+2) + .5, yticklabels=yticklabels,
xlabel='Sample index', ylabel="CV iteration",
ylim=[n_splits+2.2, -.2])
ax.set_title('{}'.format(type(cv).__name__), fontsize=15)
return ax
fig, ax = plt.subplots()
plot_cv_indices(splitter, X, y, groups, ax, n_splits)
plt.show()
exit(0)
| true | true |
f7f54b7970b7e59172a8dbcd2ed502d803503f38 | 539 | py | Python | educate/manage.py | fndos/Control-de-Visitas-Web | a5b7d954e327e4e1a2979f3cf539a2f70fd325d6 | [
"MIT"
] | null | null | null | educate/manage.py | fndos/Control-de-Visitas-Web | a5b7d954e327e4e1a2979f3cf539a2f70fd325d6 | [
"MIT"
] | null | null | null | educate/manage.py | fndos/Control-de-Visitas-Web | a5b7d954e327e4e1a2979f3cf539a2f70fd325d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings module before anything
    # from django.* is imported.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "educate.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
| 33.6875 | 73 | 0.686456 |
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "educate.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| true | true |
f7f54bf093d7ebf6362729e7f1fb476789684de0 | 2,156 | py | Python | Data_Science/src/model.py | robovish/Python-Projects | 1cdfa18c093af32cfc02ac7d08e2bdf682670470 | [
"MIT"
] | null | null | null | Data_Science/src/model.py | robovish/Python-Projects | 1cdfa18c093af32cfc02ac7d08e2bdf682670470 | [
"MIT"
] | null | null | null | Data_Science/src/model.py | robovish/Python-Projects | 1cdfa18c093af32cfc02ac7d08e2bdf682670470 | [
"MIT"
] | 1 | 2022-03-03T09:21:37.000Z | 2022-03-03T09:21:37.000Z |
#load the libraries
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from joblib import dump, load
import os
path = os.getcwd()
#load data
data = pd.read_csv(os.path.join(path,"data","diabetes.csv"))
# Replace zeros with NaNs: in these columns a literal 0 presumably
# encodes a missing measurement (TODO confirm against the dataset docs),
# so it must not be treated as a real value.
data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.NaN)
#function to impute the missing values with median based on Outcome class
def impute_median(data, var):
    """Fill missing values of column *var* with its per-Outcome median.

    The median is computed separately for Outcome 0 and Outcome 1 over
    the non-null rows, and each class's missing entries are filled with
    its own median. Mutates *data* in place and returns it.
    """
    observed = data[data[var].notnull()]
    medians = observed[[var, 'Outcome']].groupby(['Outcome'])[[var]].median()
    for outcome in (0, 1):
        missing = (data['Outcome'] == outcome) & (data[var].isnull())
        data.loc[missing, var] = medians.loc[outcome, var]
    return data
#impute values using the function
data = impute_median(data, 'Glucose')
data = impute_median(data, 'BloodPressure')
data = impute_median(data, 'SkinThickness')
data = impute_median(data, 'Insulin')
data = impute_median(data, 'BMI')
#separate features and target as x & y
y = data['Outcome']
x = data.drop('Outcome', axis = 1)
columns = x.columns
# Scale the values using a StandardScaler.
# NOTE(review): the scaler is fit on the FULL dataset before the
# train/test split below — mild data leakage; consider fitting on
# x_train only.
scaler = StandardScaler()
scaler = scaler.fit(x)
X = scaler.transform(x)
#features DataFrame
features = pd.DataFrame(X, columns = columns)
# Training
#split data into training and test sets
x_train, x_test, y_train, y_test = train_test_split(features, y, test_size = 0.2, random_state = 42)
#define the model
model = RandomForestClassifier(n_estimators=300, bootstrap = True, max_features = 'sqrt')
#fit model to training data
model.fit(x_train, y_train)
#predict on test data
y_pred = model.predict(x_test)
#evaluate performance
# print(classification_report(y_test, y_pred))
# Dump Scaler object and Model object using joblib so inference code
# can apply the identical scaling before predicting.
dump(scaler, os.path.join(path, "resources","scaler.joblib"))
dump(model, os.path.join(path, "resources","model.joblib"))
| 31.246377 | 151 | 0.74397 |
import pandas as pd
import numpy as np
from pandas_profiling import ProfileReport
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from joblib import dump, load
import os
path = os.getcwd()
data = pd.read_csv(os.path.join(path,"data","diabetes.csv"))
data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']] = data[['Glucose','BloodPressure','SkinThickness','Insulin','BMI']].replace(0,np.NaN)
def impute_median(data, var):
temp = data[data[var].notnull()]
temp = temp[[var, 'Outcome']].groupby(['Outcome'])[[var]].median()
data.loc[(data['Outcome'] == 0 ) & (data[var].isnull()), var] = temp.loc[0 ,var]
data.loc[(data['Outcome'] == 1 ) & (data[var].isnull()), var] = temp.loc[1 ,var]
return data
data = impute_median(data, 'Glucose')
data = impute_median(data, 'BloodPressure')
data = impute_median(data, 'SkinThickness')
data = impute_median(data, 'Insulin')
data = impute_median(data, 'BMI')
y = data['Outcome']
x = data.drop('Outcome', axis = 1)
columns = x.columns
scaler = StandardScaler()
scaler = scaler.fit(x)
X = scaler.transform(x)
features = pd.DataFrame(X, columns = columns)
x_train, x_test, y_train, y_test = train_test_split(features, y, test_size = 0.2, random_state = 42)
model = RandomForestClassifier(n_estimators=300, bootstrap = True, max_features = 'sqrt')
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
dump(scaler, os.path.join(path, "resources","scaler.joblib"))
dump(model, os.path.join(path, "resources","model.joblib"))
| true | true |
f7f54d9d31f7537e0668e9b9c814c31d9fb15902 | 2,806 | py | Python | frappe/utils/identicon.py | lukptr/frappe-v7.2.23 | 494f39de78ad9f3ea7b3ff1239a34df35e6f3727 | [
"MIT"
] | 2 | 2021-08-28T06:08:17.000Z | 2021-09-06T10:41:43.000Z | frappe/utils/identicon.py | lukptr/frappe-v7.2.23 | 494f39de78ad9f3ea7b3ff1239a34df35e6f3727 | [
"MIT"
] | 7 | 2016-05-30T04:03:38.000Z | 2019-02-03T03:10:03.000Z | frappe/utils/identicon.py | lukptr/frappe-v7.2.23 | 494f39de78ad9f3ea7b3ff1239a34df35e6f3727 | [
"MIT"
] | 5 | 2016-06-20T08:48:11.000Z | 2018-12-12T09:42:31.000Z | from PIL import Image, ImageDraw
from hashlib import md5
import base64
import StringIO
import random
GRID_SIZE = 5
BORDER_SIZE = 20
SQUARE_SIZE = 40
class Identicon(object):
    """GitHub-style identicon generator.

    A horizontally symmetric GRID_SIZE x GRID_SIZE pattern is derived
    from the md5 hash of the input string. NOTE: the fill colour is
    chosen at random from a pastel palette on every call to
    ``calculate`` — it is not derived from the hash.
    """
    def __init__(self, str_, background='#fafbfc'):
        """
        `str_` is the string used to generate the identicon.
        `background` is the background of the identicon.
        """
        w = h = BORDER_SIZE * 2 + SQUARE_SIZE * GRID_SIZE
        self.image = Image.new('RGB', (w, h), background)
        self.draw = ImageDraw.Draw(self.image)
        self.hash = self.digest(str_)
    def digest(self, str_):
        """
        Returns a md5 numeric hash (the full digest as one big int).
        """
        return int(md5(str_.encode('utf-8')).hexdigest(), 16)
    def calculate(self):
        """
        Creates the identicon.
        One hash bit is consumed per cell while walking the left half
        (plus centre column) of the grid column by column; set cells
        are drawn and mirrored to the right half.
        """
        color = random.choice((
            (254, 196, 197),
            (253, 138, 139),
            (254, 231, 206),
            (254, 208, 159),
            (210, 211, 253),
            (163, 165, 252),
            (247, 213, 247),
            (242, 172, 238),
            (235, 247, 206),
            (217, 241, 157),
            (211, 248, 237),
            (167, 242, 221),
            (255, 249, 207),
            (254, 245, 161),
            (211, 241, 254),
            (168, 228, 254),
            (207, 245, 210),
            (159, 235, 164),
        ))
        self.hash >>= 24  # skip first three bytes
        square_x = square_y = 0  # init square position
        for _ in range(GRID_SIZE * (GRID_SIZE + 1) // 2):
            if self.hash & 1:
                x = BORDER_SIZE + square_x * SQUARE_SIZE
                y = BORDER_SIZE + square_y * SQUARE_SIZE
                self.draw.rectangle(
                    (x, y, x + SQUARE_SIZE, y + SQUARE_SIZE),
                    fill=color,
                    outline=color
                )
                # following is just for mirroring
                x = BORDER_SIZE + (GRID_SIZE - 1 - square_x) * SQUARE_SIZE
                self.draw.rectangle(
                    (x, y, x + SQUARE_SIZE, y + SQUARE_SIZE),
                    fill=color,
                    outline=color
                )
            self.hash >>= 1  # shift to right
            square_y += 1
            if square_y == GRID_SIZE:  # done with first column
                square_y = 0
                square_x += 1
    def generate(self):
        """
        Save and show calculated identicon
        """
        self.calculate()
        with open('identicon.png', 'wb') as out:
            self.image.save(out, 'PNG')
        self.image.show()
    def base64(self, format='PNG'):
        '''
        usage: i = Identicon('xx')
               print(i.base64())
        return: this image's base64 data URI

        BUG FIX (Python 3): the original wrote PNG bytes into a
        StringIO and interpolated the raw ``b64encode`` *bytes* object
        into the URI (yielding "...base64,b'...'"); it also poked at
        Image.SAVE handlers by hand. Use an in-memory bytes buffer,
        the public ``Image.save`` API, and decode the base64 payload.
        '''
        import io
        self.calculate()
        fp = io.BytesIO()
        self.image.save(fp, format=format)
        fp.seek(0)
        encoded = base64.b64encode(fp.read()).decode('ascii')
        return "data:image/png;base64,{0}".format(encoded)
| 24.831858 | 77 | 0.626515 | from PIL import Image, ImageDraw
from hashlib import md5
import base64
import StringIO
import random
GRID_SIZE = 5
BORDER_SIZE = 20
SQUARE_SIZE = 40
class Identicon(object):
def __init__(self, str_, background='#fafbfc'):
w = h = BORDER_SIZE * 2 + SQUARE_SIZE * GRID_SIZE
self.image = Image.new('RGB', (w, h), background)
self.draw = ImageDraw.Draw(self.image)
self.hash = self.digest(str_)
def digest(self, str_):
return int(md5(str_.encode('utf-8')).hexdigest(), 16)
def calculate(self):
color = random.choice((
(254, 196, 197),
(253, 138, 139),
(254, 231, 206),
(254, 208, 159),
(210, 211, 253),
(163, 165, 252),
(247, 213, 247),
(242, 172, 238),
(235, 247, 206),
(217, 241, 157),
(211, 248, 237),
(167, 242, 221),
(255, 249, 207),
(254, 245, 161),
(211, 241, 254),
(168, 228, 254),
(207, 245, 210),
(159, 235, 164),
))
self.hash >>= 24
square_x = square_y = 0
for x in range(GRID_SIZE * (GRID_SIZE + 1) // 2):
if self.hash & 1:
x = BORDER_SIZE + square_x * SQUARE_SIZE
y = BORDER_SIZE + square_y * SQUARE_SIZE
self.draw.rectangle(
(x, y, x + SQUARE_SIZE, y + SQUARE_SIZE),
fill=color,
outline=color
)
x = BORDER_SIZE + (GRID_SIZE - 1 - square_x) * SQUARE_SIZE
self.draw.rectangle(
(x, y, x + SQUARE_SIZE, y + SQUARE_SIZE),
fill=color,
outline=color
)
self.hash >>= 1
square_y += 1
if square_y == GRID_SIZE:
square_y = 0
square_x += 1
def generate(self):
self.calculate()
with open('identicon.png', 'wb') as out:
self.image.save(out, 'PNG')
self.image.show()
def base64(self, format='PNG'):
self.calculate()
fp = StringIO.StringIO()
self.image.encoderinfo = {}
self.image.encoderconfig = ()
if format.upper() not in Image.SAVE:
Image.init()
save_handler = Image.SAVE[format.upper()]
try:
save_handler(self.image, fp, '')
finally:
fp.seek(0)
return "data:image/png;base64,{0}".format(base64.b64encode(fp.read()))
| true | true |
f7f54dd0af83637bf8e929ec8c35a3c31393073c | 1,079 | py | Python | install.py | sbsrouteur/weewx-PiSenseHat | 70bcc9b31f8e914e773b762dc345f28d849f54a6 | [
"MIT"
] | null | null | null | install.py | sbsrouteur/weewx-PiSenseHat | 70bcc9b31f8e914e773b762dc345f28d849f54a6 | [
"MIT"
] | null | null | null | install.py | sbsrouteur/weewx-PiSenseHat | 70bcc9b31f8e914e773b762dc345f28d849f54a6 | [
"MIT"
] | null | null | null | # installer for PiSenseHat data acquisition service
# Copyright 2021-
# Distributed under the terms of the MIT License
from bin.user.PiSense import PiSensewx
from weecfg.extension import ExtensionInstaller
def loader():
    """Entry point called by the weewx extension installer framework."""
    return PiSenseHatInstaller()
class PiSenseHatInstaller(ExtensionInstaller):
    """Installer descriptor for the Pi Sense HAT weewx data service."""
    def __init__(self):
        super(PiSenseHatInstaller, self).__init__(
            version="0.1",
            name='Pi Sense Hat Service',
            description='Service to include PiSense sensor to WeeWx loop.',
            author="sbsrouteur",
            author_email="sbsrouteur@free.fr",
            # Service class appended to weewx's data_services chain.
            data_services='user.PiSense.PiSensewx',
            config={
                'PiSensewx': {
                    # I2C bus number and sensor address to read from.
                    'i2c_port' : '1',
                    'i2c_address' : '0x5c',
                    # weewx record keys the sensor readings map onto.
                    'temperatureKeys' : 'extraTemp1',
                    'pressureKeys' : 'pressure',
                    'humidityKeys' : 'outHumidity'
                }
            },
            # Files copied into the weewx user directory at install time.
            files=[('bin/user', ['bin/user/TCS34725.py','bin/user/PiSense.py'])]
        )
| 34.806452 | 80 | 0.564411 |
from bin.user.PiSense import PiSensewx
from weecfg.extension import ExtensionInstaller
def loader():
return PiSenseHatInstaller()
class PiSenseHatInstaller(ExtensionInstaller):
def __init__(self):
super(PiSenseHatInstaller, self).__init__(
version="0.1",
name='Pi Sense Hat Service',
description='Service to include PiSense sensor to WeeWx loop.',
author="sbsrouteur",
author_email="sbsrouteur@free.fr",
data_services='user.PiSense.PiSensewx',
config={
'PiSensewx': {
'i2c_port' : '1',
'i2c_address' : '0x5c',
'temperatureKeys' : 'extraTemp1',
'pressureKeys' : 'pressure',
'humidityKeys' : 'outHumidity'
}
},
files=[('bin/user', ['bin/user/TCS34725.py','bin/user/PiSense.py'])]
)
| true | true |
f7f54e8a9a9a6f6cac23d7bfc41dbc4d7f726f49 | 1,170 | py | Python | code/gits_commit.py | ianyehwork/GITS | ee88aea547dca93b91e54c8efd9ffd9112841bc0 | [
"MIT"
] | null | null | null | code/gits_commit.py | ianyehwork/GITS | ee88aea547dca93b91e54c8efd9ffd9112841bc0 | [
"MIT"
] | 17 | 2020-10-08T21:01:08.000Z | 2020-10-28T18:18:35.000Z | code/gits_commit.py | ianyehwork/GITS | ee88aea547dca93b91e54c8efd9ffd9112841bc0 | [
"MIT"
] | 8 | 2020-10-29T20:28:09.000Z | 2021-09-27T13:25:19.000Z | #!/usr/bin/python3
from subprocess import Popen, PIPE
def gits_commit_func(args):
"""
Function that commit files as staged in the git command line internface
Performs operation as similar to git commit command.
Future additions : user can specify if the commit should be rejected , if the unit test fails.
"""
try:
subprocess_command = list()
subprocess_command.append("git")
subprocess_command.append("commit")
commit_message = args.m
if not commit_message:
print("ERROR: gits commit message not present, aborting")
return False
subprocess_command.append("-m")
subprocess_command.append(commit_message)
if not args.amend:
# do nothing
pass
else:
subprocess_command.append("--amend")
# print(subprocess_command)
process = Popen(subprocess_command, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
except Exception as e:
print("ERROR: gits commit command caught an exception")
print("ERROR: {}".format(str(e)))
return False
return True
| 30.789474 | 98 | 0.639316 |
from subprocess import Popen, PIPE
def gits_commit_func(args):
try:
subprocess_command = list()
subprocess_command.append("git")
subprocess_command.append("commit")
commit_message = args.m
if not commit_message:
print("ERROR: gits commit message not present, aborting")
return False
subprocess_command.append("-m")
subprocess_command.append(commit_message)
if not args.amend:
pass
else:
subprocess_command.append("--amend")
process = Popen(subprocess_command, stdout=PIPE, stderr=PIPE)
stdout, stderr = process.communicate()
except Exception as e:
print("ERROR: gits commit command caught an exception")
print("ERROR: {}".format(str(e)))
return False
return True
| true | true |
f7f5506e23a2005a1f241f22a204c42251737148 | 7,026 | py | Python | face_sdk/core/model_handler/face_detection/FaceDetModelHandler.py | XJX777/FaceX-Zoo | 9d083ed58d77dca077bbdae3e8bbdc73f46d287f | [
"Apache-2.0"
] | null | null | null | face_sdk/core/model_handler/face_detection/FaceDetModelHandler.py | XJX777/FaceX-Zoo | 9d083ed58d77dca077bbdae3e8bbdc73f46d287f | [
"Apache-2.0"
] | null | null | null | face_sdk/core/model_handler/face_detection/FaceDetModelHandler.py | XJX777/FaceX-Zoo | 9d083ed58d77dca077bbdae3e8bbdc73f46d287f | [
"Apache-2.0"
] | null | null | null | """
@author: JiXuan Xu, Jun Wang
@date: 20201019
@contact: jun21wangustc@gmail.com
"""
import logging.config
logging.config.fileConfig("config/logging.conf")
logger = logging.getLogger('sdk')
import torch
import numpy as np
from math import ceil
from itertools import product as product
import torch.backends.cudnn as cudnn
from core.model_handler.BaseModelHandler import BaseModelHandler
from utils.BuzException import *
class FaceDetModelHandler(BaseModelHandler):
"""Implimentation of face detection model handler
Attributes:
model: the face detection model.
device: use cpu or gpu to process.
cfg(dict): testing config, inherit from the parent class.
"""
def __init__(self, model, device, cfg):
"""
Init FaceDetModelHandler settings.
"""
super().__init__(model, device, cfg)
self.variance = self.cfg['variance']
def inference_on_image(self, image):
"""Get the inference of the image and process the inference result.
Returns:
A numpy array, the shape is N * (x, y, w, h, confidence),
N is the number of detection box.
"""
cudnn.benchmark = True
input_height, input_width, _ = image.shape
try:
image, scale = self._preprocess(image)
except Exception as e:
raise e
self.model = self.model.to(self.device)
image = torch.from_numpy(image).unsqueeze(0)
with torch.no_grad():
image = image.to(self.device)
scale = scale.to(self.device)
loc, conf, landms = self.model(image)
dets = self._postprocess(loc, conf, scale, input_height, input_width)
return dets
def _preprocess(self, image):
"""Preprocess the image, such as standardization and other operations.
Returns:
A numpy array list, the shape is channel * h * w.
A tensor, the shape is 4.
"""
if not isinstance(image, np.ndarray):
logger.error('The input should be the ndarray read by cv2!')
raise InputError()
img = np.float32(image)
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
return img, scale
def _postprocess(self, loc, conf, scale, input_height, input_width):
"""Postprecess the prediction result.
Decode detection result, set the confidence threshold and do the NMS
to keep the appropriate detection box.
Returns:
A numpy array, the shape is N * (x, y, w, h, confidence),
N is the number of detection box.
"""
priorbox = PriorBox(self.cfg, image_size=(input_height, input_width))
priors = priorbox.forward()
priors = priors.to(self.device)
prior_data = priors.data
boxes = self.decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
boxes = boxes * scale
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
# ignore low scores
inds = np.where(scores > self.cfg['confidence_threshold'])[0]
boxes = boxes[inds]
scores = scores[inds]
# keep top-K before NMS
order = scores.argsort()[::-1]
boxes = boxes[order]
scores = scores[order]
# do NMS
nms_threshold = 0.2
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = self.py_cpu_nms(dets, nms_threshold)
dets = dets[keep, :]
return dets
# Adapted from https://github.com/chainer/chainercv
def decode(self, loc, priors, variances):
"""Decode locations from predictions using priors to undo
the encoding we did for offset regression at train time.
Args:
loc (tensor): location predictions for loc layers,
Shape: [num_priors,4]
priors (tensor): Prior boxes in center-offset form.
Shape: [num_priors,4].
variances: (list[float]) Variances of priorboxes
Return:
decoded bounding box predictions
"""
boxes = torch.cat((priors[:, :2], priors[:, 2:]), 1)
boxes[:, :2] = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
boxes[:, 2:] = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
# Adapted from https://github.com/biubug6/Pytorch_Retinafacey
def py_cpu_nms(self, dets, thresh):
"""Python version NMS.
Returns:
The kept index after NMS.
"""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
# Adapted from https://github.com/biubug6/Pytorch_Retinafacey
class PriorBox(object):
"""Compute the suitable parameters of anchors for later decode operation
Attributes:
cfg(dict): testing config.
image_size(tuple): the input image size.
"""
def __init__(self, cfg, image_size=None):
"""
Init priorBox settings related to the generation of anchors.
"""
super(PriorBox, self).__init__()
self.min_sizes = cfg['min_sizes']
self.steps = cfg['steps']
self.image_size = image_size
self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
self.name = "s"
def forward(self):
anchors = []
for k, f in enumerate(self.feature_maps):
min_sizes = self.min_sizes[k]
for i, j in product(range(f[0]), range(f[1])):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
for cy, cx in product(dense_cy, dense_cx):
anchors += [cx, cy, s_kx, s_ky]
# back to torch land
output = torch.Tensor(anchors).view(-1, 4)
return output
| 36.216495 | 115 | 0.568887 |
import logging.config
logging.config.fileConfig("config/logging.conf")
logger = logging.getLogger('sdk')
import torch
import numpy as np
from math import ceil
from itertools import product as product
import torch.backends.cudnn as cudnn
from core.model_handler.BaseModelHandler import BaseModelHandler
from utils.BuzException import *
class FaceDetModelHandler(BaseModelHandler):
def __init__(self, model, device, cfg):
super().__init__(model, device, cfg)
self.variance = self.cfg['variance']
def inference_on_image(self, image):
cudnn.benchmark = True
input_height, input_width, _ = image.shape
try:
image, scale = self._preprocess(image)
except Exception as e:
raise e
self.model = self.model.to(self.device)
image = torch.from_numpy(image).unsqueeze(0)
with torch.no_grad():
image = image.to(self.device)
scale = scale.to(self.device)
loc, conf, landms = self.model(image)
dets = self._postprocess(loc, conf, scale, input_height, input_width)
return dets
def _preprocess(self, image):
if not isinstance(image, np.ndarray):
logger.error('The input should be the ndarray read by cv2!')
raise InputError()
img = np.float32(image)
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
return img, scale
def _postprocess(self, loc, conf, scale, input_height, input_width):
priorbox = PriorBox(self.cfg, image_size=(input_height, input_width))
priors = priorbox.forward()
priors = priors.to(self.device)
prior_data = priors.data
boxes = self.decode(loc.data.squeeze(0), prior_data, self.cfg['variance'])
boxes = boxes * scale
boxes = boxes.cpu().numpy()
scores = conf.squeeze(0).data.cpu().numpy()[:, 1]
inds = np.where(scores > self.cfg['confidence_threshold'])[0]
boxes = boxes[inds]
scores = scores[inds]
order = scores.argsort()[::-1]
boxes = boxes[order]
scores = scores[order]
nms_threshold = 0.2
dets = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
keep = self.py_cpu_nms(dets, nms_threshold)
dets = dets[keep, :]
return dets
def decode(self, loc, priors, variances):
boxes = torch.cat((priors[:, :2], priors[:, 2:]), 1)
boxes[:, :2] = priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:]
boxes[:, 2:] = priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])
boxes[:, :2] -= boxes[:, 2:] / 2
boxes[:, 2:] += boxes[:, :2]
return boxes
def py_cpu_nms(self, dets, thresh):
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
class PriorBox(object):
def __init__(self, cfg, image_size=None):
super(PriorBox, self).__init__()
self.min_sizes = cfg['min_sizes']
self.steps = cfg['steps']
self.image_size = image_size
self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps]
self.name = "s"
def forward(self):
anchors = []
for k, f in enumerate(self.feature_maps):
min_sizes = self.min_sizes[k]
for i, j in product(range(f[0]), range(f[1])):
for min_size in min_sizes:
s_kx = min_size / self.image_size[1]
s_ky = min_size / self.image_size[0]
dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]]
dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]]
for cy, cx in product(dense_cy, dense_cx):
anchors += [cx, cy, s_kx, s_ky]
output = torch.Tensor(anchors).view(-1, 4)
return output
| true | true |
f7f55197df03df7d78e36b62a77172709bb13cc0 | 4,617 | py | Python | src/python/fsqio/pants/node/tasks/webpack.py | stuhood/fsqio | 5f133d74e88649da336c362f1af71ca1a42a41d7 | [
"Apache-2.0"
] | null | null | null | src/python/fsqio/pants/node/tasks/webpack.py | stuhood/fsqio | 5f133d74e88649da336c362f1af71ca1a42a41d7 | [
"Apache-2.0"
] | null | null | null | src/python/fsqio/pants/node/tasks/webpack.py | stuhood/fsqio | 5f133d74e88649da336c362f1af71ca1a42a41d7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2016 Foursquare Labs Inc. All Rights Reserved.
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
import os.path
from textwrap import dedent
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.resources import Resources as BaseResources
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_task import NodeTask
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.contextutil import pushd
from fsqio.pants.node.targets.webpack_module import NpmResource, WebPackModule
class WebPack(NodeTask, SimpleCodegenTask):
"""Run webpack on WebPackModule targets.
The result is a synthetic target that subclasses `Resources` and
the task exports the `compile_classpath` product, so
the output should appear on the classpath of any
JVM target that transitively depends on the node target
being codegenned.
WARNING: The node module must express a dependency on webpack
in its package.json / npm-shrinkwrap.json, or this task will
fail.
"""
class Resources(BaseResources):
"""Resources container to hold generated json."""
@classmethod
def product_types(cls):
return super(WebPack, cls).product_types() + [WebPack.Resources, 'compile_classpath']
@property
def cache_target_dirs(self):
return True
@property
def _copy_target_attributes(self):
# Override from SimpleCodegenTask, which expects targets to have a 'provided' attribute.
# NOTE(Mateo): This is needed for compatability with Pants 1.1.0.
return ['tags', 'scope']
@classmethod
def implementation_version(cls):
return super(WebPack, cls).implementation_version() + [('WebPack', 5)]
@classmethod
def prepare(cls, options, round_manager):
# NOTE(mateo): This task should be requiring the NodePaths product - but doing so results in a goal cycle upstream.
# - NodePaths is a product of the NodeResolve task, so requiring it meant Webpack depended on NodeResolve.
# - NodeResolve was installed into the 'resolve' goal, and 'resolve' depends on 'gen' goal
# - WebPack is a SimpleCodegen subclass, so this meant that WebpackResolve depended on WebPack, obviously a cycle.
# - NodeResolve also registers the product requirements of every Resolver subsystem, including ScalaJs, etc.
#
# The workaround is simply to not require NodePaths and instead enforce the WebPack -> WebPackResolve with a
# separate product. NodePaths is just a cache to make sure that a target is not processed by multiple resolvers.
# We are forcing this to run right before gen, so the upstream resolvers will by definition not have ran.
#
# TODO(mateo): Fix the scheduling - it will likely require upstream changes to the Node plugin or forking NodePaths.
# super(Webpack, cls).prepare(options, round_manager)
# round_manager.require_data(NodePaths)
round_manager.require_data('webpack_distribution')
@classmethod
def register_options(cls, register):
super(WebPack, cls).register_options(register)
register(
'--destination-dir', type=str, advanced=True, default='webpack',
help='The directory prefix for webpack resources to go in'
)
def synthetic_target_type(self, target):
return WebPack.Resources
def is_gentarget(self, target):
return isinstance(target, WebPackModule) and not isinstance(target, NpmResource)
def execute_codegen(self, target, target_workdir):
node_paths = self.context.products.get_data(NodePaths)
if not node_paths:
raise TaskError("No npm distribution was found!")
node_path = node_paths.node_path(target)
dest_dir = os.path.join(target_workdir, self.get_options().destination_dir, target.name)
# Added "bail" to the args since webpack only returns failure on failed transpiling, treating missing deps or
# syntax errors as soft errors. This resulted in Pants returning success while the canary fails health check.
args = [
'run-script',
'webpack',
'--',
'--bail',
'--output-path={}'.format(dest_dir),
'--env=dist',
]
with pushd(node_path):
result, npm_run = self.execute_npm(
args=args,
workunit_name='npm',
workunit_labels=[WorkUnitLabel.RUN],
)
if result:
raise TaskError(dedent(
""" webpack command:
\n\t{} failed with exit code {}
""".format(' '.join(npm_run.cmd), result)
))
| 37.844262 | 120 | 0.730778 |
from __future__ import (
absolute_import,
division,
generators,
nested_scopes,
print_function,
unicode_literals,
with_statement,
)
import os.path
from textwrap import dedent
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.build_graph.resources import Resources as BaseResources
from pants.contrib.node.tasks.node_paths import NodePaths
from pants.contrib.node.tasks.node_task import NodeTask
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.contextutil import pushd
from fsqio.pants.node.targets.webpack_module import NpmResource, WebPackModule
class WebPack(NodeTask, SimpleCodegenTask):
class Resources(BaseResources):
@classmethod
def product_types(cls):
return super(WebPack, cls).product_types() + [WebPack.Resources, 'compile_classpath']
@property
def cache_target_dirs(self):
return True
@property
def _copy_target_attributes(self):
return ['tags', 'scope']
@classmethod
def implementation_version(cls):
return super(WebPack, cls).implementation_version() + [('WebPack', 5)]
@classmethod
def prepare(cls, options, round_manager):
round_manager.require_data('webpack_distribution')
@classmethod
def register_options(cls, register):
super(WebPack, cls).register_options(register)
register(
'--destination-dir', type=str, advanced=True, default='webpack',
help='The directory prefix for webpack resources to go in'
)
def synthetic_target_type(self, target):
return WebPack.Resources
def is_gentarget(self, target):
return isinstance(target, WebPackModule) and not isinstance(target, NpmResource)
def execute_codegen(self, target, target_workdir):
node_paths = self.context.products.get_data(NodePaths)
if not node_paths:
raise TaskError("No npm distribution was found!")
node_path = node_paths.node_path(target)
dest_dir = os.path.join(target_workdir, self.get_options().destination_dir, target.name)
args = [
'run-script',
'webpack',
'--',
'--bail',
'--output-path={}'.format(dest_dir),
'--env=dist',
]
with pushd(node_path):
result, npm_run = self.execute_npm(
args=args,
workunit_name='npm',
workunit_labels=[WorkUnitLabel.RUN],
)
if result:
raise TaskError(dedent(
""" webpack command:
\n\t{} failed with exit code {}
""".format(' '.join(npm_run.cmd), result)
))
| true | true |
f7f551aaac7161bf78ae3706e254660f1a55999d | 6,170 | py | Python | unsupervised_training.py | Steven20210/cell_death_ML | c1a380bb2f9f3e0279403cf76cb5f5193e771b5b | [
"MIT"
] | null | null | null | unsupervised_training.py | Steven20210/cell_death_ML | c1a380bb2f9f3e0279403cf76cb5f5193e771b5b | [
"MIT"
] | null | null | null | unsupervised_training.py | Steven20210/cell_death_ML | c1a380bb2f9f3e0279403cf76cb5f5193e771b5b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
from matplotlib import style
# from sci_utilities import is_outlier
import pandas as pd
style.use("ggplot")
from sklearn.cluster import MiniBatchKMeans
from keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
import pickle
from sklearn.neighbors import NearestNeighbors
from PIL import Image, ImageEnhance
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
label = [0, 1]
array_label = np.asarray(label)
nucleus_array = []
index = 0
colors = ['r.', 'g.', 'b', '']
def pca():
dying = []
extreme = []
# outputs_array_in = open('layer_4_output_all2.array', 'rb')
# output_array = pickle.load(outputs_array_in)
outputs_array_in = open('sample_3.array', 'rb')
output_array = pickle.load(outputs_array_in)
labels_array_in = open('label_2.array', 'rb')
labels_array = pickle.load(labels_array_in)
# output_array1 = np.asarray(output_array)
output_array1 = []
imgs = []
for i in output_array:
for j in i:
mean_pi, std_pi = cv2.meanStdDev(j)
output_array1.append(std_pi[0][0])
imgs.append(j)
# output_array1.append(std_pi[0][0])
# imgs.append(j)
# for data in output_array1:
# data_mean, data_std = cv2.meanStdDev(data)
#
# cut_off = data_std * 3
#
# lower_bound, upper_bound = data_mean - cut_off, data_mean + cut_off
for img in output_array1:
if img > 120:
output_array1.remove(img)
# optimal_bins = np.histogram_bin_edges(output_array1, bins='fd')
q3, q1 = np.percentile(output_array1, [75, 25])
iqr = q3 - q1
h = 2 * iqr * (len(output_array1) ** (-1/3))
optimal_bins = int((np.amax(output_array1) - np.amin(output_array1))/h)
possible_hist = [1, 1.5, 2, 2.5, 3]
saved_hist = []
for i in range(len(possible_hist)):
optimal_bins = int(possible_hist[i] * optimal_bins)
# hist, axs = plt.subplots(1, len(possible_hist))
# axs[1, i].set_title('PI Standard Deviation of H2O2-stimulated Nuclei Images (2650 images)', fontsize=10)
# axs[1, i].set_xlabel("PI Standard Deviation")
# axs[1, i].set_ylabel("# of Images")
# axs[1, i].hist(output_array1, bins=optimal_bins, range=[0, 120])
plt.title('PI Standard Deviation of H2O2-stimulated Nuclei Images (2650 images)', fontsize=10)
plt.xlabel("PI Standard Deviation")
plt.ylabel("# of Images")
plt.hist(output_array1, bins=optimal_bins, range=[0, 120])
saved = plt.savefig("histogram " + str(possible_hist[i]) + "x.png")
saved_hist.append(saved)
# plt.show()
# hist, bin_edges = np.histogram(output_array1)
# for i in range(len(output_array1)):
# if output_array1[i] > 36:
# print(output_array1[i])
# plt.imshow(imgs[i])
# plt.show()
return possible_hist, output_array1, optimal_bins
# output_array1 = np.asarray(output_array1)
# output_array1 = output_array1.reshape(output_array1.shape[-5], -1)
# outputs_array_1 = np.transpose(output_array1, (1, 0, 2))
# for x in outputs_array_1:
# for x in output_array1:
# transformed = StandardScaler().fit_transform(x)
# components = PCA(n_components=2)
#
#
# # principalComponents = components.fit_transform(transformed)
#
# principalComponents = components.fit_transform(output_array1)
#
# variance = components.explained_variance_ratio_
#
# print(variance)
# principalDf = pd.DataFrame(data=principalComponents, columns=['principal component 1', 'principal component 2'])
#
# print(principalDf)
# for i in range(len(principalComponents)):
# # plt.plot(principalComponents[i][0], principalComponents[i][1], colors[labels_array[i]], markersize=5)
# plt.plot(principalComponents[i][0], principalComponents[i][1], 'g.', markersize=5)
#
# plt.xlabel("PCA1 - " + str(variance[0] * 100) + " %")
# plt.ylabel("PCA2 - " + str(variance[1] * 100) + " %")
# plt.title('PCA Plot of the Output Values of Layer 4 of the Cell Death Classification Neural Network', fontsize=10)
# for i in range(len(principalDf)):
#
# # if 0 <= principalDf.iloc[i][0] <= 15:
# # #healthy_close.append(i)
# # extreme.append(i)
# if principalDf.iloc[i][0] <= 0:
# extreme.append(i)
# # elif principalDf.iloc[i][0] > 30:
# # healthy_far.append(i)
# else:
# dying.append(i)
# plt.legend(['dying cells', 'healthy cells'])
a, b, c = pca()
print(a, b, c)
def kmeans_clustering(X):
total_clusters = len(np.unique(array_label))
kmeans = MiniBatchKMeans(n_clusters= total_clusters)
kmeans.fit(X)
labels = kmeans.labels_
centroids = kmeans.cluster_centers_
index = 0
index1 = 0
index2 = 0
print(labels)
for i in labels:
if i == 0:
index += 1
elif i == 1:
index1 += 1
elif i == 2:
index2 += 1
print(str(index) + " : 0 ," + str(index1) + " : 1 ," + str(index2) + " : 2")
return centroids, labels
def show_cluster(centroids, labels, X):
colors = ["g.", "r.", "c.", "y."]
x = []
y = []
for i in range(len(X)):
#print("coordinate:", X[i], "label:", labels[i])
#plt.plot(X[i][0], X[i][1], colors[labels[i]], markersize=10)
x.append(X[i][0])
y.append(X[i][1])
for i in range(len(x)):
plt.plot([i], x[i], "g.", markersize=10)
plt.plot([i], y[i], 'r.', markersize=10)
# x = np.asarray(x)
# x = x.reshape(-1, 1)
# y = np.asarray(y)
#
# cov = np.cov(x, y)
#
# print(cov)
# reg = LinearRegression()
# reg.fit(x, y)
#
# reg_predict = reg.predict(x)
# plt.plot(x, reg_predict)
# print(reg.coef_)
plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=150, linewidths=5, zorder=10)
plt.title("Weights")
plt.show() | 30.85 | 120 | 0.609562 | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cv2
from matplotlib import style
import pandas as pd
style.use("ggplot")
from sklearn.cluster import MiniBatchKMeans
from keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
import pickle
from sklearn.neighbors import NearestNeighbors
from PIL import Image, ImageEnhance
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
label = [0, 1]
array_label = np.asarray(label)
nucleus_array = []
index = 0
colors = ['r.', 'g.', 'b', '']
def pca():
dying = []
extreme = []
outputs_array_in = open('sample_3.array', 'rb')
output_array = pickle.load(outputs_array_in)
labels_array_in = open('label_2.array', 'rb')
labels_array = pickle.load(labels_array_in)
output_array1 = []
imgs = []
for i in output_array:
for j in i:
mean_pi, std_pi = cv2.meanStdDev(j)
output_array1.append(std_pi[0][0])
imgs.append(j)
for img in output_array1:
if img > 120:
output_array1.remove(img)
q3, q1 = np.percentile(output_array1, [75, 25])
iqr = q3 - q1
h = 2 * iqr * (len(output_array1) ** (-1/3))
optimal_bins = int((np.amax(output_array1) - np.amin(output_array1))/h)
possible_hist = [1, 1.5, 2, 2.5, 3]
saved_hist = []
for i in range(len(possible_hist)):
optimal_bins = int(possible_hist[i] * optimal_bins)
plt.title('PI Standard Deviation of H2O2-stimulated Nuclei Images (2650 images)', fontsize=10)
plt.xlabel("PI Standard Deviation")
plt.ylabel("# of Images")
plt.hist(output_array1, bins=optimal_bins, range=[0, 120])
saved = plt.savefig("histogram " + str(possible_hist[i]) + "x.png")
saved_hist.append(saved)
return possible_hist, output_array1, optimal_bins
means.labels_
centroids = kmeans.cluster_centers_
index = 0
index1 = 0
index2 = 0
print(labels)
for i in labels:
if i == 0:
index += 1
elif i == 1:
index1 += 1
elif i == 2:
index2 += 1
print(str(index) + " : 0 ," + str(index1) + " : 1 ," + str(index2) + " : 2")
return centroids, labels
def show_cluster(centroids, labels, X):
colors = ["g.", "r.", "c.", "y."]
x = []
y = []
for i in range(len(X)):
x.append(X[i][0])
y.append(X[i][1])
for i in range(len(x)):
plt.plot([i], x[i], "g.", markersize=10)
plt.plot([i], y[i], 'r.', markersize=10)
plt.scatter(centroids[:, 0], centroids[:, 1], marker="x", s=150, linewidths=5, zorder=10)
plt.title("Weights")
plt.show() | true | true |
f7f551f3c1564167744221c32674338402e2a694 | 3,011 | py | Python | nets.py | TomaszGolan/q-learning-maze | 2540acf09d939c1686060c58cbe52775e94304ed | [
"MIT"
] | null | null | null | nets.py | TomaszGolan/q-learning-maze | 2540acf09d939c1686060c58cbe52775e94304ed | [
"MIT"
] | null | null | null | nets.py | TomaszGolan/q-learning-maze | 2540acf09d939c1686060c58cbe52775e94304ed | [
"MIT"
] | null | null | null | """Quality functions"""
import tensorflow as tf
import numpy as np
from settings import Moves, Settings
class Net01:
"""My first attempt to approximate Q with NN"""
def __init__(self, session, in_size, snapshot=None):
"""Create a graph for NN
session -- tensorflow session
input_size -- input vector size (maze width x maze height)
snapshot -- path to saved model
"""
self.sess = session
# layers size
self.in_size = in_size
self.out_size = len(Moves.ALL)
h01_size = Settings.NOF_HIDDEN_NEURONS
h02_size = Settings.NOF_HIDDEN_NEURONS
# placeholders for features and targets
self.x = tf.placeholder(tf.float32, [None, in_size])
self.y = tf.placeholder(tf.float32, [None, self.out_size])
# weights
w01 = tf.Variable(tf.random_normal([in_size, h01_size]))
w02 = tf.Variable(tf.random_normal([h01_size, h02_size]))
w03 = tf.Variable(tf.random_normal([h02_size, self.out_size]))
# biases
b01 = tf.Variable(tf.zeros([h01_size]))
b02 = tf.Variable(tf.zeros([h02_size]))
b03 = tf.Variable(tf.zeros([self.out_size]))
# hidden layers
h01 = tf.nn.relu(tf.add(tf.matmul(self.x, w01), b01))
h02 = tf.nn.relu(tf.add(tf.matmul(h01, w02), b02))
# output layer
self.out = tf.add(tf.matmul(h02, w03), b03)
# training
loss = tf.reduce_mean(tf.losses.mean_squared_error(
labels=self.y, predictions=self.out))
self.train = \
tf.train.AdamOptimizer(Settings.LEARNING_RATE).minimize(loss)
self.sess.run(tf.global_variables_initializer())
def predict(self, state):
"""Predict next move"""
return self.sess.run(tf.argmax(self.out, 1),
feed_dict={self.x: state})[0]
def maxQ(self, state):
"""Get max possible quality function value (for the next move)"""
return np.max(self.sess.run(self.out, feed_dict={self.x: state})[0])
def inference(self, state):
"""Get network output"""
return self.sess.run(self.out, feed_dict={self.x: state})[0]
def training(self, inputs, targets):
self.sess.run(self.train, feed_dict={self.x: inputs, self.y: targets})
def get_training_data(network, history):
"""Prepare next batch of training data"""
inputs = np.zeros((Settings.BATCH_SIZE, network.in_size))
targets = np.zeros((Settings.BATCH_SIZE, network.out_size))
# loop over random episodes from history
for i, entry in enumerate(history.get_data(Settings.BATCH_SIZE)):
state, action, reward, next_state, game_over = entry
inputs[i] = state
targets[i] = network.inference(state)
if game_over:
targets[i, action] = reward
else:
targets[i, action] = reward + \
Settings.GAMMA * network.maxQ(next_state)
return inputs, targets
| 33.831461 | 78 | 0.616739 | import tensorflow as tf
import numpy as np
from settings import Moves, Settings
class Net01:
def __init__(self, session, in_size, snapshot=None):
self.sess = session
self.in_size = in_size
self.out_size = len(Moves.ALL)
h01_size = Settings.NOF_HIDDEN_NEURONS
h02_size = Settings.NOF_HIDDEN_NEURONS
self.x = tf.placeholder(tf.float32, [None, in_size])
self.y = tf.placeholder(tf.float32, [None, self.out_size])
w01 = tf.Variable(tf.random_normal([in_size, h01_size]))
w02 = tf.Variable(tf.random_normal([h01_size, h02_size]))
w03 = tf.Variable(tf.random_normal([h02_size, self.out_size]))
b01 = tf.Variable(tf.zeros([h01_size]))
b02 = tf.Variable(tf.zeros([h02_size]))
b03 = tf.Variable(tf.zeros([self.out_size]))
h01 = tf.nn.relu(tf.add(tf.matmul(self.x, w01), b01))
h02 = tf.nn.relu(tf.add(tf.matmul(h01, w02), b02))
self.out = tf.add(tf.matmul(h02, w03), b03)
loss = tf.reduce_mean(tf.losses.mean_squared_error(
labels=self.y, predictions=self.out))
self.train = \
tf.train.AdamOptimizer(Settings.LEARNING_RATE).minimize(loss)
self.sess.run(tf.global_variables_initializer())
def predict(self, state):
return self.sess.run(tf.argmax(self.out, 1),
feed_dict={self.x: state})[0]
def maxQ(self, state):
return np.max(self.sess.run(self.out, feed_dict={self.x: state})[0])
def inference(self, state):
return self.sess.run(self.out, feed_dict={self.x: state})[0]
def training(self, inputs, targets):
self.sess.run(self.train, feed_dict={self.x: inputs, self.y: targets})
def get_training_data(network, history):
inputs = np.zeros((Settings.BATCH_SIZE, network.in_size))
targets = np.zeros((Settings.BATCH_SIZE, network.out_size))
for i, entry in enumerate(history.get_data(Settings.BATCH_SIZE)):
state, action, reward, next_state, game_over = entry
inputs[i] = state
targets[i] = network.inference(state)
if game_over:
targets[i, action] = reward
else:
targets[i, action] = reward + \
Settings.GAMMA * network.maxQ(next_state)
return inputs, targets
| true | true |
f7f552e0ae64779722621f26163da8d176c85822 | 12,356 | py | Python | tests/test_index.py | eukaryote/knowhow | 276439680e4075300d8001cae4f03d199f473991 | [
"MIT"
] | null | null | null | tests/test_index.py | eukaryote/knowhow | 276439680e4075300d8001cae4f03d199f473991 | [
"MIT"
] | null | null | null | tests/test_index.py | eukaryote/knowhow | 276439680e4075300d8001cae4f03d199f473991 | [
"MIT"
] | null | null | null | # coding=utf8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import datetime
import hashlib
import json
from os.path import abspath, dirname, exists, join
import pickle
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from pytz import UTC
from knowhow.index import Index, Results
from knowhow import schema, util
from knowhow.conf import PYTHON2
import tests
def test_index_to_doc_tag_str():
    """A string-valued ``tag`` is normalized into a one-element list."""
    original = {"id": "myid", "tag": "mytag", "content": "my content"}
    expected = {"id": "myid", "tag": ["mytag"], "content": "my content"}
    assert Index._to_doc(original) == expected
def test_index_to_doc_tag_list():
    """A doc whose ``tag`` is already a list passes through unchanged."""
    source = {"id": "myid", "tag": ["tag1", "tag2"], "content": "my content"}
    assert Index._to_doc(source) == source
def test_index_property_opens_without_clearing(tmp_app_index_dir_paths):
    """Opening a second Index over the same dirs must not wipe existing docs."""
    _, app_dir, index_dir = tmp_app_index_dir_paths

    first = Index(app_dir=app_dir, index_dir=index_dir)
    # Fresh index starts empty.
    with first._index.reader() as reader:
        assert reader.doc_count() == 0

    first.add(tag="foo", content="my content")
    with first._index.reader() as reader:
        assert reader.doc_count() == 1

    # A second Index instance over the same directories sees the same doc.
    second = Index(app_dir=app_dir, index_dir=index_dir)
    with second._index.reader() as reader:
        assert reader.doc_count() == 1
def test_index_open_nonexistent_noclear(tmp_app_index_dir_paths):
    """``open(clear=False)`` creates the index directory when it is missing."""
    _, app_dir, index_dir = tmp_app_index_dir_paths
    idx = Index(app_dir=app_dir, index_dir=index_dir)
    assert not exists(index_dir)
    idx.open(clear=False)
    assert exists(index_dir)
def test_index_open_nonexistent_clear(tmp_app_index_dir_paths):
    """``open(clear=True)`` also creates a missing index directory."""
    _, app_dir, index_dir = tmp_app_index_dir_paths
    idx = Index(app_dir=app_dir, index_dir=index_dir)
    assert not exists(index_dir)
    idx.open(clear=True)
    assert exists(index_dir)
def test_index_open_noclear(index_one):
    """A non-clearing reopen preserves the single indexed document."""
    before = len(index_one)
    assert before == 1
    index_one.open(clear=False)
    assert len(index_one) == before
def test_index_open_clear(index_one):
    """``open(clear=True)`` empties a previously populated index."""
    populated = len(index_one)
    assert populated == 1
    index_one.open(clear=True)
    assert len(index_one) == 0
# --- searching ------------------------------------------------------------
def test_index_search_repr(index_one):
    # A bare term is searched against the default "text" field.
    search = index_one.search("myquery")
    assert repr(search) == '<Search (query="text:myquery")>'
def test_index_search_tag(index_one):
    # field:value syntax restricts the search to that field.
    with index_one.search("tag:mytag0") as results:
        assert len(results) == 1
        assert results[0].get("content") == "mycontent0"
def test_index_search_content(index_one):
    with index_one.search("content:mycontent0") as results:
        assert len(results) == 1
        assert results[0].get("tag") == ["mytag0"]
def test_index_search_boolean(index_one):
    # Whoosh-style boolean operators are honored.
    with index_one.search("content:mycontent0 AND tag:mytag0") as results:
        assert len(results) == 1
        assert results[0].get("tag") == ["mytag0"]
# --- Results container protocol ------------------------------------------
def test_results_iter(index_one):
    # Results is iterable and yields each hit exactly once.
    seen = []
    with index_one.search("tag:mytag0") as results:
        for elem in results:
            seen.append(elem)
    assert len(seen) == 1
def test_results_bool():
    # Truthiness reflects the (live) underlying result list.
    _results = []
    search = object()
    results = Results(_results, search)
    assert not bool(results)
    _results.append({})
    assert bool(results)
def test_results_repr(index_one):
    # repr embeds the hit count and the originating search's repr.
    search = '<Search (query="tag:mytag0")>'
    expected = "<Results (count=1, search=%s)>" % (search,)
    with index_one.search("tag:mytag0") as results:
        assert repr(results) == expected
# --- Result mapping protocol ----------------------------------------------
def test_result_length(index_one):
    # len(result) equals the number of stored fields.
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        assert len(result) == len(result.fields.keys())
def test_result_iter(index_one):
    # Iterating a result yields its field names.
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        for key in result:
            assert key in ("tag", "id", "content", "updated")
def test_result_getitem(index_one):
    # Subscript access proxies to the .fields dict.
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        assert result["id"] == result.fields["id"]
def test_result_contains(index_one):
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        assert "id" in result
        assert "foo" not in result
def test_result_repr(index_one):
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        assert repr(result) == "<Result (%s)>" % (result.fields,)
def test_result_format(index_one):
    # format() supports {id}, {tags} (comma-joined) and {content} specs.
    with index_one.search("tag:mytag0") as results:
        result = results[0]
        assert format(result, "{id}") == result.fields["id"]
        assert format(result, "{tags}") == ",".join(result.fields["tag"])
        assert format(result, "{content}") == result.fields["content"]
# --- adding documents -----------------------------------------------------
def test_index_add(index_one):
    # add() with keyword fields appends a new, searchable document.
    assert len(index_one) == 1
    with index_one.search("tag:mytag1") as results:
        assert len(results) == 0
    index_one.add(**tests.test_doc1)
    assert len(index_one) == 2
    with index_one.search("tag:mytag1") as results:
        assert len(results) == 1
def test_index_add_all(index_one):
    # add_all() batches several documents in a single call.
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
    with index_one.search("tag:test_index_add_all") as results:
        assert len(results) == 0
    doc1 = {"tag": "test_index_add_all", "content": "content1"}
    doc2 = {"tag": "test_index_add_all", "content": "content2"}
    index_one.add_all([doc1, doc2])
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 3
    with index_one.search("tag:test_index_add_all") as results:
        assert len(results) == 2
# --- removing documents ---------------------------------------------------
def test_index_remove_none(index_one):
    # remove() without an id is a no-op.
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
    index_one.remove()
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
def test_index_remove_found(index_one):
    # remove(id) returns the number of documents deleted (1 here).
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
    docs = list(index_one)
    assert len(docs) == 1
    doc = docs[0]
    result = index_one.remove(doc["id"])
    assert result == 1
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 0
def test_index_remove_not_found(index_one):
    # An unknown id deletes nothing and returns 0.
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
    assert index_one.remove("invalidid") == 0
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
def test_index_remove_bytes_key(index_one):
    # Byte-string ids are accepted and decoded transparently.
    doc = list(index_one)[0]
    assert index_one.remove(doc["id"].encode("ascii")) == 1
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 0
# --- dump / load round-trip -----------------------------------------------
def test_index_dump_empty(tmpd, index_empty):
    # Dumping an empty index writes an empty JSON array.
    path = join(tmpd, "dump.json")
    with open(path, "w") as f:
        index_empty.dump(f)
    with open(path) as f:
        docs = json.load(f)
    assert len(docs) == 0
def test_index_dump_one(tmpd, index_one):
    # Dumped documents keep their (list-normalized) tag and content.
    path = join(tmpd, "dump.json")
    with open(path, "w") as f:
        index_one.dump(f)
    with open(path) as f:
        docs = json.load(f)
    assert len(docs) == 1
    doc = docs[0]
    assert doc["tag"] == ["mytag0"]
    assert doc["content"] == "mycontent0"
def test_index_load(tmpd, index_empty):
    # load() ingests a previously dumped JSON document list.
    path = join(tmpd, "load.json")
    with open(path, "w") as f:
        json.dump([tests.test_doc_dumped], f)
    assert len(index_empty) == 0
    with open(path, "r") as f:
        index_empty.load(f)
    assert len(index_empty) == 1
    with index_empty.search("tag:mytag") as results:
        assert len(results) == 1
        assert results[0].get("content") == tests.test_doc_dumped["content"]
# --- tag listing ----------------------------------------------------------
def test_index_get_tags(index_one):
    tags = index_one.get_tags()
    assert tags == ["mytag0"]
def test_index_get_tags_prefix(index_one):
    # prefix filtering matches the *start* of the tag only; None and ""
    # are equivalent to "no filter".
    assert index_one.get_tags(prefix=None) == ["mytag0"]
    assert index_one.get_tags(prefix="") == ["mytag0"]
    assert index_one.get_tags(prefix="m") == ["mytag0"]
    assert index_one.get_tags(prefix="mytag0") == ["mytag0"]
    assert index_one.get_tags(prefix="ytag") == []
def test_index_get_tags_with_counts(index_one):
    # counts=True returns (count, tag) pairs sorted by tag name; the
    # fixture's pre-existing "mytag0" document makes its count 2.
    docs = [
        {"tag": "mytag2", "content": "doc1"},
        {"tag": "mytag1", "content": "doc2"},
        {"tag": "mytag0", "content": "doc3"},
        {"tag": "mytag4", "content": "doc4"},
        {"tag": "mytag3", "content": "doc5"},
        {"tag": "testtag0", "content": "doc6"},
    ]
    index_one.add_all(docs)
    expected = [
        (2, "mytag0"),
        (1, "mytag1"),
        (1, "mytag2"),
        (1, "mytag3"),
        (1, "mytag4"),
        (1, "testtag0"),
    ]
    assert index_one.get_tags(counts=True) == expected
    expected.pop()
    assert index_one.get_tags(prefix="mytag", counts=True) == expected
# --- maintenance ----------------------------------------------------------
def test_index_pprint_default(capsys, index_one):
    # pprint() writes each document to stdout starting with its id line.
    index_one.pprint()
    docid = list(index_one)[0]["id"]
    expected = "id: " + util.decode(docid) + "\n"
    out, err = capsys.readouterr()
    assert not err
    assert out.startswith(expected)
def test_index_clear(index_one):
    # clear() drops every document.
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 1
    index_one.clear()
    with index_one._index.reader() as reader:
        assert reader.doc_count() == 0
def test_index_last_modified_utc(index_one):
    # last_modified() defaults to naive UTC; localize=True converts to
    # local time.
    # NOTE(review): datetime.utcfromtimestamp is deprecated since Python
    # 3.12, and the sub-second timing window below could be flaky on a
    # very slow machine.
    dt = datetime.datetime.utcfromtimestamp(index_one._index.last_modified())
    assert index_one.last_modified() == dt
    dt = index_one.last_modified(localize=True)
    now = datetime.datetime.now()
    delta = now - dt
    assert 0 < delta.total_seconds() < 1
# --- id-scheme upgrade ----------------------------------------------------
def test_index_upgrade_not_changed(index_one):
    # Documents already using the current id scheme are left alone.
    assert index_one.upgrade() == 0
def test_index_upgrade_changed(index_empty):
    # A document stored under the legacy md5(content) id is rewritten
    # under schema.identifier(...) while staying searchable by tag.
    index = index_empty
    tag = "mytag"
    content = "mycontent"
    old_id = hashlib.md5(content.encode("ascii")).hexdigest()
    new_id = schema.identifier({"content": content, "tag": tag})
    index.add(id=old_id, content=content, tag=[tag])
    with index.search("id:" + old_id) as results:
        assert len(results) == 1
        assert results[0]["id"] == old_id
    with index.search(tag) as results:
        assert len(results) == 1
        assert results[0]["id"] == old_id
    assert index.upgrade() == 1
    with index.search("id:" + old_id) as results:
        assert not results
    with index.search("id:" + new_id) as results:
        assert len(results) == 1
        assert results[0]["id"] == new_id
    assert len(index) == 1
    with index.search(tag) as results:
        assert len(results) == 1
        assert results[0]["id"] == new_id
def test_index_load_python2(monkeypatch):
    """
    Test loading a python2-generated whoosh index under python3.
    This test passes only because of the monkeypatching of
    whoosh.compat.loads with knowhow.util.pickle_loads.
    """
    # The expected document as it was pickled by the Python 2 writer.
    expected = {
        "id": ("02a7cefe1189668fa85b56b52ee1e769" "1ee1821913f2031c8117263c07526468"),
        "content": "Hello, from Python2",
        "tag": ["python2"],
        "updated": datetime.datetime(2017, 9, 15, 14, 58, 12, 441405, tzinfo=UTC),
    }
    import knowhow
    # Point the app at the checked-in py2-generated fixture index.
    home_dir = join(abspath(dirname(dirname(knowhow.__file__))), "data", "homepy2")
    data_dir = join(home_dir, "datapy2")
    monkeypatch.setenv("KNOWHOW_HOME", home_dir)
    monkeypatch.setenv("KNOWHOW_DATA", data_dir)
    # Replacement for whoosh's pickle loader: falls back to bytes-mode
    # unpickling when py2 ascii pickles fail to decode under py3.
    def load_py2(data, *args, **kwargs):
        try:
            return pickle.loads(data, *args, **kwargs)
        except UnicodeDecodeError as e:
            if PYTHON2 or not e.args[0] == "ascii":
                raise
            result = pickle.loads(data, encoding="bytes")
            # need to handle a py2-pickled dict having bytes keys, which will
            # be skipped in python3, so we convert all keys to str if needed
            if isinstance(result, dict):
                d = {}
                method = result.iteritems if PYTHON2 else result.items
                for k, v in method():
                    if isinstance(k, bytes):
                        k = k.decode("ascii")
                    d[k] = v
                if d:
                    result = d
            return result
    index = Index()
    assert len(index) == 1
    with index.search("tag:python2") as results:
        assert len(results) == 1
        # Reading stored fields triggers the pickle load, so the patch
        # only needs to be active for the field access below.
        with patch("whoosh.columns.loads", load_py2):
            result = results[0]
            assert sorted(result.fields.keys()) == sorted(expected.keys())
            assert result.fields == expected
| 29.773494 | 86 | 0.645435 |
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import datetime
import hashlib
import json
from os.path import abspath, dirname, exists, join
import pickle
try:
from unittest.mock import patch
except ImportError:
from mock import patch
from pytz import UTC
from knowhow.index import Index, Results
from knowhow import schema, util
from knowhow.conf import PYTHON2
import tests
def test_index_to_doc_tag_str():
doc = {"id": "myid", "tag": "mytag", "content": "my content"}
result = Index._to_doc(doc)
assert result == {"id": doc["id"], "tag": [doc["tag"]], "content": doc["content"]}
def test_index_to_doc_tag_list():
doc = {"id": "myid", "tag": ["tag1", "tag2"], "content": "my content"}
assert Index._to_doc(doc) == doc
def test_index_property_opens_without_clearing(tmp_app_index_dir_paths):
_, appd, indexd = tmp_app_index_dir_paths
index1 = Index(app_dir=appd, index_dir=indexd)
with index1._index.reader() as reader:
assert reader.doc_count() == 0
index1.add(tag="foo", content="my content")
with index1._index.reader() as reader:
assert reader.doc_count() == 1
index2 = Index(app_dir=appd, index_dir=indexd)
with index2._index.reader() as reader:
assert reader.doc_count() == 1
def test_index_open_nonexistent_noclear(tmp_app_index_dir_paths):
_, appd, indexd = tmp_app_index_dir_paths
index = Index(app_dir=appd, index_dir=indexd)
assert not exists(indexd)
index.open(clear=False)
assert exists(indexd)
def test_index_open_nonexistent_clear(tmp_app_index_dir_paths):
_, appd, indexd = tmp_app_index_dir_paths
index = Index(app_dir=appd, index_dir=indexd)
assert not exists(indexd)
index.open(clear=True)
assert exists(indexd)
def test_index_open_noclear(index_one):
assert len(index_one) == 1
index_one.open(clear=False)
assert len(index_one) == 1
def test_index_open_clear(index_one):
assert len(index_one) == 1
index_one.open(clear=True)
assert len(index_one) == 0
def test_index_search_repr(index_one):
search = index_one.search("myquery")
assert repr(search) == '<Search (query="text:myquery")>'
def test_index_search_tag(index_one):
with index_one.search("tag:mytag0") as results:
assert len(results) == 1
assert results[0].get("content") == "mycontent0"
def test_index_search_content(index_one):
with index_one.search("content:mycontent0") as results:
assert len(results) == 1
assert results[0].get("tag") == ["mytag0"]
def test_index_search_boolean(index_one):
with index_one.search("content:mycontent0 AND tag:mytag0") as results:
assert len(results) == 1
assert results[0].get("tag") == ["mytag0"]
def test_results_iter(index_one):
seen = []
with index_one.search("tag:mytag0") as results:
for elem in results:
seen.append(elem)
assert len(seen) == 1
def test_results_bool():
_results = []
search = object()
results = Results(_results, search)
assert not bool(results)
_results.append({})
assert bool(results)
def test_results_repr(index_one):
search = '<Search (query="tag:mytag0")>'
expected = "<Results (count=1, search=%s)>" % (search,)
with index_one.search("tag:mytag0") as results:
assert repr(results) == expected
def test_result_length(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
assert len(result) == len(result.fields.keys())
def test_result_iter(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
for key in result:
assert key in ("tag", "id", "content", "updated")
def test_result_getitem(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
assert result["id"] == result.fields["id"]
def test_result_contains(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
assert "id" in result
assert "foo" not in result
def test_result_repr(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
assert repr(result) == "<Result (%s)>" % (result.fields,)
def test_result_format(index_one):
with index_one.search("tag:mytag0") as results:
result = results[0]
assert format(result, "{id}") == result.fields["id"]
assert format(result, "{tags}") == ",".join(result.fields["tag"])
assert format(result, "{content}") == result.fields["content"]
def test_index_add(index_one):
assert len(index_one) == 1
with index_one.search("tag:mytag1") as results:
assert len(results) == 0
index_one.add(**tests.test_doc1)
assert len(index_one) == 2
with index_one.search("tag:mytag1") as results:
assert len(results) == 1
def test_index_add_all(index_one):
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
with index_one.search("tag:test_index_add_all") as results:
assert len(results) == 0
doc1 = {"tag": "test_index_add_all", "content": "content1"}
doc2 = {"tag": "test_index_add_all", "content": "content2"}
index_one.add_all([doc1, doc2])
with index_one._index.reader() as reader:
assert reader.doc_count() == 3
with index_one.search("tag:test_index_add_all") as results:
assert len(results) == 2
def test_index_remove_none(index_one):
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
index_one.remove()
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
def test_index_remove_found(index_one):
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
docs = list(index_one)
assert len(docs) == 1
doc = docs[0]
result = index_one.remove(doc["id"])
assert result == 1
with index_one._index.reader() as reader:
assert reader.doc_count() == 0
def test_index_remove_not_found(index_one):
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
assert index_one.remove("invalidid") == 0
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
def test_index_remove_bytes_key(index_one):
doc = list(index_one)[0]
assert index_one.remove(doc["id"].encode("ascii")) == 1
with index_one._index.reader() as reader:
assert reader.doc_count() == 0
def test_index_dump_empty(tmpd, index_empty):
path = join(tmpd, "dump.json")
with open(path, "w") as f:
index_empty.dump(f)
with open(path) as f:
docs = json.load(f)
assert len(docs) == 0
def test_index_dump_one(tmpd, index_one):
path = join(tmpd, "dump.json")
with open(path, "w") as f:
index_one.dump(f)
with open(path) as f:
docs = json.load(f)
assert len(docs) == 1
doc = docs[0]
assert doc["tag"] == ["mytag0"]
assert doc["content"] == "mycontent0"
def test_index_load(tmpd, index_empty):
path = join(tmpd, "load.json")
with open(path, "w") as f:
json.dump([tests.test_doc_dumped], f)
assert len(index_empty) == 0
with open(path, "r") as f:
index_empty.load(f)
assert len(index_empty) == 1
with index_empty.search("tag:mytag") as results:
assert len(results) == 1
assert results[0].get("content") == tests.test_doc_dumped["content"]
def test_index_get_tags(index_one):
tags = index_one.get_tags()
assert tags == ["mytag0"]
def test_index_get_tags_prefix(index_one):
assert index_one.get_tags(prefix=None) == ["mytag0"]
assert index_one.get_tags(prefix="") == ["mytag0"]
assert index_one.get_tags(prefix="m") == ["mytag0"]
assert index_one.get_tags(prefix="mytag0") == ["mytag0"]
assert index_one.get_tags(prefix="ytag") == []
def test_index_get_tags_with_counts(index_one):
docs = [
{"tag": "mytag2", "content": "doc1"},
{"tag": "mytag1", "content": "doc2"},
{"tag": "mytag0", "content": "doc3"},
{"tag": "mytag4", "content": "doc4"},
{"tag": "mytag3", "content": "doc5"},
{"tag": "testtag0", "content": "doc6"},
]
index_one.add_all(docs)
expected = [
(2, "mytag0"),
(1, "mytag1"),
(1, "mytag2"),
(1, "mytag3"),
(1, "mytag4"),
(1, "testtag0"),
]
assert index_one.get_tags(counts=True) == expected
expected.pop()
assert index_one.get_tags(prefix="mytag", counts=True) == expected
def test_index_pprint_default(capsys, index_one):
index_one.pprint()
docid = list(index_one)[0]["id"]
expected = "id: " + util.decode(docid) + "\n"
out, err = capsys.readouterr()
assert not err
assert out.startswith(expected)
def test_index_clear(index_one):
with index_one._index.reader() as reader:
assert reader.doc_count() == 1
index_one.clear()
with index_one._index.reader() as reader:
assert reader.doc_count() == 0
def test_index_last_modified_utc(index_one):
dt = datetime.datetime.utcfromtimestamp(index_one._index.last_modified())
assert index_one.last_modified() == dt
dt = index_one.last_modified(localize=True)
now = datetime.datetime.now()
delta = now - dt
assert 0 < delta.total_seconds() < 1
def test_index_upgrade_not_changed(index_one):
assert index_one.upgrade() == 0
def test_index_upgrade_changed(index_empty):
index = index_empty
tag = "mytag"
content = "mycontent"
old_id = hashlib.md5(content.encode("ascii")).hexdigest()
new_id = schema.identifier({"content": content, "tag": tag})
index.add(id=old_id, content=content, tag=[tag])
with index.search("id:" + old_id) as results:
assert len(results) == 1
assert results[0]["id"] == old_id
with index.search(tag) as results:
assert len(results) == 1
assert results[0]["id"] == old_id
assert index.upgrade() == 1
with index.search("id:" + old_id) as results:
assert not results
with index.search("id:" + new_id) as results:
assert len(results) == 1
assert results[0]["id"] == new_id
assert len(index) == 1
with index.search(tag) as results:
assert len(results) == 1
assert results[0]["id"] == new_id
def test_index_load_python2(monkeypatch):
expected = {
"id": ("02a7cefe1189668fa85b56b52ee1e769" "1ee1821913f2031c8117263c07526468"),
"content": "Hello, from Python2",
"tag": ["python2"],
"updated": datetime.datetime(2017, 9, 15, 14, 58, 12, 441405, tzinfo=UTC),
}
import knowhow
home_dir = join(abspath(dirname(dirname(knowhow.__file__))), "data", "homepy2")
data_dir = join(home_dir, "datapy2")
monkeypatch.setenv("KNOWHOW_HOME", home_dir)
monkeypatch.setenv("KNOWHOW_DATA", data_dir)
def load_py2(data, *args, **kwargs):
try:
return pickle.loads(data, *args, **kwargs)
except UnicodeDecodeError as e:
if PYTHON2 or not e.args[0] == "ascii":
raise
result = pickle.loads(data, encoding="bytes")
if isinstance(result, dict):
d = {}
method = result.iteritems if PYTHON2 else result.items
for k, v in method():
if isinstance(k, bytes):
k = k.decode("ascii")
d[k] = v
if d:
result = d
return result
index = Index()
assert len(index) == 1
with index.search("tag:python2") as results:
assert len(results) == 1
with patch("whoosh.columns.loads", load_py2):
result = results[0]
assert sorted(result.fields.keys()) == sorted(expected.keys())
assert result.fields == expected
| true | true |
f7f553ba17540df38791774f2fb3554f781b9016 | 10,451 | py | Python | src/pilot/rm-instanceHA-node.py | tahir696/JetPack-1 | 9bd5b04d1d5997693a52eb16a91e9f4f60d98b33 | [
"Apache-2.0"
] | null | null | null | src/pilot/rm-instanceHA-node.py | tahir696/JetPack-1 | 9bd5b04d1d5997693a52eb16a91e9f4f60d98b33 | [
"Apache-2.0"
] | null | null | null | src/pilot/rm-instanceHA-node.py | tahir696/JetPack-1 | 9bd5b04d1d5997693a52eb16a91e9f4f60d98b33 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2016-2018 Dell Inc. or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
# Run this script run from the director node as the director's admin user.
# This script assumes the update_ssh_config.py is present.
###############################################################################
# IMPORTS
import argparse
import os
import sys
import subprocess
import shlex
import re
import paramiko
import logging
# Dell utilities
from identify_nodes import main as identify_nodes
from credential_helper import CredentialHelper
from update_ssh_config import main as update_ssh_config
FORMAT = '%(levelname)s: %(message)s'
logging.basicConfig(format=FORMAT)
LOG = logging.getLogger(os.path.splitext(os.path.basename(sys.argv[0]))[0])
# Global method definition
def ssh_cmd(address, user, command):
    """Run ``command`` on host ``address`` as ``user`` over SSH.

    Returns a ``(stdout, stderr)`` tuple of the remote command's output.
    On connection failure the error is logged and ``("", "host not up")``
    is returned.  (The original returned the bare string "host not up"
    on this path, which made every ``out, err = ssh_cmd(...)`` caller
    blow up with a ValueError instead of seeing the failure.)
    """
    client = paramiko.SSHClient()
    try:
        client.load_system_host_keys()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(address, username=user)
        _stdin, ss_stdout, ss_stderr = client.exec_command(command)
        r_out, r_err = ss_stdout.read(), ss_stderr.read()
    except IOError:
        LOG.error(".. host " + address + " is not up")
        return "", "host not up"
    finally:
        # Always release the connection, including on the error path
        # (the original leaked the client when connect() raised).
        client.close()
    return r_out, r_err
def awk_it(instring, index, delimiter=" "):
try:
return [instring,
instring.split(delimiter)[index-1]][max(0, min(1, index))]
except:
return ""
def check_ip_validity(ipaddr):
    """Return a regex match object if ``ipaddr`` is a valid dotted-quad
    IPv4 address (each octet 0-255), else None.

    Uses raw strings for the ``\\.`` separators; the original spelled
    them in plain strings, which is an invalid escape sequence and emits
    a DeprecationWarning on Python 3.
    """
    octet_exp = r"(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)"
    valid_ip_regex = ("^" + octet_exp + r"\." +
                      octet_exp + r"\." +
                      octet_exp + r"\." +
                      octet_exp + "$")
    return re.search(valid_ip_regex, ipaddr)
def logging_level(string):
    """argparse ``type=`` converter: map a level name such as "debug" or
    "INFO" to its numeric value from the logging module.

    Raises argparse.ArgumentTypeError for unknown names, or for logging
    attributes that are not nonnegative integers.
    """
    string_level = string
    try:
        # Convert to upper case to allow the user to specify
        # --logging-level=DEBUG or --logging-level=debug.
        numeric_level = getattr(logging, string_level.upper())
    except AttributeError:
        raise argparse.ArgumentTypeError(
            "Unknown logging level: {}".format(string_level))
    # The original tested isinstance(..., (int, long)); `long` does not
    # exist on Python 3 and raised NameError there.  Logging levels are
    # plain ints on both major versions, so `int` alone suffices.
    if not isinstance(numeric_level, int) or int(numeric_level) < 0:
        raise argparse.ArgumentTypeError(
            "Logging level not a nonnegative integer: {!r}".format(
                numeric_level))
    return numeric_level
# Delete a controller node resource
def delete_controller_node_resources(controller_node_ip,
                                     first_controller_node_ip):
    """Remove one controller from the pacemaker cluster.

    Asks the node for its hostname, trims it to the short (first-dot)
    form, and runs ``pcs cluster node remove`` on the first controller.
    """
    user = "heat-admin"
    hostname_out, _ = ssh_cmd(controller_node_ip, user, "hostname")
    short_name = awk_it(hostname_out.strip(), 1, ".")
    LOG.info("Delete controller node resource {}".format(short_name))
    remove_cmd = "sudo pcs cluster node remove " + short_name
    ssh_cmd(first_controller_node_ip, user, remove_cmd)
# Delete a compute node resource
def delete_compute_node_resources(compute_node_ip, first_controller_node_ip):
    """Detach one compute node from the InstanceHA pacemaker setup.

    Stops and disables pacemaker_remote on the compute node, then
    removes its remote resource, its ipmilan stonith device, and finally
    cleans the resource state via the first controller.
    """
    user = "heat-admin"
    out, _ = ssh_cmd(compute_node_ip, user, "sudo crm_node -n")
    crm_node_name = out.strip()
    nova_compute_name = awk_it(crm_node_name, 1, ".")
    LOG.info("Delete compute node resources {}.".format(compute_node_ip))
    # Take pacemaker_remote down on the node itself.
    for remote_cmd in ("sudo systemctl stop pacemaker_remote",
                       "sudo systemctl disable pacemaker_remote"):
        ssh_cmd(compute_node_ip, user, remote_cmd)
    # Drop the cluster-side resources from the first controller.
    ssh_cmd(first_controller_node_ip, user,
            "sudo pcs resource delete " + crm_node_name)
    ssh_cmd(first_controller_node_ip, user,
            "sudo pcs stonith delete ipmilan-" + nova_compute_name)
    LOG.info("Compute node resources {}.".format(nova_compute_name))
    ssh_cmd(first_controller_node_ip, user,
            "sudo pcs resource clean " + crm_node_name)
# Main Routine
def main():
    """Parse CLI options, gather node IPs from the undercloud, and remove
    the requested compute or controller node from InstanceHA.

    -compute IP / -controller IP are mutually exclusive; exactly one node
    is removed per invocation.  Node discovery shells out through
    cat/grep/awk pipelines over ~/.ssh/config and ``nova list``.
    """
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-compute",
                       "--compute",
                       dest="compute_node_ip",
                       action="store",
                       default='')
    group.add_argument("-controller",
                       "--controller",
                       dest="controller_node_ip",
                       action="store",
                       default='')
    # NOTE(review): `Constants` is never imported in this module, so
    # evaluating this default raises NameError at startup — confirm the
    # intended import (other pilot scripts use a constants module).
    parser.add_argument('-f',
                        '--file',
                        help='name of json file containing the node being set',
                        default=Constants.INSTACKENV_FILENAME)
    parser.add_argument("-l",
                        "--logging-level",
                        default="INFO",
                        type=logging_level,
                        help="""logging level defined by the logging module;
                        choices include CRITICAL, ERROR, WARNING,
                        INFO, and DEBUG""", metavar="LEVEL")
    args = parser.parse_args()
    home_dir = os.path.expanduser('~')
    # NOTE(review): undercloudrc_name, undercloud_config and instack_file
    # are computed but never used below; kept for byte-identical parity.
    undercloudrc_name = os.path.join(home_dir, 'stackrc')
    oc_stack_name = CredentialHelper.get_overcloud_name()
    ssh_config = os.path.join(home_dir, '.ssh/config')
    undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
    instack_file = os.path.expanduser(args.file)
    # Run ~/pilot/identify_nodes.py > ~/undercloud_nodes.txt
    cmd = os.path.join(home_dir,
                       'pilot/identify_nodes.py > ~/undercloud_nodes.txt')
    os.system(cmd)
    # Get CONTROLLER_NODES_IP
    # Equivalent of: cat ~/.ssh/config | grep -A1 "cntl" | awk '/Hostname/ {print $2}'
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    controller_nodes_ip = p3.communicate()[0].split()
    # Get CONTROLLER_NODE_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    controller_node_names = p2.communicate()[0].split()
    # Get COMPUTE_NODES_IP
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    compute_nodes_ip = p3.communicate()[0].split()
    # Get COMPUTE_NOVA_NAMES
    p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    compute_nova_names = p2.communicate()[0].split()
    # Get first_controller_node_ip
    p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
                          stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
                          stdin=p2.stdout,
                          stdout=subprocess.PIPE)
    first_controller_node_ip = p3.communicate()[0].rstrip()
    oc_auth_url, oc_tenant_name, oc_username, oc_password = \
        CredentialHelper.get_overcloud_creds()
    LOG.setLevel(args.logging_level)
    LOG.debug("home_dir: {}".format(home_dir))
    LOG.debug("oc_stack_name: {}".format(oc_stack_name))
    LOG.debug("oc_auth_url: {}".format(oc_auth_url))
    LOG.debug("oc_username: {}".format(oc_username))
    LOG.debug("oc_password: {}".format(oc_password))
    LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
    LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
    # NOTE(review): the next line logs controller_nodes_ip under the
    # "names" label (likewise compute_nova_names below) — apparent
    # copy/paste slip; flagged only, not changed here.
    LOG.debug("controller_nodes_names: {}".format(controller_nodes_ip))
    LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
    LOG.debug("compute_nova_names: {}".format(compute_nodes_ip))
    # Execute Compute node deletion
    if args.compute_node_ip != '':
        compute_node_ip = args.compute_node_ip.rstrip()
        if check_ip_validity(compute_node_ip):
            LOG.info("*** Removing a compute node {} to InstanceHA"
                     " configuration.".format(compute_node_ip))
            delete_compute_node_resources(compute_node_ip,
                                          first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}"
                         .format(compute_node_ip))
            exit(-1)
    # Execute Controller node deletion
    if args.controller_node_ip != '':
        controller_node_ip = args.controller_node_ip.rstrip()
        if check_ip_validity(controller_node_ip):
            LOG.info("*** Removing a controller node {} to InstanceHA"
                     " configuration.".format(controller_node_ip))
            LOG.debug("controller_node_ip: {}".format(controller_node_ip))
            delete_controller_node_resources(controller_node_ip,
                                             first_controller_node_ip)
        else:
            LOG.critical("!!! - Fatal Error: Invalid IP address: {}"
                         .format(controller_node_ip))
            exit(-1)
if __name__ == "__main__":
    main()
| 38.707407 | 79 | 0.604153 |
m_node -n")
crm_node_name = out.strip()
nova_compute_name = awk_it(crm_node_name, 1, ".")
LOG.info("Delete compute node resources {}."
.format(compute_node_ip))
ssh_cmd(compute_node_ip, "heat-admin",
"sudo systemctl stop pacemaker_remote")
ssh_cmd(compute_node_ip, "heat-admin",
"sudo systemctl disable pacemaker_remote")
ssh_cmd(first_controller_node_ip, "heat-admin",
"sudo pcs resource delete " + crm_node_name)
ssh_cmd(first_controller_node_ip, "heat-admin",
"sudo pcs stonith delete ipmilan-" + nova_compute_name)
LOG.info("Compute node resources {}."
.format(nova_compute_name))
ssh_cmd(first_controller_node_ip, "heat-admin",
"sudo pcs resource clean " + crm_node_name)
# Main Routine
def main():
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("-compute",
"--compute",
dest="compute_node_ip",
action="store",
default='')
group.add_argument("-controller",
"--controller",
dest="controller_node_ip",
action="store",
default='')
parser.add_argument('-f',
'--file',
help='name of json file containing the node being set',
default=Constants.INSTACKENV_FILENAME)
parser.add_argument("-l",
"--logging-level",
default="INFO",
type=logging_level,
help="""logging level defined by the logging module;
choices include CRITICAL, ERROR, WARNING,
INFO, and DEBUG""", metavar="LEVEL")
args = parser.parse_args()
home_dir = os.path.expanduser('~')
undercloudrc_name = os.path.join(home_dir, 'stackrc')
oc_stack_name = CredentialHelper.get_overcloud_name()
ssh_config = os.path.join(home_dir, '.ssh/config')
undercloud_config = os.path.join(home_dir, 'undercloud_nodes.txt')
instack_file = os.path.expanduser(args.file)
# Run ~/pilot/identify_nodes.py > ~/undercloud_nodes.txt
cmd = os.path.join(home_dir,
'pilot/identify_nodes.py > ~/undercloud_nodes.txt')
os.system(cmd)
# Get CONTROLLER_NODES_IP
p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split('grep -A1 "cntl"'),
stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
stdin=p2.stdout,
stdout=subprocess.PIPE)
controller_nodes_ip = p3.communicate()[0].split()
# Get CONTROLLER_NODE_NAMES
p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split('awk \'/controller/ {print $4}\''),
stdin=p1.stdout,
stdout=subprocess.PIPE)
controller_node_names = p2.communicate()[0].split()
# Get COMPUTE_NODES_IP
p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split('egrep -A1 -h "nova|compute"'),
stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
stdin=p2.stdout,
stdout=subprocess.PIPE)
compute_nodes_ip = p3.communicate()[0].split()
# Get COMPUTE_NOVA_NAMES
p1 = subprocess.Popen(['nova', 'list'], stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split('awk \'/compute/ {print $4}\''),
stdin=p1.stdout,
stdout=subprocess.PIPE)
compute_nova_names = p2.communicate()[0].split()
# Get first_controller_node_ip
p1 = subprocess.Popen(['cat', ssh_config], stdout=subprocess.PIPE)
p2 = subprocess.Popen(shlex.split('grep -A1 "cntl0"'),
stdin=p1.stdout,
stdout=subprocess.PIPE)
p3 = subprocess.Popen(shlex.split('awk \'/Hostname/ {print $2}\''),
stdin=p2.stdout,
stdout=subprocess.PIPE)
first_controller_node_ip = p3.communicate()[0].rstrip()
oc_auth_url, oc_tenant_name, oc_username, oc_password = \
CredentialHelper.get_overcloud_creds()
LOG.setLevel(args.logging_level)
LOG.debug("home_dir: {}".format(home_dir))
LOG.debug("oc_stack_name: {}".format(oc_stack_name))
LOG.debug("oc_auth_url: {}".format(oc_auth_url))
LOG.debug("oc_username: {}".format(oc_username))
LOG.debug("oc_password: {}".format(oc_password))
LOG.debug("oc_tenant_name: {}".format(oc_tenant_name))
LOG.debug("controller_nodes_ip: {}".format(controller_nodes_ip))
LOG.debug("controller_nodes_names: {}".format(controller_nodes_ip))
LOG.debug("compute_nodes_ip: {}".format(compute_nodes_ip))
LOG.debug("compute_nova_names: {}".format(compute_nodes_ip))
# Execute Compute node deletion
if args.compute_node_ip != '':
compute_node_ip = args.compute_node_ip.rstrip()
if check_ip_validity(compute_node_ip):
LOG.info("*** Removing a compute node {} to InstanceHA"
" configuration.".format(compute_node_ip))
delete_compute_node_resources(compute_node_ip,
first_controller_node_ip)
else:
LOG.critical("!!! - Fatal Error: Invalid IP address: {}"
.format(compute_node_ip))
exit(-1)
# Execute Controller node deletion
if args.controller_node_ip != '':
controller_node_ip = args.controller_node_ip.rstrip()
if check_ip_validity(controller_node_ip):
LOG.info("*** Removing a controller node {} to InstanceHA"
" configuration.".format(controller_node_ip))
LOG.debug("controller_node_ip: {}".format(controller_node_ip))
delete_controller_node_resources(controller_node_ip,
first_controller_node_ip)
else:
LOG.critical("!!! - Fatal Error: Invalid IP address: {}"
.format(controller_node_ip))
exit(-1)
if __name__ == "__main__":
main()
| true | true |
f7f554e435082648fb1440f17ece05aabf7278ce | 1,282 | py | Python | tests/test_dict_compatibility.py | kdheepak/carsons | 6919cd5be416a58f14c1d5d933a52905a6d5f6a6 | [
"MIT"
] | null | null | null | tests/test_dict_compatibility.py | kdheepak/carsons | 6919cd5be416a58f14c1d5d933a52905a6d5f6a6 | [
"MIT"
] | null | null | null | tests/test_dict_compatibility.py | kdheepak/carsons | 6919cd5be416a58f14c1d5d933a52905a6d5f6a6 | [
"MIT"
] | null | null | null | from numpy.testing import assert_array_almost_equal
from .test_carsons import ABCN_line_z_primitive
from carsons.carsons import CarsonsEquations
def test_compatibility_with_dict_of_phases():
    """CarsonsEquations must accept legacy models whose attributes are plain dicts."""

    class BackwardsCompatibleModel:
        """Old-style model exposing per-conductor data as dictionaries."""

        def __init__(self):
            # Per-phase conductor resistance (insertion order A, B, C, N
            # matters: it fixes the row/column order of the matrix).
            self.resistance = {
                "A": 0.000115575,
                "B": 0.000115575,
                "C": 0.000115575,
                "N": 0.000367852,
            }
            # Geometric mean radius of each conductor.
            self.geometric_mean_radius = {
                "A": 0.00947938,
                "B": 0.00947938,
                "C": 0.00947938,
                "N": 0.00248107,
            }
            # (x, y) position of each conductor on the pole.
            self.wire_positions = {
                "A": (0.762, 8.5344),
                "B": (2.1336, 8.5344),
                "C": (0, 8.5344),
                "N": (1.2192, 7.3152),
            }
            # Legacy representation: 'phases' provided as a dictionary.
            self.phases = {
                "A": "A",
                "B": "B",
                "C": "C",
                "N": "N",
            }

    # A model that provides 'phases' as a dictionary must still produce
    # the reference ABCN primitive impedance matrix.
    legacy_model = BackwardsCompatibleModel()
    z_primitive = CarsonsEquations(legacy_model).build_z_primitive()
    assert_array_almost_equal(z_primitive, ABCN_line_z_primitive(), decimal=4)
| 27.276596 | 61 | 0.473479 | from numpy.testing import assert_array_almost_equal
from .test_carsons import ABCN_line_z_primitive
from carsons.carsons import CarsonsEquations
def test_compatibility_with_dict_of_phases():
class BackwardsCompatibleModel():
def __init__(self):
self.resistance = {
"A": 0.000115575,
"B": 0.000115575,
"C": 0.000115575,
"N": 0.000367852,
}
self.geometric_mean_radius = {
"A": 0.00947938,
"B": 0.00947938,
"C": 0.00947938,
"N": 0.00248107,
}
self.wire_positions = {
"A": (0.762, 8.5344),
"B": (2.1336, 8.5344),
"C": (0, 8.5344),
"N": (1.2192, 7.3152),
}
self.phases = {
"A": "A",
"B": "B",
"C": "C",
"N": "N",
}
model = BackwardsCompatibleModel()
z_primative = CarsonsEquations(model).build_z_primitive()
assert_array_almost_equal(
z_primative,
ABCN_line_z_primitive(),
decimal=4
)
| true | true |
f7f5551dd0a2595ba739618bc36ced97cc0c05e3 | 10,439 | py | Python | TestMaster/libs/ddt.py | allblue2025/learn_git | 8ed884d0e0cb0dac14e26856be19a9a341bef0c0 | [
"MIT"
] | null | null | null | TestMaster/libs/ddt.py | allblue2025/learn_git | 8ed884d0e0cb0dac14e26856be19a9a341bef0c0 | [
"MIT"
] | null | null | null | TestMaster/libs/ddt.py | allblue2025/learn_git | 8ed884d0e0cb0dac14e26856be19a9a341bef0c0 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# This file is a part of DDT (https://github.com/txels/ddt)
# Copyright 2012-2015 Carles Barrobés and DDT contributors
# For the exact contribution history, see the git revision log.
# DDT is licensed under the MIT License, included in
# https://github.com/txels/ddt/blob/master/LICENSE.md
import inspect
import json
import os
import re
import codecs
from functools import wraps
try:
import yaml
except ImportError: # pragma: no cover
_have_yaml = False
else:
_have_yaml = True
__version__ = '1.2.1'
# Marker attribute names stashed on decorated test methods and consumed
# later by the `ddt` class decorator. The '%' prefix guarantees they can
# never collide with a real python attribute name.
DATA_ATTR = '%values' # store the data the test must run with
FILE_ATTR = '%file_path' # store the path to JSON file
UNPACK_ATTR = '%unpack' # remember that we have to unpack values
index_len = 5 # default max length of case index; @data() overwrites it
# Scalar types whose str() form is stable enough to embed in a test name.
# "basestring" only exists on Python 2; fall back to "str" on Python 3.
try:
    trivial_types = (type(None), bool, int, float, basestring)
except NameError:
    trivial_types = (type(None), bool, int, float, str)


def is_trivial(value):
    """
    Return True when *value* is a plain scalar, or a list/tuple composed
    exclusively (and recursively) of such scalars.
    """
    if isinstance(value, trivial_types):
        return True
    if isinstance(value, (list, tuple)):
        return all(is_trivial(item) for item in value)
    return False
def unpack(func):
    """
    Decorator marking *func* for argument unpacking: when test cases are
    generated, each tuple/list data value is spread as positional
    arguments and each dict as keyword arguments.
    """
    setattr(func, UNPACK_ATTR, True)
    return func
def data(*values):
    """
    Method decorator for ``unittest.TestCase`` methods: generates one
    test case per value in *values*.
    """
    # Remember how many digits are needed so generated case indices keep
    # their ordering (e.g. 3 digits once there are 100+ values).
    global index_len
    index_len = len('%d' % len(values))
    return idata(values)
def idata(iterable):
    """
    Method decorator for ``unittest.TestCase`` methods: generates one
    test case per item of *iterable*.
    """
    def tag(test_method):
        # Stash the data on the method; the ddt class decorator picks
        # it up later and expands it into individual tests.
        setattr(test_method, DATA_ATTR, iterable)
        return test_method
    return tag
def file_data(value):
    """
    Method decorator for ``unittest.TestCase`` methods that loads its
    test data from a file.

    *value* is a path relative to the directory of the file containing
    the decorated test case; the file must hold JSON-encoded data that is
    either a list or a dict. A list yields one test per entry, with the
    entry's value concatenated to the test method name. A dict yields one
    test per key: keys become name suffixes and values become test data.
    """
    def tag(test_method):
        # Record the path; the ddt class decorator resolves and loads it.
        setattr(test_method, FILE_ATTR, value)
        return test_method
    return tag
def mk_test_name(name, value, index=0):
    """
    Generate a new name for a test case.

    Takes the original test name and appends a zero-padded ordinal index
    and a string representation of the value, converting the result into
    a valid python identifier by replacing extraneous characters with
    ``_``.

    Non-trivial values (mocks, custom objects, ...) could produce
    different names on different runs (e.g. different dictionary key
    order depending on PYTHONHASHSEED), so only trivial scalars — or
    dicts/tuples carrying an explicit case name — contribute a readable
    suffix; everything else falls back to the ordinal alone.
    """
    # Zero-pad the ordinal so generated test names keep their ordering
    # (index_len is set by the @data decorator from the value count).
    index = "{0:0{1}}".format(index + 1, index_len)
    # Non-trivial values other than dicts and tuples cannot be turned
    # into a stable identifier: use the ordinal only.
    if not is_trivial(value) and not isinstance(value, (dict, tuple)):
        return "{0}_{1}".format(name, index)
    if isinstance(value, dict):
        # Dict test data: use its 'case_name' entry as the readable suffix.
        try:
            value = value['case_name']
        except KeyError:
            return "{0}_{1}".format(name, index)
    if isinstance(value, tuple):
        # Named-tuple test data: use its 'title' field as the readable
        # suffix. Plain tuples have no 'title' attribute and raise
        # AttributeError here — bug fix: the original caught KeyError,
        # which attribute access never raises, so @data((1, 2)) crashed
        # instead of falling back to the ordinal-only name.
        try:
            value = value.title
        except AttributeError:
            return "{0}_{1}".format(name, index)
    try:
        value = str(value)
    except UnicodeEncodeError:
        # fallback for python2
        value = value.encode('ascii', 'backslashreplace')
    test_name = "{0}_{1}_{2}".format(name, index, value)
    # Replace every character that is not valid in a python identifier.
    return re.sub(r'\W|^(?=\d)', '_', test_name)
def feed_data(func, new_name, test_data_docstring, *args, **kwargs):
    """
    Build a zero-argument test method that invokes *func* with the
    captured test-data arguments, renamed to *new_name*.

    The docstring of the generated method is, in order of preference:
    the explicit *test_data_docstring*, the original docstring with the
    data interpolated via str.format, or the original docstring as-is.
    """
    @wraps(func)
    def bound_test(self):
        return func(self, *args, **kwargs)

    bound_test.__name__ = new_name
    bound_test.__wrapped__ = func
    if test_data_docstring is not None:
        # An explicit per-case docstring wins outright.
        bound_test.__doc__ = test_data_docstring
    elif func.__doc__:
        # Try to interpolate the test data into the original docstring.
        # Stray {...} placeholders the user did not intend as format
        # fields must not blow up the test generation, so formatting
        # errors are deliberately ignored.
        try:
            bound_test.__doc__ = func.__doc__.format(*args, **kwargs)
        except (IndexError, KeyError):
            pass
    return bound_test
def add_test(cls, test_name, test_docstring, func, *args, **kwargs):
    """
    Attach a generated test method named *test_name* to *cls*, based on
    the existing *func* bound to the given data arguments.
    """
    generated = feed_data(func, test_name, test_docstring, *args, **kwargs)
    setattr(cls, test_name, generated)
def process_file_data(cls, name, func, file_attr):
    """
    Process the parameter in the `file_data` decorator: locate the data
    file next to the test class's source file, load it (JSON or YAML)
    and generate the corresponding test methods on *cls*.

    If the file is missing, or is a YAML file while PyYAML is not
    installed, a single failing placeholder test is generated instead so
    the problem surfaces at test run time rather than import time.
    """
    # Resolve the data file relative to the source file defining *cls*.
    cls_path = os.path.abspath(inspect.getsourcefile(cls))
    data_file_path = os.path.join(os.path.dirname(cls_path), file_attr)
    def create_error_func(message): # pylint: disable-msg=W0613
        # Build a stand-in test body that fails with a helpful message.
        def func(*args):
            raise ValueError(message % file_attr)
        return func
    # If file does not exist, provide an error function instead
    if not os.path.exists(data_file_path):
        test_name = mk_test_name(name, "error")
        test_docstring = """Error!"""
        add_test(cls, test_name, test_docstring,
                 create_error_func("%s does not exist"), None)
        return
    _is_yaml_file = data_file_path.endswith((".yml", ".yaml"))
    # Don't have YAML but want to use YAML file.
    if _is_yaml_file and not _have_yaml:
        test_name = mk_test_name(name, "error")
        test_docstring = """Error!"""
        add_test(
            cls,
            test_name,
            test_docstring,
            create_error_func("%s is a YAML file, please install PyYAML"),
            None
        )
        return
    # codecs.open decodes the file as UTF-8 on both python 2 and 3.
    with codecs.open(data_file_path, 'r', 'utf-8') as f:
        # Load the data from YAML or JSON
        if _is_yaml_file:
            data = yaml.safe_load(f)
        else:
            data = json.load(f)
    _add_tests_from_data(cls, name, func, data)
def _add_tests_from_data(cls, name, func, data):
    """
    Generate one test per entry of *data* (a list or a dict loaded from
    a data file) and attach them to *cls*.
    """
    for position, entry in enumerate(data):
        if isinstance(data, dict):
            # Dict: the key suffixes the test name, its value is the data.
            key = entry
            value = data[key]
            test_name = mk_test_name(name, key, position)
        elif isinstance(data, list):
            # List: the entry itself is both the data and the name suffix.
            value = entry
            test_name = mk_test_name(name, value, position)
        # Dict values are unpacked as keyword arguments; anything else is
        # passed through as a single positional argument.
        if isinstance(value, dict):
            add_test(cls, test_name, test_name, func, **value)
        else:
            add_test(cls, test_name, test_name, func, value)
def _is_primitive(obj):
    """
    Return True when *obj* looks like a "primitive" value.

    Heuristic: primitives (int, str, ...) carry no instance ``__dict__``,
    while ordinary user-defined objects do. Somewhat hacky but it works.
    """
    return not hasattr(obj, '__dict__')
def _get_test_data_docstring(func, value):
    """
    Resolve the docstring for a generated test case.

    Resolution strategy:
    1. If *value* is a non-primitive object carrying a docstring, use it.
    2. Otherwise return None, so the generated test name is used instead.
    """
    if not _is_primitive(value) and value.__doc__:
        return value.__doc__
    return None
def ddt(cls):
    """
    Class decorator for subclasses of ``unittest.TestCase``.

    Apply this decorator to the test case class, and then decorate test
    methods with ``@data``.

    For each method decorated with ``@data``, this will effectively create
    as many methods as data items are passed as parameters to ``@data``.

    The names of the test methods follow the pattern
    ``original_test_name_{ordinal}_{data}``. ``ordinal`` is the position
    of the data argument, starting with 1.

    For data we use a string representation of the data value converted
    into a valid python identifier. If ``data.__name__`` exists, we use
    that instead.

    For each method decorated with ``@file_data('test_data.json')``, the
    decorator will try to load the test_data.json file located relative
    to the python file containing the method that is decorated. It will,
    for each ``test_name`` key create as many methods in the list of
    values from the ``data`` key.
    """
    # Snapshot the class dict with list(): we add generated tests and
    # delete the template method while iterating.
    for name, func in list(cls.__dict__.items()):
        if hasattr(func, DATA_ATTR):
            # Inline data: one generated test per value.
            for i, v in enumerate(getattr(func, DATA_ATTR)):
                test_name = mk_test_name(name, getattr(v, "__name__", v), i)
                test_data_docstring = _get_test_data_docstring(func, v)
                if hasattr(func, UNPACK_ATTR):
                    if isinstance(v, tuple) or isinstance(v, list):
                        # unpack sequence as positional arguments
                        add_test(
                            cls,
                            test_name,
                            test_data_docstring,
                            func,
                            *v
                        )
                    else:
                        # unpack dictionary
                        add_test(
                            cls,
                            test_name,
                            test_data_docstring,
                            func,
                            **v
                        )
                else:
                    add_test(cls, test_name, test_data_docstring, func, v)
            # Remove the original template method: only the generated
            # per-case tests should run.
            delattr(cls, name)
        elif hasattr(func, FILE_ATTR):
            # File-based data: load the file and expand it into tests.
            file_attr = getattr(func, FILE_ATTR)
            process_file_data(cls, name, func, file_attr)
            delattr(cls, name)
    return cls
| 31.633333 | 80 | 0.618929 |
import inspect
import json
import os
import re
import codecs
from functools import wraps
try:
import yaml
except ImportError:
_have_yaml = False
else:
_have_yaml = True
__version__ = '1.2.1'
DATA_ATTR = '%values'
FILE_ATTR = '%file_path'
UNPACK_ATTR = '%unpack'
index_len = 5
try:
trivial_types = (type(None), bool, int, float, basestring)
except NameError:
trivial_types = (type(None), bool, int, float, str)
def is_trivial(value):
if isinstance(value, trivial_types):
return True
elif isinstance(value, (list, tuple)):
return all(map(is_trivial, value))
return False
def unpack(func):
setattr(func, UNPACK_ATTR, True)
return func
def data(*values):
global index_len
index_len = len(str(len(values)))
return idata(values)
def idata(iterable):
def wrapper(func):
setattr(func, DATA_ATTR, iterable)
return func
return wrapper
def file_data(value):
def wrapper(func):
setattr(func, FILE_ATTR, value)
return func
return wrapper
def mk_test_name(name, value, index=0):
index = "{0:0{1}}".format(index + 1, index_len)
if not is_trivial(value) and not isinstance(value, (dict, tuple)):
return "{0}_{1}".format(name, index)
if isinstance(value, dict):
try:
value = value['case_name']
except KeyError:
return "{0}_{1}".format(name, index)
if isinstance(value, tuple):
try:
value = value.title
except KeyError:
return "{0}_{1}".format(name, index)
try:
value = str(value)
except UnicodeEncodeError:
value = value.encode('ascii', 'backslashreplace')
test_name = "{0}_{1}_{2}".format(name, index, value)
return re.sub(r'\W|^(?=\d)', '_', test_name)
def feed_data(func, new_name, test_data_docstring, *args, **kwargs):
@wraps(func)
def wrapper(self):
return func(self, *args, **kwargs)
wrapper.__name__ = new_name
wrapper.__wrapped__ = func
if test_data_docstring is not None:
wrapper.__doc__ = test_data_docstring
else:
if func.__doc__:
try:
wrapper.__doc__ = func.__doc__.format(*args, **kwargs)
except (IndexError, KeyError):
pass
return wrapper
def add_test(cls, test_name, test_docstring, func, *args, **kwargs):
setattr(cls, test_name, feed_data(func, test_name, test_docstring,
*args, **kwargs))
def process_file_data(cls, name, func, file_attr):
cls_path = os.path.abspath(inspect.getsourcefile(cls))
data_file_path = os.path.join(os.path.dirname(cls_path), file_attr)
def create_error_func(message):
def func(*args):
raise ValueError(message % file_attr)
return func
if not os.path.exists(data_file_path):
test_name = mk_test_name(name, "error")
test_docstring = """Error!"""
add_test(cls, test_name, test_docstring,
create_error_func("%s does not exist"), None)
return
_is_yaml_file = data_file_path.endswith((".yml", ".yaml"))
if _is_yaml_file and not _have_yaml:
test_name = mk_test_name(name, "error")
test_docstring = """Error!"""
add_test(
cls,
test_name,
test_docstring,
create_error_func("%s is a YAML file, please install PyYAML"),
None
)
return
with codecs.open(data_file_path, 'r', 'utf-8') as f:
# Load the data from YAML or JSON
if _is_yaml_file:
data = yaml.safe_load(f)
else:
data = json.load(f)
_add_tests_from_data(cls, name, func, data)
def _add_tests_from_data(cls, name, func, data):
for i, elem in enumerate(data):
if isinstance(data, dict):
key, value = elem, data[elem]
test_name = mk_test_name(name, key, i)
elif isinstance(data, list):
value = elem
test_name = mk_test_name(name, value, i)
if isinstance(value, dict):
add_test(cls, test_name, test_name, func, **value)
else:
add_test(cls, test_name, test_name, func, value)
def _is_primitive(obj):
return not hasattr(obj, '__dict__')
def _get_test_data_docstring(func, value):
if not _is_primitive(value) and value.__doc__:
return value.__doc__
else:
return None
def ddt(cls):
for name, func in list(cls.__dict__.items()):
if hasattr(func, DATA_ATTR):
for i, v in enumerate(getattr(func, DATA_ATTR)):
test_name = mk_test_name(name, getattr(v, "__name__", v), i)
test_data_docstring = _get_test_data_docstring(func, v)
if hasattr(func, UNPACK_ATTR):
if isinstance(v, tuple) or isinstance(v, list):
add_test(
cls,
test_name,
test_data_docstring,
func,
*v
)
else:
# unpack dictionary
add_test(
cls,
test_name,
test_data_docstring,
func,
**v
)
else:
add_test(cls, test_name, test_data_docstring, func, v)
delattr(cls, name)
elif hasattr(func, FILE_ATTR):
file_attr = getattr(func, FILE_ATTR)
process_file_data(cls, name, func, file_attr)
delattr(cls, name)
return cls
| true | true |
f7f5560ed27c2b1ebb59b52d01c5db3a89eb6169 | 7,613 | py | Python | clarisse/var/lib/windows/export_alembic.py | GuillaumeVFX/pipel | a1bd726239e6887745396723c3aad5d61e88ce44 | [
"MIT"
] | 2 | 2020-05-12T11:38:44.000Z | 2022-03-07T04:13:50.000Z | clarisse/var/lib/windows/export_alembic.py | GuillaumeVFX/pipel | a1bd726239e6887745396723c3aad5d61e88ce44 | [
"MIT"
] | null | null | null | clarisse/var/lib/windows/export_alembic.py | GuillaumeVFX/pipel | a1bd726239e6887745396723c3aad5d61e88ce44 | [
"MIT"
] | null | null | null | # Copyright (C) 2009 - 2019 Isotropix SAS. All rights reserved.
#
# The information in this file is provided for the exclusive use of
# the software licensees of Isotropix. Contents of this file may not
# be distributed, copied or duplicated in any form, in whole or in
# part, without the prior written permission of Isotropix SAS.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# Define AbcExportOptionsUi attributes.
#
abc_export_options_ui_cid = '''
attribute_group "output" {
filename_save "filename" {
extension "abc"
}
}
attribute_group "animation" {
long[2] "frame_range" {
value 0 0
doc "Specify the start/end frames to export."
}
bool "write_one_frame_per_file" {
value no
doc "When checked, the export process will create one file per frame."
}
bool "transfer_source_data" {
value no
doc "When checked, Alembic objects will be exported by transferring the original data from the source file to the new one. Disabling this option will re-bake objects using Clarisse's frame rate, and custom properties will be lost."
}
}
attribute_group "combiners" {
bool "export_combiners" {
value yes
doc "When checked, combiners will be exported as pivots with their sub-objects as direct children. Sub-combiners are exported recursively."
}
}
attribute_group "scatterers" {
bool "export_scatterers" {
value yes
doc "When checked, scatterers will be exported as a hierarchy of instances, otherwise scatterers and their instances are not exported."
}
long "scatterer_export_mode" {
value 0
preset "Instances As Geometries" "0"
preset "Instances As Bounding Boxes" "1"
doc "When using the Geometries mode, scattered instances are exported as is. When using the Bounding Boxes mode, instances are replaced by their bounding box."
}
}
attribute_group "properties" {
bool "export_properties" {
value yes
doc "When checked, Particle Container properties will be exported. The exported properties are created under the standard Alembic property group .arbGeomParams."
}
long "compatibility_mode" {
value 0
preset "None" "0"
preset "Houdini" "1"
preset "Houdini and Katana" "2"
doc "Select a preset for the properties options Fill Sparse Properties, Promote To Geometry Parameter and Bake Indexed Properties. Options are editable when using the mode None, otherwise they are read-only with pre-defined values."
}
bool "fill_sparse_properties" {
value no
doc "When checked, sparse properties will be filled with a default value (zero). This option must be enabled to be compatible with applications that don't support sparse properties (e.g. Houdini)."
}
bool "promote_to_geometry_parameter" {
value yes
doc "When checked, all properties whose traits (type, size, ...) match Alembic's Typed Geometry Parameters will be exported as Geometry Parameters, and properties that don't match such traits are exported as regular properties. When unchecked, all properties are exported as regular properties. It is recommended to enable this option unless the target application doesn't support Geometry Parameters (e.g. Katana)."
}
bool "bake_indexed_properties" {
value no
doc "When checked, all indexed properties except Geometry Parameters will be baked as non-sparse array properties: values are unshared and will use more memory and disk space. Enabling this option can improve compatibility with other applications. When unchecked, indexed properties will be exported unchanged (a compound property with 2 children properties: an array of indices and an array of indexed values)."
}
}
'''
#
# Scripted class engine for AbcExportOptionsUi.
#
class AbcExportOptionsUiEngine(ix.api.ModuleScriptedClassEngine):
    """
    Scripted-class engine backing AbcExportOptionsUi: it keeps dependent
    attributes editable or read-only in sync as the user edits the
    Alembic export options.
    """

    def __init__(self):
        """Forward to the base scripted-class engine constructor."""
        ix.api.ModuleScriptedClassEngine.__init__(self)

    def on_attribute_change(self, object, attr, dirtiness, dirtiness_flags):
        """React to an attribute edit by toggling its dependent attributes."""
        changed = attr.get_name()
        if changed == "write_one_frame_per_file":
            # Per-frame files re-bake the data, so transferring source
            # data is not applicable while this option is on.
            object.get_attribute("transfer_source_data").set_read_only(attr.get_bool() == True)
        elif changed == "export_scatterers":
            # The scatterer export mode only matters when scatterers are
            # exported at all.
            object.get_attribute("scatterer_export_mode").set_read_only(attr.get_bool() == False)
        elif changed == "export_properties":
            # Every property-related option follows the master toggle.
            locked = attr.get_bool() == False
            for dependent in ("compatibility_mode",
                              "fill_sparse_properties",
                              "promote_to_geometry_parameter",
                              "bake_indexed_properties"):
                object.get_attribute(dependent).set_read_only(locked)
        elif changed == "compatibility_mode":
            # Presets force the three property options to fixed values and
            # lock them; the default ("None") preset leaves them editable.
            mode = attr.get_long()
            attr_fill = object.get_attribute("fill_sparse_properties")
            attr_promote = object.get_attribute("promote_to_geometry_parameter")
            attr_bake = object.get_attribute("bake_indexed_properties")
            attr_fill.set_bool(ix.api.AbcExportOptions.get_fill_sparse_properties(mode))
            attr_promote.set_bool(ix.api.AbcExportOptions.get_promote_to_geometry_parameter(mode))
            attr_bake.set_bool(ix.api.AbcExportOptions.get_bake_indexed_properties(mode))
            locked = mode != ix.api.AbcExportOptions.PropertiesCompatibilityMode_Default
            attr_fill.set_read_only(locked)
            attr_promote.set_read_only(locked)
            attr_bake.set_read_only(locked)
#
# Register the scripted class and engine. Registration binds the CID
# attribute description above to the engine instance that reacts to edits.
#
scripted_class_name = 'AbcExportOptionsUi'
if not ix.api.ModuleScriptedClass.register_scripted_class(ix.application, scripted_class_name, AbcExportOptionsUiEngine(), abc_export_options_ui_cid, True):
    ix.log_error('Failed to register class {}: the CID might be invalid or the class might already exist.'.format(scripted_class_name))
#
# Check that class registration is complete: a successfully registered
# class exposes the attributes declared in the CID.
#
scripted_class = ix.application.get_factory().get_classes().get(scripted_class_name)
if scripted_class and scripted_class.get_attribute_count() > 0:
    ix.log_info('Successfully registered class {}.'.format(scripted_class_name))
else:
    ix.log_error('Incomplete registration of class {}, Alembic export is disabled.'.format(scripted_class_name))
| 46.139394 | 428 | 0.704059 |
abc_export_options_ui_cid = '''
attribute_group "output" {
filename_save "filename" {
extension "abc"
}
}
attribute_group "animation" {
long[2] "frame_range" {
value 0 0
doc "Specify the start/end frames to export."
}
bool "write_one_frame_per_file" {
value no
doc "When checked, the export process will create one file per frame."
}
bool "transfer_source_data" {
value no
doc "When checked, Alembic objects will be exported by transferring the original data from the source file to the new one. Disabling this option will re-bake objects using Clarisse's frame rate, and custom properties will be lost."
}
}
attribute_group "combiners" {
bool "export_combiners" {
value yes
doc "When checked, combiners will be exported as pivots with their sub-objects as direct children. Sub-combiners are exported recursively."
}
}
attribute_group "scatterers" {
bool "export_scatterers" {
value yes
doc "When checked, scatterers will be exported as a hierarchy of instances, otherwise scatterers and their instances are not exported."
}
long "scatterer_export_mode" {
value 0
preset "Instances As Geometries" "0"
preset "Instances As Bounding Boxes" "1"
doc "When using the Geometries mode, scattered instances are exported as is. When using the Bounding Boxes mode, instances are replaced by their bounding box."
}
}
attribute_group "properties" {
bool "export_properties" {
value yes
doc "When checked, Particle Container properties will be exported. The exported properties are created under the standard Alembic property group .arbGeomParams."
}
long "compatibility_mode" {
value 0
preset "None" "0"
preset "Houdini" "1"
preset "Houdini and Katana" "2"
doc "Select a preset for the properties options Fill Sparse Properties, Promote To Geometry Parameter and Bake Indexed Properties. Options are editable when using the mode None, otherwise they are read-only with pre-defined values."
}
bool "fill_sparse_properties" {
value no
doc "When checked, sparse properties will be filled with a default value (zero). This option must be enabled to be compatible with applications that don't support sparse properties (e.g. Houdini)."
}
bool "promote_to_geometry_parameter" {
value yes
doc "When checked, all properties whose traits (type, size, ...) match Alembic's Typed Geometry Parameters will be exported as Geometry Parameters, and properties that don't match such traits are exported as regular properties. When unchecked, all properties are exported as regular properties. It is recommended to enable this option unless the target application doesn't support Geometry Parameters (e.g. Katana)."
}
bool "bake_indexed_properties" {
value no
doc "When checked, all indexed properties except Geometry Parameters will be baked as non-sparse array properties: values are unshared and will use more memory and disk space. Enabling this option can improve compatibility with other applications. When unchecked, indexed properties will be exported unchanged (a compound property with 2 children properties: an array of indices and an array of indexed values)."
}
}
'''
#
# Scripted class engine for AbcExportOptionsUi.
#
class AbcExportOptionsUiEngine(ix.api.ModuleScriptedClassEngine):
#
# Initialize the engine.
#
def __init__(self):
ix.api.ModuleScriptedClassEngine.__init__(self)
#
# Handle attribute change.
#
def on_attribute_change(self, object, attr, dirtiness, dirtiness_flags):
if attr.get_name() == "write_one_frame_per_file":
object.get_attribute("transfer_source_data").set_read_only(attr.get_bool() == True)
return
if attr.get_name() == "export_scatterers":
object.get_attribute("scatterer_export_mode").set_read_only(attr.get_bool() == False)
return
if attr.get_name() == "export_properties":
is_read_only = attr.get_bool() == False
object.get_attribute("compatibility_mode").set_read_only(is_read_only)
object.get_attribute("fill_sparse_properties").set_read_only(is_read_only)
object.get_attribute("promote_to_geometry_parameter").set_read_only(is_read_only)
object.get_attribute("bake_indexed_properties").set_read_only(is_read_only)
return
if attr.get_name() == "compatibility_mode":
attr_fill = object.get_attribute("fill_sparse_properties")
attr_promote = object.get_attribute("promote_to_geometry_parameter")
attr_bake = object.get_attribute("bake_indexed_properties")
mode = attr.get_long()
attr_fill.set_bool(ix.api.AbcExportOptions.get_fill_sparse_properties(mode))
attr_promote.set_bool(ix.api.AbcExportOptions.get_promote_to_geometry_parameter(mode))
attr_bake.set_bool(ix.api.AbcExportOptions.get_bake_indexed_properties(mode))
is_read_only = mode != ix.api.AbcExportOptions.PropertiesCompatibilityMode_Default
attr_fill.set_read_only(is_read_only)
attr_promote.set_read_only(is_read_only)
attr_bake.set_read_only(is_read_only)
return
#
# Register the scripted class and engine.
#
scripted_class_name = 'AbcExportOptionsUi'
if not ix.api.ModuleScriptedClass.register_scripted_class(ix.application, scripted_class_name, AbcExportOptionsUiEngine(), abc_export_options_ui_cid, True):
ix.log_error('Failed to register class {}: the CID might be invalid or the class might already exist.'.format(scripted_class_name))
#
# Check that class registration is complete.
#
scripted_class = ix.application.get_factory().get_classes().get(scripted_class_name)
if scripted_class and scripted_class.get_attribute_count() > 0:
ix.log_info('Successfully registered class {}.'.format(scripted_class_name))
else:
ix.log_error('Incomplete registration of class {}, Alembic export is disabled.'.format(scripted_class_name))
| true | true |
f7f5561f7f7ad3d26e016f026638abb1a6d1c89d | 3,092 | py | Python | tests/test_rfc7914.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | tests/test_rfc7914.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | tests/test_rfc7914.py | pysnmp/pyasn1-modules | 93f5699988fbb090be13aaa339498c128ba7dedb | [
"BSD-2-Clause"
] | null | null | null | #
# This file is part of pyasn1-modules software.
#
# Created by Russ Housley
# Copyright (c) 2019, Vigil Security, LLC
# License: http://snmplabs.com/pyasn1/license.html
#
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem, rfc5280, rfc5958, rfc7914, rfc8018
# From RFC 7914, Section 13
class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
pem_text = """\
MIHiME0GCSqGSIb3DQEFDTBAMB8GCSsGAQQB2kcECzASBAVNb3VzZQIDEAAAAgEI
AgEBMB0GCWCGSAFlAwQBKgQQyYmguHMsOwzGMPoyObk/JgSBkJb47EWd5iAqJlyy
+ni5ftd6gZgOPaLQClL7mEZc2KQay0VhjZm/7MbBUNbqOAXNM6OGebXxVp6sHUAL
iBGY/Dls7B1TsWeGObE0sS1MXEpuREuloZjcsNVcNXWPlLdZtkSH6uwWzR0PyG/Z
+ZXfNodZtd/voKlvLOw5B3opGIFaLkbtLZQwMiGtl42AS89lZg==
"""
def setUp(self):
self.asn1Spec = rfc5958.EncryptedPrivateKeyInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
ea = asn1Object["encryptionAlgorithm"]
self.assertEqual(rfc8018.id_PBES2, ea["algorithm"])
self.assertIn(ea["algorithm"], rfc5280.algorithmIdentifierMap)
params, rest = der_decoder(
ea["parameters"], asn1Spec=rfc5280.algorithmIdentifierMap[ea["algorithm"]]
)
self.assertFalse(rest)
self.assertTrue(params.prettyPrint())
self.assertEqual(ea["parameters"], der_encoder(params))
kdf = params["keyDerivationFunc"]
self.assertEqual(rfc7914.id_scrypt, kdf["algorithm"])
self.assertIn(kdf["algorithm"], rfc5280.algorithmIdentifierMap)
kdfp, rest = der_decoder(
kdf["parameters"], asn1Spec=rfc5280.algorithmIdentifierMap[kdf["algorithm"]]
)
self.assertFalse(rest)
self.assertTrue(kdfp.prettyPrint())
self.assertTrue(kdf["parameters"], der_encoder(kdfp))
self.assertEqual(1048576, kdfp["costParameter"])
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True
)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
ea = asn1Object["encryptionAlgorithm"]
self.assertEqual(rfc8018.id_PBES2, ea["algorithm"])
params = asn1Object["encryptionAlgorithm"]["parameters"]
self.assertEqual(rfc7914.id_scrypt, params["keyDerivationFunc"]["algorithm"])
kdfp = params["keyDerivationFunc"]["parameters"]
self.assertEqual(1048576, kdfp["costParameter"])
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == "__main__":
result = unittest.TextTestRunner(verbosity=2).run(suite)
sys.exit(not result.wasSuccessful())
| 32.893617 | 88 | 0.719599 |
import sys
import unittest
from pyasn1.codec.der.decoder import decode as der_decoder
from pyasn1.codec.der.encoder import encode as der_encoder
from pyasn1_modules import pem, rfc5280, rfc5958, rfc7914, rfc8018
class MultiprimeRSAPrivateKeyTestCase(unittest.TestCase):
pem_text = """\
MIHiME0GCSqGSIb3DQEFDTBAMB8GCSsGAQQB2kcECzASBAVNb3VzZQIDEAAAAgEI
AgEBMB0GCWCGSAFlAwQBKgQQyYmguHMsOwzGMPoyObk/JgSBkJb47EWd5iAqJlyy
+ni5ftd6gZgOPaLQClL7mEZc2KQay0VhjZm/7MbBUNbqOAXNM6OGebXxVp6sHUAL
iBGY/Dls7B1TsWeGObE0sS1MXEpuREuloZjcsNVcNXWPlLdZtkSH6uwWzR0PyG/Z
+ZXfNodZtd/voKlvLOw5B3opGIFaLkbtLZQwMiGtl42AS89lZg==
"""
def setUp(self):
self.asn1Spec = rfc5958.EncryptedPrivateKeyInfo()
def testDerCodec(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
ea = asn1Object["encryptionAlgorithm"]
self.assertEqual(rfc8018.id_PBES2, ea["algorithm"])
self.assertIn(ea["algorithm"], rfc5280.algorithmIdentifierMap)
params, rest = der_decoder(
ea["parameters"], asn1Spec=rfc5280.algorithmIdentifierMap[ea["algorithm"]]
)
self.assertFalse(rest)
self.assertTrue(params.prettyPrint())
self.assertEqual(ea["parameters"], der_encoder(params))
kdf = params["keyDerivationFunc"]
self.assertEqual(rfc7914.id_scrypt, kdf["algorithm"])
self.assertIn(kdf["algorithm"], rfc5280.algorithmIdentifierMap)
kdfp, rest = der_decoder(
kdf["parameters"], asn1Spec=rfc5280.algorithmIdentifierMap[kdf["algorithm"]]
)
self.assertFalse(rest)
self.assertTrue(kdfp.prettyPrint())
self.assertTrue(kdf["parameters"], der_encoder(kdfp))
self.assertEqual(1048576, kdfp["costParameter"])
def testOpenTypes(self):
substrate = pem.readBase64fromText(self.pem_text)
asn1Object, rest = der_decoder(
substrate, asn1Spec=self.asn1Spec, decodeOpenTypes=True
)
self.assertFalse(rest)
self.assertTrue(asn1Object.prettyPrint())
self.assertEqual(substrate, der_encoder(asn1Object))
ea = asn1Object["encryptionAlgorithm"]
self.assertEqual(rfc8018.id_PBES2, ea["algorithm"])
params = asn1Object["encryptionAlgorithm"]["parameters"]
self.assertEqual(rfc7914.id_scrypt, params["keyDerivationFunc"]["algorithm"])
kdfp = params["keyDerivationFunc"]["parameters"]
self.assertEqual(1048576, kdfp["costParameter"])
# Collect every TestCase defined in this module into a single test suite.
suite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])
if __name__ == "__main__":
    # Run verbosely and exit with a non-zero status if any test failed.
    result = unittest.TextTestRunner(verbosity=2).run(suite)
    sys.exit(not result.wasSuccessful())
| true | true |
f7f557f2e715aee0f29b40a8196c67caff51cb68 | 1,573 | py | Python | bookbar/orders/migrations/0001_initial.py | trenev/bookbar | bccfdf52293f7cf105d6768bb2a1a643c9a58bb7 | [
"MIT"
] | null | null | null | bookbar/orders/migrations/0001_initial.py | trenev/bookbar | bccfdf52293f7cf105d6768bb2a1a643c9a58bb7 | [
"MIT"
] | null | null | null | bookbar/orders/migrations/0001_initial.py | trenev/bookbar | bccfdf52293f7cf105d6768bb2a1a643c9a58bb7 | [
"MIT"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-23 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the orders app: OrderBook (a book line item with a
    quantity) and Order (a dated collection of OrderBook rows)."""

    initial = True

    dependencies = [
        ('books', '0012_rename_description_book_annotation'),
        # Customer FKs target the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='OrderBook',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(blank=True, default=1, null=True)),
                ('ordered', models.BooleanField(default=False)),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.book')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_date', models.DateTimeField(auto_now_add=True)),
                ('ordered', models.BooleanField(default=False)),
                # An order aggregates OrderBook line items.
                ('books', models.ManyToManyField(to='orders.orderbook')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 40.333333 | 122 | 0.626192 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial orders-app schema: creates the OrderBook and Order tables."""

    initial = True

    dependencies = [
        ('books', '0012_rename_description_book_annotation'),
        # Customer FKs target the swappable user model.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='OrderBook',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('quantity', models.PositiveIntegerField(blank=True, default=1, null=True)),
                ('ordered', models.BooleanField(default=False)),
                ('book', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='books.book')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('order_date', models.DateTimeField(auto_now_add=True)),
                ('ordered', models.BooleanField(default=False)),
                # An order aggregates OrderBook line items.
                ('books', models.ManyToManyField(to='orders.orderbook')),
                ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| true | true |
f7f558fab50ed3fac7b3810faa3194d0589dc83c | 755 | py | Python | algorithms/FASTdetector.py | chandniagarwal/image_analyser | 2732df649646ada0a209eabae113ab9488d166a3 | [
"MIT"
] | null | null | null | algorithms/FASTdetector.py | chandniagarwal/image_analyser | 2732df649646ada0a209eabae113ab9488d166a3 | [
"MIT"
] | null | null | null | algorithms/FASTdetector.py | chandniagarwal/image_analyser | 2732df649646ada0a209eabae113ab9488d166a3 | [
"MIT"
] | null | null | null | import cv2 as cv
def fast_true(img):
    """Run FAST corner detection on ``img`` with non-max suppression on
    (the detector's default) and return the annotated image."""
    detector = cv.FastFeatureDetector_create()
    return fast_analyser(img, detector)
def fast_false(img):
    """Run FAST corner detection on ``img`` with non-max suppression turned
    off and return the annotated image."""
    detector = cv.FastFeatureDetector_create()
    detector.setNonmaxSuppression(0)  # keep every candidate keypoint
    return fast_analyser(img, detector)
def fast_analyser(img, fast):
    """Detect keypoints on ``img`` with the given FAST ``fast`` detector,
    print the detector's parameters and keypoint count, and return a copy
    of the image with the keypoints drawn (BGR blue)."""
    keypoints = fast.detect(img, None)
    annotated = cv.drawKeypoints(img, keypoints, None, color=(255, 0, 0))
    # Report the detector configuration and how many keypoints were found.
    print("Threshold: {}".format(fast.getThreshold()))
    print("nonmaxSuppression:{}".format(fast.getNonmaxSuppression()))
    print("neighborhood: {}".format(fast.getType()))
    print("Total Keypoints with nonmaxSuppression: {}".format(len(keypoints)))
    return annotated
| 29.038462 | 71 | 0.699338 | import cv2 as cv
def fast_true(img):
    """FAST detection with non-max suppression enabled (detector default)."""
    fast = cv.FastFeatureDetector_create()
    return fast_analyser(img, fast)
def fast_false(img):
    """FAST detection with non-max suppression disabled."""
    fast = cv.FastFeatureDetector_create()
    fast.setNonmaxSuppression(0)
    return fast_analyser(img, fast)
def fast_analyser(img, fast):
    """Detect keypoints with ``fast``, print its parameters and the keypoint
    count, and return a copy of ``img`` with keypoints drawn in BGR blue."""
    kp = fast.detect(img, None)
    img2 = cv.drawKeypoints(img, kp, None, color=(255, 0, 0))
    # Report the detector configuration and result size.
    print("Threshold: {}".format(fast.getThreshold()))
    print("nonmaxSuppression:{}".format(fast.getNonmaxSuppression()))
    print("neighborhood: {}".format(fast.getType()))
    print("Total Keypoints with nonmaxSuppression: {}".format(len(kp)))
    return img2
| true | true |
f7f5599e1e33310bf0cda3f5e34f3cfa498ed92b | 55,514 | py | Python | src/olympia/addons/tests/test_serializers.py | NeilRashbrook/addons-server | ba8842473b0fbda0ad97eeed690c6704462d97af | [
"BSD-3-Clause"
] | null | null | null | src/olympia/addons/tests/test_serializers.py | NeilRashbrook/addons-server | ba8842473b0fbda0ad97eeed690c6704462d97af | [
"BSD-3-Clause"
] | null | null | null | src/olympia/addons/tests/test_serializers.py | NeilRashbrook/addons-server | ba8842473b0fbda0ad97eeed690c6704462d97af | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from django.utils.translation import override
from rest_framework.test import APIRequestFactory
from olympia import amo
from olympia.accounts.tests.test_serializers import TestBaseUserSerializer
from olympia.addons.models import (
Addon, AddonCategory, AddonUser, Category, CompatOverride,
CompatOverrideRange, Persona, Preview, ReplacementAddon)
from olympia.addons.serializers import (
AddonDeveloperSerializer, AddonSerializer, AddonSerializerWithUnlistedData,
CompatOverrideSerializer, ESAddonAutoCompleteSerializer, ESAddonSerializer,
ESAddonSerializerWithUnlistedData, LanguageToolsSerializer,
LicenseSerializer, ReplacementAddonSerializer, SimpleVersionSerializer,
VersionSerializer)
from olympia.addons.utils import generate_addon_guid
from olympia.addons.views import AddonAutoCompleteSearchView, AddonSearchView
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
ESTestCase, TestCase, addon_factory, collection_factory, file_factory,
user_factory, version_factory)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.bandwagon.models import FeaturedCollection
from olympia.constants.categories import CATEGORIES
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.files.models import WebextPermission
from olympia.versions.models import (
ApplicationsVersions, AppVersion, License, VersionPreview)
class AddonSerializerOutputTestMixin(object):
    """Mixin containing tests to execute on both the regular and the ES Addon
    serializer."""

    def setUp(self):
        super(AddonSerializerOutputTestMixin, self).setUp()
        # Plain GET request reused as serializer context by every test.
        self.request = APIRequestFactory().get('/')
def _test_author(self, author, data):
    """Check that ``data`` is exactly the serialized form of ``author``."""
    expected = {
        'id': author.pk,
        'name': author.name,
        'picture_url': None,
        'url': absolutify(author.get_url_path()),
        'username': author.username,
    }
    assert data == expected
def _test_version_license_and_release_notes(self, version, data):
    """Check serialized release notes and license for ``version``.

    Matches the translated fixtures set up by test_basic().
    """
    assert data['release_notes'] == {
        'en-US': u'Release notes in english',
        'fr': u'Notes de version en français',
    }
    assert data['license']
    assert dict(data['license']) == {
        'id': version.license.pk,
        'name': {'en-US': u'My License', 'fr': u'Mä Licence'},
        # License text is not present in version serializer used from
        # AddonSerializer.
        'url': 'http://license.example.com/',
    }
def _test_version(self, version, data):
    """Check ``data`` against ``version``: compatibility map, the single
    expected file entry, and the top-level version fields."""
    assert data['id'] == version.pk

    # Compatibility is keyed by app short name.
    assert data['compatibility']
    assert len(data['compatibility']) == len(version.compatible_apps)
    for app, compat in version.compatible_apps.items():
        assert data['compatibility'][app.short] == {
            'min': compat.min.version,
            'max': compat.max.version
        }
    assert data['is_strict_compatibility_enabled'] is False

    # Exactly one file is expected; compare every serialized file field.
    assert data['files']
    assert len(data['files']) == 1
    result_file = data['files'][0]
    file_ = version.files.latest('pk')
    assert result_file['id'] == file_.pk
    assert result_file['created'] == (
        file_.created.replace(microsecond=0).isoformat() + 'Z')
    assert result_file['hash'] == file_.hash
    assert result_file['is_restart_required'] == file_.is_restart_required
    assert result_file['is_webextension'] == file_.is_webextension
    assert (
        result_file['is_mozilla_signed_extension'] ==
        file_.is_mozilla_signed_extension)
    assert result_file['platform'] == (
        amo.PLATFORM_CHOICES_API[file_.platform])
    assert result_file['size'] == file_.size
    assert result_file['status'] == amo.STATUS_CHOICES_API[file_.status]
    assert result_file['url'] == file_.get_url_path(src='')
    assert result_file['permissions'] == file_.webext_permissions_list

    assert data['edit_url'] == absolutify(
        self.addon.get_dev_url(
            'versions.edit', args=[version.pk], prefix_only=True))
    assert data['reviewed'] == version.reviewed
    assert data['version'] == version.version
    assert data['url'] == absolutify(version.get_url_path())
def test_basic(self):
    """Exhaustive happy-path test: build an add-on exercising (almost)
    every serialized field, then assert the full serializer output.

    Returns the serialized dict so subclasses can extend the checks.
    """
    # -- Fixture setup: categories, license, add-on, authors, previews,
    # and an extra (Thunderbird) compatible app.
    cat1 = Category.from_static_category(
        CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['bookmarks'])
    cat1.save()
    license = License.objects.create(
        name={
            'en-US': u'My License',
            'fr': u'Mä Licence',
        },
        text={
            'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
        },
        url='http://license.example.com/'
    )
    self.addon = addon_factory(
        average_daily_users=4242,
        average_rating=4.21,
        bayesian_rating=4.22,
        category=cat1,
        contributions=u'https://paypal.me/foobar/',
        description=u'My Addôn description',
        developer_comments=u'Dévelopers Addôn comments',
        file_kw={
            'hash': 'fakehash',
            'is_restart_required': False,
            'is_webextension': True,
            'platform': amo.PLATFORM_WIN.id,
            'size': 42,
        },
        guid=generate_addon_guid(),
        homepage=u'https://www.example.org/',
        icon_hash='fakehash',
        icon_type='image/png',
        name=u'My Addôn',
        public_stats=True,
        slug='my-addon',
        summary=u'My Addôn summary',
        support_email=u'support@example.org',
        support_url=u'https://support.example.org/support/my-addon/',
        tags=['some_tag', 'some_other_tag'],
        total_ratings=666,
        text_ratings_count=555,
        version_kw={
            'license': license,
            'releasenotes': {
                'en-US': u'Release notes in english',
                'fr': u'Notes de version en français',
            },
        },
        weekly_downloads=2147483647,
    )
    # Unlisted author must not appear in the serialized authors.
    AddonUser.objects.create(user=user_factory(username='hidden_author'),
                             addon=self.addon, listed=False)
    # Positions are deliberately created out of order to check sorting.
    second_author = user_factory(
        username='second_author', display_name=u'Secönd Author')
    first_author = user_factory(
        username='first_author', display_name=u'First Authôr')
    AddonUser.objects.create(
        user=second_author, addon=self.addon, position=2)
    AddonUser.objects.create(
        user=first_author, addon=self.addon, position=1)
    second_preview = Preview.objects.create(
        addon=self.addon, position=2,
        caption={'en-US': u'My câption', 'fr': u'Mön tîtré'},
        sizes={'thumbnail': [199, 99], 'image': [567, 780]})
    first_preview = Preview.objects.create(addon=self.addon, position=1)
    av_min = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='2.0.99')[0]
    av_max = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='3.0.99')[0]
    ApplicationsVersions.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version=self.addon.current_version,
        min=av_min, max=av_max)
    # Reset current_version.compatible_apps now that we've added an app.
    del self.addon.current_version._compatible_apps
    cat2 = Category.from_static_category(
        CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['alerts-updates'])
    cat2.save()
    AddonCategory.objects.create(addon=self.addon, category=cat2)
    cat3 = Category.from_static_category(
        CATEGORIES[amo.THUNDERBIRD.id][amo.ADDON_EXTENSION]['calendar'])
    cat3.save()
    AddonCategory.objects.create(addon=self.addon, category=cat3)

    result = self.serialize()

    # -- Assertions on the serialized output.
    assert result['id'] == self.addon.pk
    assert result['average_daily_users'] == self.addon.average_daily_users
    assert result['categories'] == {
        'firefox': ['alerts-updates', 'bookmarks'],
        'thunderbird': ['calendar']}
    # In this serializer latest_unlisted_version is omitted.
    assert 'latest_unlisted_version' not in result
    assert result['current_version']
    self._test_version(
        self.addon.current_version, result['current_version'])
    assert result['current_version']['url'] == absolutify(
        reverse('addons.versions',
                args=[self.addon.slug, self.addon.current_version.version])
    )
    self._test_version_license_and_release_notes(
        self.addon.current_version, result['current_version'])
    # Only the two listed authors, ordered by position.
    assert result['authors']
    assert len(result['authors']) == 2
    self._test_author(first_author, result['authors'][0])
    self._test_author(second_author, result['authors'][1])
    assert result['contributions_url'] == self.addon.contributions
    assert result['edit_url'] == absolutify(self.addon.get_dev_url())
    assert result['default_locale'] == self.addon.default_locale
    assert result['description'] == {'en-US': self.addon.description}
    assert result['developer_comments'] == {
        'en-US': self.addon.developer_comments}
    assert result['guid'] == self.addon.guid
    assert result['has_eula'] is False
    assert result['has_privacy_policy'] is False
    assert result['homepage'] == {
        'en-US': unicode(self.addon.homepage),
    }
    assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
    assert result['icons'] == {
        '32': absolutify(self.addon.get_icon_url(32)),
        '64': absolutify(self.addon.get_icon_url(64))
    }
    assert result['is_disabled'] == self.addon.is_disabled
    assert result['is_experimental'] == self.addon.is_experimental is False
    assert result['is_featured'] == self.addon.is_featured() is False
    assert result['is_source_public'] == self.addon.view_source
    assert result['last_updated'] == (
        self.addon.last_updated.replace(microsecond=0).isoformat() + 'Z')
    assert result['name'] == {'en-US': self.addon.name}
    # Previews come back ordered by position.
    assert result['previews']
    assert len(result['previews']) == 2
    result_preview = result['previews'][0]
    assert result_preview['id'] == first_preview.pk
    assert result_preview['caption'] is None
    assert result_preview['image_url'] == absolutify(
        first_preview.image_url)
    assert result_preview['thumbnail_url'] == absolutify(
        first_preview.thumbnail_url)
    assert result_preview['image_size'] == first_preview.image_size
    assert result_preview['thumbnail_size'] == first_preview.thumbnail_size
    result_preview = result['previews'][1]
    assert result_preview['id'] == second_preview.pk
    assert result_preview['caption'] == {
        'en-US': u'My câption',
        'fr': u'Mön tîtré'
    }
    assert result_preview['image_url'] == absolutify(
        second_preview.image_url)
    assert result_preview['thumbnail_url'] == absolutify(
        second_preview.thumbnail_url)
    assert (result_preview['image_size'] == second_preview.image_size ==
            [567, 780])
    assert (result_preview['thumbnail_size'] ==
            second_preview.thumbnail_size == [199, 99])
    assert result['ratings'] == {
        'average': self.addon.average_rating,
        'bayesian_average': self.addon.bayesian_rating,
        'count': self.addon.total_ratings,
        'text_count': self.addon.text_ratings_count,
    }
    assert (
        result['ratings_url'] == absolutify(self.addon.ratings_url) ==
        absolutify(reverse('addons.ratings.list', args=[self.addon.slug])))
    assert result['public_stats'] == self.addon.public_stats
    assert result['requires_payment'] == self.addon.requires_payment
    assert result['review_url'] == absolutify(
        reverse('reviewers.review', args=[self.addon.pk]))
    assert result['slug'] == self.addon.slug
    assert result['status'] == 'public'
    assert result['summary'] == {'en-US': self.addon.summary}
    assert result['support_email'] == {'en-US': self.addon.support_email}
    assert result['support_url'] == {
        'en-US': unicode(self.addon.support_url),
    }
    assert 'theme_data' not in result
    assert set(result['tags']) == set(['some_tag', 'some_other_tag'])
    assert result['type'] == 'extension'
    assert result['url'] == absolutify(self.addon.get_url_path())
    assert result['weekly_downloads'] == self.addon.weekly_downloads

    return result
def test_wrap_outgoing_links(self):
    """With ?wrap_outgoing_links=1 the external URLs (contributions,
    homepage, support_url) are rewritten through the outgoing redirector,
    both with and without a single-locale ``lang`` parameter."""
    self.addon = addon_factory(
        contributions=u'https://paypal.me/fôobar',
        homepage='http://support.example.com/',
        support_url=u'https://support.example.org/support/my-âddon/')
    self.request = APIRequestFactory().get('/', {'wrap_outgoing_links': 1})
    result = self.serialize()
    assert result['contributions_url'] == (
        get_outgoing_url(unicode(self.addon.contributions)))
    assert result['homepage'] == {
        'en-US': get_outgoing_url(unicode(self.addon.homepage)),
    }
    assert result['support_url'] == {
        'en-US': get_outgoing_url(unicode(self.addon.support_url)),
    }

    # Try a single translation.
    self.request = APIRequestFactory().get('/', {
        'lang': 'en-US', 'wrap_outgoing_links': 1})
    result = self.serialize()
    assert result['contributions_url'] == (
        get_outgoing_url(unicode(self.addon.contributions)))
    assert result['homepage'] == (
        get_outgoing_url(unicode(self.addon.homepage))
    )
    assert result['support_url'] == (
        get_outgoing_url(unicode(self.addon.support_url))
    )

    # Try with empty strings/None. Annoyingly, contribution model field
    # does not let us set it to None, so use a translated field for that
    # part of the test.
    self.addon.update(contributions='', homepage=None)
    result = self.serialize()
    assert result['contributions_url'] == ''
    assert result['homepage'] is None
def test_latest_unlisted_version(self):
    """The default serializer never exposes latest_unlisted_version."""
    self.addon = addon_factory()
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
        version='1.1')
    # The add-on really does have an unlisted version...
    assert self.addon.latest_unlisted_version
    # ...but the key is withheld: it requires specific rights.
    assert 'latest_unlisted_version' not in self.serialize()
def test_latest_unlisted_version_with_rights(self):
    """The unlisted-data serializer exposes latest_unlisted_version; its
    URL is just the site root since unlisted versions have no page."""
    self.serializer_class = self.serializer_class_with_unlisted_data

    self.addon = addon_factory()
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
        version='1.1')
    assert self.addon.latest_unlisted_version

    result = self.serialize()
    # In this serializer latest_unlisted_version is present.
    assert result['latest_unlisted_version']
    self._test_version(
        self.addon.latest_unlisted_version,
        result['latest_unlisted_version'])
    assert result['latest_unlisted_version']['url'] == absolutify('')
def test_is_disabled(self):
    """A user-disabled add-on serializes with is_disabled=True."""
    self.addon = addon_factory(disabled_by_user=True)
    assert self.serialize()['is_disabled'] is True
def test_is_source_public(self):
    """view_source=True surfaces as is_source_public=True."""
    self.addon = addon_factory(view_source=True)
    assert self.serialize()['is_source_public'] is True
def test_is_experimental(self):
    """The is_experimental flag is passed straight through."""
    self.addon = addon_factory(is_experimental=True)
    assert self.serialize()['is_experimental'] is True
def test_requires_payment(self):
    """The requires_payment flag is passed straight through."""
    self.addon = addon_factory(requires_payment=True)
    assert self.serialize()['requires_payment'] is True
def test_icon_url_without_icon_type_set(self):
    """Even without an icon_type the serializer returns icon URLs."""
    self.addon = addon_factory()
    serialized = self.serialize()
    assert serialized['id'] == self.addon.pk
    assert serialized['icon_url'] == absolutify(self.addon.get_icon_url(64))
    expected_icons = {
        str(size): absolutify(self.addon.get_icon_url(size))
        for size in (32, 64)
    }
    assert serialized['icons'] == expected_icons
def test_no_current_version(self):
    """current_version serializes as None once all versions are gone."""
    self.addon = addon_factory(name='lol')
    self.addon.current_version.delete()
    serialized = self.serialize()
    assert serialized['id'] == self.addon.pk
    assert serialized['current_version'] is None
def test_no_current_version_files(self):
    """A current version with zero files still serializes sanely, with an
    empty files list and empty compatibility."""
    self.addon = addon_factory(name='lol')
    # Just removing the last file deletes the version, so we have to be
    # creative and replace the version manually with one that has no files.
    self.addon.current_version.delete()
    version = self.addon.versions.create(version='0.42')
    self.addon._current_version = version
    self.addon.save()
    result = self.serialize()
    assert result['id'] == self.addon.pk
    assert result['current_version']
    result_version = result['current_version']
    assert result_version['reviewed'] == version.reviewed
    assert result_version['version'] == version.version
    assert result_version['files'] == []
    assert result_version['is_strict_compatibility_enabled'] is False
    assert result_version['compatibility'] == {}
def test_deleted(self):
    """A deleted add-on still serializes, with status 'deleted'."""
    self.addon = addon_factory(name=u'My Deleted Addôn')
    self.addon.delete()
    serialized = self.serialize()
    assert serialized['id'] == self.addon.pk
    assert serialized['status'] == 'deleted'
def test_has_policies(self):
    """Setting an EULA and a privacy policy flips both boolean flags."""
    self.addon = addon_factory()
    self.addon.privacy_policy = 'lol'
    self.addon.eula = {
        'en-US': u'My Addôn EULA in english',
        'fr': u'Houlalà',
    }
    self.addon.save()
    serialized = self.serialize()
    assert serialized['has_eula'] is True
    assert serialized['has_privacy_policy'] is True
def test_is_featured(self):
    """An add-on in a featured collection serializes is_featured=True."""
    self.addon = addon_factory()
    featured = collection_factory()
    FeaturedCollection.objects.create(
        collection=featured, application=featured.application)
    featured.add_addon(self.addon)
    assert self.addon.is_featured()
    assert self.serialize()['is_featured'] is True
def test_translations(self):
    """Translated fields serialize as locale->value dicts by default, and
    collapse to a single string when a ``lang`` parameter is given."""
    translated_descriptions = {
        'en-US': u'My Addôn description in english',
        'fr': u'Description de mon Addôn',
    }
    translated_homepages = {
        'en-US': u'http://www.google.com/',
        'fr': u'http://www.googlé.fr/',
    }
    self.addon = addon_factory()
    self.addon.description = translated_descriptions
    self.addon.homepage = translated_homepages
    self.addon.save()

    result = self.serialize()
    assert result['description'] == translated_descriptions
    assert result['homepage'] == translated_homepages

    # Try a single translation. The locale activation is normally done by
    # LocaleAndAppURLMiddleware, but since we're directly calling the
    # serializer we need to do it ourselves.
    self.request = APIRequestFactory().get('/', {'lang': 'fr'})
    with override('fr'):
        result = self.serialize()
    assert result['description'] == translated_descriptions['fr']
    assert result['homepage'] == translated_homepages['fr']
def test_persona_with_persona_id(self):
    """An "old"-style Persona (non-zero persona_id) exposes theme_data."""
    self.addon = addon_factory(type=amo.ADDON_PERSONA)
    persona = self.addon.persona
    persona.persona_id = 42
    persona.header = u'myheader.jpg'
    persona.footer = u'myfooter.jpg'
    persona.accentcolor = u'336699'
    persona.textcolor = u'f0f0f0'
    persona.author = u'Me-me-me-Myself'
    persona.display_username = u'my-username'
    persona.save()
    # persona_id != 0 means this is not a "new" style Persona.
    assert not persona.is_new()

    result = self.serialize()
    assert result['theme_data'] == persona.theme_data
def test_persona(self):
    """A "new"-style Persona (persona_id == 0) exposes theme_data with an
    HTML-escaped description, and reports popularity as
    average_daily_users while omitting weekly_downloads."""
    self.addon = addon_factory(
        name=u'My Personâ',
        description=u'<script>alert(42)</script>My Personä description',
        type=amo.ADDON_PERSONA)
    persona = self.addon.persona
    persona.persona_id = 0  # For "new" style Personas this is always 0.
    persona.header = u'myheader.png'
    persona.footer = u'myfooter.png'
    persona.accentcolor = u'336699'
    persona.textcolor = u'f0f0f0'
    persona.author = u'Me-me-me-Myself'
    persona.display_username = u'my-username'
    persona.popularity = 123456
    persona.save()
    assert persona.is_new()

    result = self.serialize()
    assert result['theme_data'] == persona.theme_data
    assert '<script>' not in result['theme_data']['description']
    # BUG FIX: this assertion previously re-checked "'<script>' in ...",
    # directly contradicting the line above (the HTML entities were lost).
    # The description must come back HTML-escaped.
    assert '&lt;script&gt;' in result['theme_data']['description']
    assert result['average_daily_users'] == persona.popularity
    assert 'weekly_downloads' not in result
def test_handle_persona_without_persona_data_in_db(self):
    """A Persona add-on whose Persona row is missing must still serialize
    (no theme_data, default icons) instead of erroring out."""
    self.addon = addon_factory(type=amo.ADDON_PERSONA)
    Persona.objects.get(addon=self.addon).delete()
    # .reload() does not clear self.addon.persona, so do it manually.
    self.addon = Addon.objects.get(pk=self.addon.pk)
    result = self.serialize()

    assert result['id'] == self.addon.pk
    assert result['type'] == 'persona'
    # theme_data should be missing, which sucks, but is better than a 500.
    assert 'theme_data' not in result
    # icon url should just be a default icon instead of the Persona icon.
    assert result['icon_url'] == (
        'http://testserver/static/img/addon-icons/default-64.png')
    assert result['icons'] == {
        '32': 'http://testserver/static/img/addon-icons/default-32.png',
        '64': 'http://testserver/static/img/addon-icons/default-64.png'
    }
def test_webextension(self):
    """Webextension files serialize with their declared permissions."""
    self.addon = addon_factory(file_kw={'is_webextension': True})
    # Give one of the versions some webext permissions to test that.
    WebextPermission.objects.create(
        file=self.addon.current_version.all_files[0],
        permissions=['bookmarks', 'random permission']
    )

    result = self.serialize()

    self._test_version(
        self.addon.current_version, result['current_version'])
    # Double check the permissions got correctly set.
    assert result['current_version']['files'][0]['permissions'] == ([
        'bookmarks', 'random permission'])
def test_is_restart_required(self):
    """Serialized file data reflects is_restart_required=True."""
    self.addon = addon_factory(file_kw={'is_restart_required': True})
    serialized = self.serialize()
    self._test_version(
        self.addon.current_version, serialized['current_version'])
def test_special_compatibility_cases(self):
    """Compatibility edge cases: strict compatibility enabled, no
    compatibility info at all, and a NO_COMPAT add-on type whose real
    compatibility rows are replaced by fake per-app data."""
    # Test an add-on with strict compatibility enabled.
    self.addon = addon_factory(file_kw={'strict_compatibility': True})
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {
        'firefox': {'max': u'5.0.99', 'min': u'4.0.99'}
    }
    assert result_version['is_strict_compatibility_enabled'] is True

    # Test an add-on with no compatibility info.
    self.addon = addon_factory()
    ApplicationsVersions.objects.filter(
        version=self.addon.current_version).delete()
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {}
    assert result_version['is_strict_compatibility_enabled'] is False

    # Test an add-on with some compatibility info but that should be
    # ignored because its type is in NO_COMPAT.
    self.addon = addon_factory(type=amo.ADDON_SEARCH)
    av_min = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='2.0.99')[0]
    av_max = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='3.0.99')[0]
    ApplicationsVersions.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version=self.addon.current_version,
        min=av_min, max=av_max)
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {
        'android': {'max': '9999', 'min': '11.0'},
        'firefox': {'max': '9999', 'min': '4.0'},
        'seamonkey': {'max': '9999', 'min': '2.1'},
        # No thunderbird: it does not support that type, and when we return
        # fake compatibility data for NO_COMPAT add-ons we do obey that.
    }
    assert result_version['is_strict_compatibility_enabled'] is False
def test_static_theme_preview(self):
    """Static themes use the current version's VersionPreview objects for
    'previews' and ignore Preview instances attached to the add-on."""
    self.addon = addon_factory(type=amo.ADDON_STATICTHEME)
    # Attach some Preview instances to the add-on, they should be ignored
    # since it's a static theme.
    Preview.objects.create(
        addon=self.addon, position=1,
        caption={'en-US': u'My câption', 'fr': u'Mön tîtré'},
        sizes={'thumbnail': [123, 45], 'image': [678, 910]})
    result = self.serialize()
    assert result['previews'] == []

    # Add a second version, attach VersionPreview to both, make sure we
    # take the right one.
    first_version = self.addon.current_version
    VersionPreview.objects.create(
        version=first_version,
        sizes={'thumbnail': [12, 34], 'image': [56, 78]})
    second_version = version_factory(addon=self.addon)
    current_preview = VersionPreview.objects.create(
        version=second_version,
        sizes={'thumbnail': [56, 78], 'image': [91, 234]})
    assert self.addon.reload().current_version == second_version
    result = self.serialize()
    assert len(result['previews']) == 1
    assert result['previews'][0]['id'] == current_preview.pk
    assert result['previews'][0]['caption'] is None
    assert result['previews'][0]['image_url'] == absolutify(
        current_preview.image_url)
    assert result['previews'][0]['thumbnail_url'] == absolutify(
        current_preview.thumbnail_url)
    assert result['previews'][0]['image_size'] == (
        current_preview.image_size)
    assert result['previews'][0]['thumbnail_size'] == (
        current_preview.thumbnail_size)

    # Make sure we don't fail if somehow there is no current version.
    self.addon.update(_current_version=None)
    result = self.serialize()
    assert result['current_version'] is None
    assert result['previews'] == []
class TestAddonSerializerOutput(AddonSerializerOutputTestMixin, TestCase):
    """Run the shared output tests against the DB-backed AddonSerializer."""

    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData

    def serialize(self):
        """Serialize self.addon after reloading it from the database."""
        serializer = self.serializer_class(context={'request': self.request})
        self.serializer = serializer
        # Reload through the unfiltered manager to drop cached properties
        # before serializing.
        self.addon = Addon.unfiltered.get(pk=self.addon.pk)
        return serializer.to_representation(self.addon)
class TestESAddonSerializerOutput(AddonSerializerOutputTestMixin, ESTestCase):
    """Run the shared output tests against the Elasticsearch-backed
    serializer, serializing from an indexed document instead of the DB."""

    serializer_class = ESAddonSerializer
    serializer_class_with_unlisted_data = ESAddonSerializerWithUnlistedData

    def tearDown(self):
        super(TestESAddonSerializerOutput, self).tearDown()
        self.empty_index('default')
        self.refresh()

    def search(self):
        """Reindex and fetch the ES hit for self.addon."""
        self.reindex(Addon)

        view = AddonSearchView()
        view.request = self.request
        qs = view.get_queryset()
        return qs.filter('term', id=self.addon.pk).execute()[0]

    def serialize(self):
        self.serializer = self.serializer_class(
            context={'request': self.request})

        obj = self.search()

        # Serializing from ES data must not hit the database at all.
        with self.assertNumQueries(0):
            result = self.serializer.to_representation(obj)
        return result

    def _test_author(self, author, data):
        """Override because the ES serializer doesn't include picture_url."""
        assert data == {
            'id': author.pk,
            'name': author.name,
            'url': absolutify(author.get_url_path()),
            'username': author.username,
        }

    def _test_version_license_and_release_notes(self, version, data):
        """Override because the ES serializer doesn't include those fields."""
        assert 'license' not in data
        assert 'release_notes' not in data
class TestVersionSerializerOutput(TestCase):
    """Tests for the standalone VersionSerializer output."""

    def setUp(self):
        super(TestVersionSerializerOutput, self).setUp()
        # Plain GET request used as serializer context.
        self.request = APIRequestFactory().get('/')
def serialize(self):
    """Serialize self.version through VersionSerializer."""
    context = {'request': self.request}
    return VersionSerializer(context=context).to_representation(self.version)
def test_basic(self):
now = self.days_ago(0)
license = License.objects.create(
name={
'en-US': u'My License',
'fr': u'Mä Licence',
},
text={
'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
},
url='http://license.example.com/'
)
addon = addon_factory(
file_kw={
'hash': 'fakehash',
'is_webextension': True,
'is_mozilla_signed_extension': True,
'platform': amo.PLATFORM_WIN.id,
'size': 42,
},
version_kw={
'license': license,
'min_app_version': '50.0',
'max_app_version': '*',
'releasenotes': {
'en-US': u'Release notes in english',
'fr': u'Notes de version en français',
},
'reviewed': now,
}
)
self.version = addon.current_version
first_file = self.version.files.latest('pk')
file_factory(
version=self.version, platform=amo.PLATFORM_MAC.id)
second_file = self.version.files.latest('pk')
# Force reload of all_files cached property.
del self.version.all_files
result = self.serialize()
assert result['id'] == self.version.pk
assert result['compatibility'] == {
'firefox': {'max': u'*', 'min': u'50.0'}
}
assert result['files']
assert len(result['files']) == 2
assert result['files'][0]['id'] == first_file.pk
assert result['files'][0]['created'] == (
first_file.created.replace(microsecond=0).isoformat() + 'Z')
assert result['files'][0]['hash'] == first_file.hash
assert result['files'][0]['is_webextension'] == (
first_file.is_webextension)
assert result['files'][0]['is_mozilla_signed_extension'] == (
first_file.is_mozilla_signed_extension)
assert result['files'][0]['platform'] == 'windows'
assert result['files'][0]['size'] == first_file.size
assert result['files'][0]['status'] == 'public'
assert result['files'][0]['url'] == first_file.get_url_path(src='')
assert result['files'][1]['id'] == second_file.pk
assert result['files'][1]['created'] == (
second_file.created.replace(microsecond=0).isoformat() + 'Z')
assert result['files'][1]['hash'] == second_file.hash
assert result['files'][1]['is_webextension'] == (
second_file.is_webextension)
assert result['files'][1]['is_mozilla_signed_extension'] == (
second_file.is_mozilla_signed_extension)
assert result['files'][1]['platform'] == 'mac'
assert result['files'][1]['size'] == second_file.size
assert result['files'][1]['status'] == 'public'
assert result['files'][1]['url'] == second_file.get_url_path(src='')
assert result['channel'] == 'listed'
assert result['edit_url'] == absolutify(addon.get_dev_url(
'versions.edit', args=[self.version.pk], prefix_only=True))
assert result['release_notes'] == {
'en-US': u'Release notes in english',
'fr': u'Notes de version en français',
}
assert result['license']
assert dict(result['license']) == {
'id': license.pk,
'name': {'en-US': u'My License', 'fr': u'Mä Licence'},
'text': {
'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
},
'url': 'http://license.example.com/',
}
assert result['reviewed'] == (
now.replace(microsecond=0).isoformat() + 'Z')
assert result['url'] == absolutify(self.version.get_url_path())
def test_unlisted(self):
addon = addon_factory()
self.version = version_factory(
addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
result = self.serialize()
assert result['channel'] == 'unlisted'
def test_no_license(self):
addon = addon_factory()
self.version = addon.current_version
self.version.update(license=None)
result = self.serialize()
assert result['id'] == self.version.pk
assert result['license'] is None
def test_license_no_url(self):
addon = addon_factory()
self.version = addon.current_version
license = self.version.license
license.update(url=None, builtin=license.OTHER)
result = self.serialize()
assert result['id'] == self.version.pk
assert result['license']
assert result['license']['id'] == license.pk
assert result['license']['url'] == absolutify(
self.version.license_url())
license.update(builtin=1)
result = self.serialize()
# Builtin licenses with no url shouldn't get the version license url.
assert result['license']['url'] is None
def test_license_serializer_no_url_no_parent(self):
# This should not happen (LicenseSerializer should always be called
# from a parent VersionSerializer) but we don't want the API to 500
# if that does happens.
addon = addon_factory()
self.version = addon.current_version
license = self.version.license
license.update(url=None)
result = LicenseSerializer(
context={'request': self.request}).to_representation(license)
assert result['id'] == license.pk
# LicenseSerializer is unable to find the Version, so it falls back to
# None.
assert result['url'] is None
def test_builtin_license(self):
addon = addon_factory()
self.version = addon.current_version
license = self.version.license
license.update(builtin=18)
assert license._constant == LICENSES_BY_BUILTIN[18]
result = LicenseSerializer(
context={'request': self.request}).to_representation(license)
assert result['id'] == license.pk
# A request with no ?lang gets you the site default l10n in a dict to
# match how non-constant values are returned.
assert result['name'] == {
'en-US': unicode(LICENSES_BY_BUILTIN[18].name)}
accept_request = APIRequestFactory().get('/')
accept_request.LANG = 'de'
result = LicenseSerializer(
context={'request': accept_request}).to_representation(license)
# An Accept-Language should result in a different default though.
assert result['name'] == {
'de': unicode(LICENSES_BY_BUILTIN[18].name)}
# But a requested lang returns a flat string
lang_request = APIRequestFactory().get('/?lang=fr')
result = LicenseSerializer(
context={'request': lang_request}).to_representation(license)
assert result['name'] == unicode(LICENSES_BY_BUILTIN[18].name)
def test_file_webext_permissions(self):
self.version = addon_factory().current_version
result = self.serialize()
# No permissions.
assert result['files'][0]['permissions'] == []
self.version = addon_factory(
file_kw={'is_webextension': True}).current_version
permissions = ['dangerdanger', 'high', 'voltage']
WebextPermission.objects.create(
permissions=permissions, file=self.version.all_files[0])
result = self.serialize()
assert result['files'][0]['permissions'] == permissions
class TestSimpleVersionSerializerOutput(TestCase):
    """Tests for SimpleVersionSerializer, the trimmed-down version
    representation (license included but without its text)."""

    def setUp(self):
        # Call super() so the base TestCase setup runs, consistent with
        # TestVersionSerializerOutput above (the original omitted it).
        super(TestSimpleVersionSerializerOutput, self).setUp()
        self.request = APIRequestFactory().get('/')

    def serialize(self):
        """Serialize self.version with SimpleVersionSerializer."""
        serializer = SimpleVersionSerializer(context={'request': self.request})
        return serializer.to_representation(self.version)

    def test_license_included_without_text(self):
        """The license is present in the output, but its 'text' field —
        which can be huge — is omitted by this serializer."""
        now = self.days_ago(0)
        license = License.objects.create(
            name={
                'en-US': u'My License',
                'fr': u'Mä Licence',
            },
            text={
                'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
            },
            url='http://license.example.com/'
        )
        addon = addon_factory(
            version_kw={
                'license': license,
                'reviewed': now,
            }
        )
        self.version = addon.current_version
        result = self.serialize()
        assert result['id'] == self.version.pk
        assert result['license'] is not None
        assert result['license']['id'] == license.pk
        assert result['license']['name']['en-US'] == 'My License'
        assert result['license']['name']['fr'] == u'Mä Licence'
        assert result['license']['url'] == 'http://license.example.com/'
        assert 'text' not in result['license']
class TestLanguageToolsSerializerOutput(TestCase):
    """Tests for LanguageToolsSerializer (language packs and
    dictionaries), including the optional current_compatible_version."""

    def setUp(self):
        # Call super() so the base TestCase setup runs, consistent with
        # the other serializer test classes (the original omitted it).
        super(TestLanguageToolsSerializerOutput, self).setUp()
        self.request = APIRequestFactory().get('/')

    def serialize(self):
        """Serialize self.addon with LanguageToolsSerializer."""
        serializer = LanguageToolsSerializer(context={'request': self.request})
        return serializer.to_representation(self.addon)

    def test_basic(self):
        self.addon = addon_factory(
            type=amo.ADDON_LPAPP, target_locale='fr',
            locale_disambiguation=u'lolé')
        result = self.serialize()
        assert result['id'] == self.addon.pk
        assert result['default_locale'] == self.addon.default_locale
        assert result['guid'] == self.addon.guid
        assert result['locale_disambiguation'] == (
            self.addon.locale_disambiguation)
        assert result['name'] == {'en-US': self.addon.name}
        assert result['slug'] == self.addon.slug
        assert result['target_locale'] == self.addon.target_locale
        assert result['type'] == 'language'
        assert result['url'] == absolutify(self.addon.get_url_path())
        # Without app/appversion parameters on the request,
        # current_compatible_version is not exposed at all.
        assert 'current_compatible_version' not in result

    def test_basic_dict(self):
        self.addon = addon_factory(type=amo.ADDON_DICT)
        result = self.serialize()
        assert result['type'] == 'dictionary'
        assert 'current_compatible_version' not in result

    def test_current_compatible_version(self):
        # Set a filename to make sure the file actually exists.
        # file_factory (used via addon_factory) copies files that exists
        # as fixtures in src/olympia/files/fixtures/files to their rightful
        # place. We need that to test the localepicker properly.
        file_kw = {'filename': 'langpack-localepicker.xpi'}
        self.addon = addon_factory(type=amo.ADDON_LPAPP, file_kw=file_kw)
        # compatible_versions is set by the view through prefetch, it
        # looks like a list.
        self.addon.compatible_versions = [self.addon.current_version]
        self.addon.compatible_versions[0].update(created=self.days_ago(1))
        # Create a new current version, just to prove that
        # current_compatible_version does not use that.
        version_factory(addon=self.addon, file_kw=file_kw)
        # Bug fix: the original had `self.addon.reload` without the call
        # parentheses, which only accessed the bound method and did nothing.
        self.addon.reload()
        assert (
            self.addon.compatible_versions[0] !=
            self.addon.current_version)
        self.request = APIRequestFactory().get('/?app=firefox&appversion=57.0')
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is not None
        assert set(result['current_compatible_version'].keys()) == set(
            ['id', 'files', 'reviewed', 'version'])
        # When compatible_versions was not prefetched (None) or is empty,
        # the field is present but None.
        self.addon.compatible_versions = None
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is None
        self.addon.compatible_versions = []
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is None
class TestESAddonAutoCompleteSerializer(ESTestCase):
    """Tests for ESAddonAutoCompleteSerializer, which returns only the
    minimal id/name/icon_url/url fields from an Elasticsearch hit."""

    def setUp(self):
        super(TestESAddonAutoCompleteSerializer, self).setUp()
        self.request = APIRequestFactory().get('/')

    def tearDown(self):
        super(TestESAddonAutoCompleteSerializer, self).tearDown()
        # Clean up the index so later tests start from an empty state.
        self.empty_index('default')
        self.refresh()

    def search(self):
        """Reindex and return the ES hit for self.addon via the
        autocomplete search view."""
        self.reindex(Addon)
        view = AddonAutoCompleteSearchView()
        view.request = self.request
        qs = view.get_queryset()
        return qs.filter('term', id=self.addon.pk).execute()[0]

    def serialize(self):
        """Serialize the ES hit for self.addon.

        Asserts zero SQL queries: the ES serializer must work entirely
        from the indexed data, never touching the database."""
        self.serializer = ESAddonAutoCompleteSerializer(
            context={'request': self.request})
        obj = self.search()
        with self.assertNumQueries(0):
            result = self.serializer.to_representation(obj)
        return result

    def test_basic(self):
        self.addon = addon_factory()
        result = self.serialize()
        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['id'] == self.addon.pk
        assert result['name'] == {'en-US': unicode(self.addon.name)}
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
        assert result['url'] == absolutify(self.addon.get_url_path())

    def test_translations(self):
        translated_name = {
            'en-US': u'My Addôn name in english',
            'fr': u'Nom de mon Addôn',
        }
        self.addon = addon_factory()
        self.addon.name = translated_name
        self.addon.save()
        result = self.serialize()
        assert result['name'] == translated_name
        # Try a single translation. The locale activation is normally done by
        # LocaleAndAppURLMiddleware, but since we're directly calling the
        # serializer we need to do it ourselves.
        self.request = APIRequestFactory().get('/', {'lang': 'fr'})
        with override('fr'):
            result = self.serialize()
        assert result['name'] == translated_name['fr']

    def test_icon_url_with_persona_id(self):
        """Old-style Persona (non-zero persona_id): icon_url still works."""
        self.addon = addon_factory(type=amo.ADDON_PERSONA)
        persona = self.addon.persona
        persona.persona_id = 42
        persona.header = u'myheader.jpg'
        persona.footer = u'myfooter.jpg'
        persona.accentcolor = u'336699'
        persona.textcolor = u'f0f0f0'
        persona.author = u'Me-me-me-Myself'
        persona.display_username = u'my-username'
        persona.save()
        assert not persona.is_new()
        result = self.serialize()
        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))

    def test_icon_url_persona_with_no_persona_id(self):
        """New-style Persona (persona_id == 0): icon_url still works."""
        self.addon = addon_factory(
            name=u'My Personâ',
            description=u'<script>alert(42)</script>My Personä description',
            type=amo.ADDON_PERSONA)
        persona = self.addon.persona
        persona.persona_id = 0  # For "new" style Personas this is always 0.
        persona.header = u'myheader.png'
        persona.footer = u'myfooter.png'
        persona.accentcolor = u'336699'
        persona.textcolor = u'f0f0f0'
        persona.author = u'Me-me-me-Myself'
        persona.display_username = u'my-username'
        persona.save()
        assert persona.is_new()
        result = self.serialize()
        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
class TestAddonDeveloperSerializer(TestBaseUserSerializer):
    """Run the base user serializer tests against AddonDeveloperSerializer."""
    serializer_class = AddonDeveloperSerializer

    def test_picture(self):
        # Without a picture_type, picture_url serializes to None.
        serialized = self.serialize()
        assert serialized['picture_url'] is None
        # Once a picture type is set, the absolute picture URL (a .png
        # named after the user id) is exposed.
        self.user.update(picture_type='image/jpeg')
        serialized = self.serialize()
        expected_url = absolutify(self.user.picture_url)
        assert serialized['picture_url'] == expected_url
        assert '%s.png' % self.user.id in serialized['picture_url']
class TestReplacementAddonSerializer(TestCase):
    """Tests for ReplacementAddonSerializer: resolving a ReplacementAddon
    path (add-on or collection) into a list of replacement guids."""

    def serialize(self, replacement):
        """Serialize a ReplacementAddon instance."""
        serializer = ReplacementAddonSerializer()
        return serializer.to_representation(replacement)

    def test_valid_addon_path(self):
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla')
        rep = ReplacementAddon.objects.create(
            guid='legacy@mozilla', path=u'/addon/stuff/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']
        # Edge case, but should accept numeric IDs too
        rep.update(path=u'/addon/%s/' % addon.id)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

    def test_invalid_addons(self):
        """Broken paths, invalid add-ons, etc, should fail gracefully to None.
        """
        rep = ReplacementAddon.objects.create(
            guid='legacy@mozilla', path=u'/addon/stuff/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        # Addon path doesn't exist.
        assert result['replacement'] == []
        # Add the add-on but make it not public
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla',
                              status=amo.STATUS_NULL)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []
        # Double check that the test is good and it will work once public.
        addon.update(status=amo.STATUS_PUBLIC)
        result = self.serialize(rep)
        assert result['replacement'] == [u'newstuff@mozilla']
        # But urls aren't resolved - and don't break everything
        rep.update(path=absolutify(addon.get_url_path()))
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

    def test_valid_collection_path(self):
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla')
        me = user_factory(username=u'me')
        collection = collection_factory(slug=u'bag', author=me)
        collection.add_addon(addon)
        rep = ReplacementAddon.objects.create(
            guid=u'legacy@mozilla', path=u'/collections/me/bag/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']
        # Edge case, but should accept numeric user IDs too
        rep.update(path=u'/collections/%s/bag/' % me.id)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

    def test_invalid_collections(self):
        """Broken paths, invalid users or collections, should fail gracefully
        to None."""
        rep = ReplacementAddon.objects.create(
            guid=u'legacy@mozilla', path=u'/collections/me/bag/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []
        # Create the user but not the collection.
        me = user_factory(username=u'me')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []
        # Create the collection but make the add-on invalid.
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla',
                              status=amo.STATUS_NULL)
        collection = collection_factory(slug=u'bag', author=me)
        collection.add_addon(addon)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []
        # Double check that the test is good and it will work once public.
        addon.update(status=amo.STATUS_PUBLIC)
        result = self.serialize(rep)
        assert result['replacement'] == [u'newstuff@mozilla']
class TestCompatOverrideSerializer(TestCase):
    """Tests for CompatOverrideSerializer: how CompatOverride and its
    CompatOverrideRange rows are rendered as version_ranges."""

    def serialize(self, override):
        """Serialize a CompatOverride instance."""
        serializer = CompatOverrideSerializer()
        return serializer.to_representation(override)

    def test_linked_addon(self):
        """An override linked to an Addon exposes its id via addon_id."""
        addon = addon_factory(guid='extrabad@thing')
        override = CompatOverride.objects.create(
            name='override with addon', guid=addon.guid, addon=addon)
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id)
        result = self.serialize(override)
        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'extrabad@thing'
        assert result['addon_id'] == addon.id
        assert result['name'] == 'override with addon'
        version_range = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert result['version_ranges'] == [version_range]

    def test_no_addon(self):
        """An override with no linked Addon serializes addon_id as None."""
        override = CompatOverride.objects.create(
            name='override', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id)
        result = self.serialize(override)
        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override'
        version_range = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert result['version_ranges'] == [version_range]

    def test_multiple_ranges(self):
        """Ranges with different add-on version bounds stay separate
        entries in version_ranges."""
        override = CompatOverride.objects.create(
            name='override with multiple ranges', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id, min_version='23.4',
            max_version='56.7.*')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.THUNDERBIRD.id, min_app_version='1.35',
            max_app_version='90.*')
        result = self.serialize(override)
        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override with multiple ranges'
        assert len(result['version_ranges']) == 2
        version_range_firefox = {
            'addon_min_version': '23.4',
            'addon_max_version': '56.7.*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert version_range_firefox in result['version_ranges']
        version_range_thunderbird = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.THUNDERBIRD.pretty,
                'id': amo.THUNDERBIRD.id,
                'min_version': '1.35',
                'max_version': '90.*',
                'guid': amo.THUNDERBIRD.guid
            }]
        }
        assert version_range_thunderbird in result['version_ranges']

    def test_collapsed_ranges(self):
        """Collapsed ranges are where there is a single version range of
        affected addons, but multiple applications affected."""
        override = CompatOverride.objects.create(
            name='override with single version range', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id,
            min_version='23.4', max_version='56.7.*')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.THUNDERBIRD.id,
            min_version='23.4', max_version='56.7.*',
            min_app_version='1.35', max_app_version='90.*')
        result = self.serialize(override)
        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override with single version range'
        assert len(result['version_ranges']) == 1
        assert result['version_ranges'][0]['addon_min_version'] == '23.4'
        assert result['version_ranges'][0]['addon_max_version'] == '56.7.*'
        applications = result['version_ranges'][0]['applications']
        assert len(applications) == 2
        application_firefox = {
            'name': amo.FIREFOX.pretty,
            'id': amo.FIREFOX.id,
            'min_version': '0',
            'max_version': '*',
            'guid': amo.FIREFOX.guid
        }
        assert application_firefox in applications
        application_thunderbird = {
            'name': amo.THUNDERBIRD.pretty,
            'id': amo.THUNDERBIRD.id,
            'min_version': '1.35',
            'max_version': '90.*',
            'guid': amo.THUNDERBIRD.guid
        }
        assert application_thunderbird in applications
| 41.121481 | 79 | 0.617304 |
from django.utils.translation import override
from rest_framework.test import APIRequestFactory
from olympia import amo
from olympia.accounts.tests.test_serializers import TestBaseUserSerializer
from olympia.addons.models import (
Addon, AddonCategory, AddonUser, Category, CompatOverride,
CompatOverrideRange, Persona, Preview, ReplacementAddon)
from olympia.addons.serializers import (
AddonDeveloperSerializer, AddonSerializer, AddonSerializerWithUnlistedData,
CompatOverrideSerializer, ESAddonAutoCompleteSerializer, ESAddonSerializer,
ESAddonSerializerWithUnlistedData, LanguageToolsSerializer,
LicenseSerializer, ReplacementAddonSerializer, SimpleVersionSerializer,
VersionSerializer)
from olympia.addons.utils import generate_addon_guid
from olympia.addons.views import AddonAutoCompleteSearchView, AddonSearchView
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.tests import (
ESTestCase, TestCase, addon_factory, collection_factory, file_factory,
user_factory, version_factory)
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.bandwagon.models import FeaturedCollection
from olympia.constants.categories import CATEGORIES
from olympia.constants.licenses import LICENSES_BY_BUILTIN
from olympia.files.models import WebextPermission
from olympia.versions.models import (
ApplicationsVersions, AppVersion, License, VersionPreview)
class AddonSerializerOutputTestMixin(object):
    def setUp(self):
        super(AddonSerializerOutputTestMixin, self).setUp()
        # Plain GET request used as serializer context by default.
        self.request = APIRequestFactory().get('/')
def _test_author(self, author, data):
assert data == {
'id': author.pk,
'name': author.name,
'picture_url': None,
'url': absolutify(author.get_url_path()),
'username': author.username,
}
    def _test_version_license_and_release_notes(self, version, data):
        """Check serialized release notes and license for a version.

        Overridden in the ES subclass, which omits those fields."""
        assert data['release_notes'] == {
            'en-US': u'Release notes in english',
            'fr': u'Notes de version en français',
        }
        assert data['license']
        assert dict(data['license']) == {
            'id': version.license.pk,
            'name': {'en-US': u'My License', 'fr': u'Mä Licence'},
            'url': 'http://license.example.com/',
        }
    def _test_version(self, version, data):
        """Check the serialized representation of a version: compatibility,
        single file entry, edit_url, reviewed date, version string, url."""
        assert data['id'] == version.pk
        assert data['compatibility']
        assert len(data['compatibility']) == len(version.compatible_apps)
        for app, compat in version.compatible_apps.items():
            assert data['compatibility'][app.short] == {
                'min': compat.min.version,
                'max': compat.max.version
            }
        assert data['is_strict_compatibility_enabled'] is False
        assert data['files']
        assert len(data['files']) == 1
        result_file = data['files'][0]
        # The version is expected to have exactly one file here; compare
        # against the most recently created one.
        file_ = version.files.latest('pk')
        assert result_file['id'] == file_.pk
        assert result_file['created'] == (
            file_.created.replace(microsecond=0).isoformat() + 'Z')
        assert result_file['hash'] == file_.hash
        assert result_file['is_restart_required'] == file_.is_restart_required
        assert result_file['is_webextension'] == file_.is_webextension
        assert (
            result_file['is_mozilla_signed_extension'] ==
            file_.is_mozilla_signed_extension)
        assert result_file['platform'] == (
            amo.PLATFORM_CHOICES_API[file_.platform])
        assert result_file['size'] == file_.size
        assert result_file['status'] == amo.STATUS_CHOICES_API[file_.status]
        assert result_file['url'] == file_.get_url_path(src='')
        assert result_file['permissions'] == file_.webext_permissions_list
        assert data['edit_url'] == absolutify(
            self.addon.get_dev_url(
                'versions.edit', args=[version.pk], prefix_only=True))
        assert data['reviewed'] == version.reviewed
        assert data['version'] == version.version
        assert data['url'] == absolutify(version.get_url_path())
    def test_basic(self):
        """Build a fully-populated add-on (categories, license, authors,
        previews, extra Thunderbird compat) and check every serialized
        field. Returns the serialized result so subclasses can extend."""
        cat1 = Category.from_static_category(
            CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['bookmarks'])
        cat1.save()
        license = License.objects.create(
            name={
                'en-US': u'My License',
                'fr': u'Mä Licence',
            },
            text={
                'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
            },
            url='http://license.example.com/'
        )
        self.addon = addon_factory(
            average_daily_users=4242,
            average_rating=4.21,
            bayesian_rating=4.22,
            category=cat1,
            contributions=u'https://paypal.me/foobar/',
            description=u'My Addôn description',
            developer_comments=u'Dévelopers Addôn comments',
            file_kw={
                'hash': 'fakehash',
                'is_restart_required': False,
                'is_webextension': True,
                'platform': amo.PLATFORM_WIN.id,
                'size': 42,
            },
            guid=generate_addon_guid(),
            homepage=u'https://www.example.org/',
            icon_hash='fakehash',
            icon_type='image/png',
            name=u'My Addôn',
            public_stats=True,
            slug='my-addon',
            summary=u'My Addôn summary',
            support_email=u'support@example.org',
            support_url=u'https://support.example.org/support/my-addon/',
            tags=['some_tag', 'some_other_tag'],
            total_ratings=666,
            text_ratings_count=555,
            version_kw={
                'license': license,
                'releasenotes': {
                    'en-US': u'Release notes in english',
                    'fr': u'Notes de version en français',
                },
            },
            weekly_downloads=2147483647,
        )
        # A non-listed author must not show up in the serialized authors.
        AddonUser.objects.create(user=user_factory(username='hidden_author'),
                                 addon=self.addon, listed=False)
        # Authors are created out of order to check position-based ordering.
        second_author = user_factory(
            username='second_author', display_name=u'Secönd Author')
        first_author = user_factory(
            username='first_author', display_name=u'First Authôr')
        AddonUser.objects.create(
            user=second_author, addon=self.addon, position=2)
        AddonUser.objects.create(
            user=first_author, addon=self.addon, position=1)
        # Previews are also created out of order to check ordering.
        second_preview = Preview.objects.create(
            addon=self.addon, position=2,
            caption={'en-US': u'My câption', 'fr': u'Mön tîtré'},
            sizes={'thumbnail': [199, 99], 'image': [567, 780]})
        first_preview = Preview.objects.create(addon=self.addon, position=1)
        # Add Thunderbird compatibility to the current version.
        av_min = AppVersion.objects.get_or_create(
            application=amo.THUNDERBIRD.id, version='2.0.99')[0]
        av_max = AppVersion.objects.get_or_create(
            application=amo.THUNDERBIRD.id, version='3.0.99')[0]
        ApplicationsVersions.objects.get_or_create(
            application=amo.THUNDERBIRD.id, version=self.addon.current_version,
            min=av_min, max=av_max)
        # Invalidate the cached compatible apps after adding compat.
        del self.addon.current_version._compatible_apps
        cat2 = Category.from_static_category(
            CATEGORIES[amo.FIREFOX.id][amo.ADDON_EXTENSION]['alerts-updates'])
        cat2.save()
        AddonCategory.objects.create(addon=self.addon, category=cat2)
        cat3 = Category.from_static_category(
            CATEGORIES[amo.THUNDERBIRD.id][amo.ADDON_EXTENSION]['calendar'])
        cat3.save()
        AddonCategory.objects.create(addon=self.addon, category=cat3)
        result = self.serialize()
        assert result['id'] == self.addon.pk
        assert result['average_daily_users'] == self.addon.average_daily_users
        assert result['categories'] == {
            'firefox': ['alerts-updates', 'bookmarks'],
            'thunderbird': ['calendar']}
        # In this serializer latest_unlisted_version is omitted.
        assert 'latest_unlisted_version' not in result
        assert result['current_version']
        self._test_version(
            self.addon.current_version, result['current_version'])
        assert result['current_version']['url'] == absolutify(
            reverse('addons.versions',
                    args=[self.addon.slug, self.addon.current_version.version])
        )
        self._test_version_license_and_release_notes(
            self.addon.current_version, result['current_version'])
        assert result['authors']
        assert len(result['authors']) == 2
        self._test_author(first_author, result['authors'][0])
        self._test_author(second_author, result['authors'][1])
        assert result['contributions_url'] == self.addon.contributions
        assert result['edit_url'] == absolutify(self.addon.get_dev_url())
        assert result['default_locale'] == self.addon.default_locale
        assert result['description'] == {'en-US': self.addon.description}
        assert result['developer_comments'] == {
            'en-US': self.addon.developer_comments}
        assert result['guid'] == self.addon.guid
        assert result['has_eula'] is False
        assert result['has_privacy_policy'] is False
        assert result['homepage'] == {
            'en-US': unicode(self.addon.homepage),
        }
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
        assert result['icons'] == {
            '32': absolutify(self.addon.get_icon_url(32)),
            '64': absolutify(self.addon.get_icon_url(64))
        }
        assert result['is_disabled'] == self.addon.is_disabled
        assert result['is_experimental'] == self.addon.is_experimental is False
        assert result['is_featured'] == self.addon.is_featured() is False
        assert result['is_source_public'] == self.addon.view_source
        assert result['last_updated'] == (
            self.addon.last_updated.replace(microsecond=0).isoformat() + 'Z')
        assert result['name'] == {'en-US': self.addon.name}
        assert result['previews']
        assert len(result['previews']) == 2
        result_preview = result['previews'][0]
        assert result_preview['id'] == first_preview.pk
        assert result_preview['caption'] is None
        assert result_preview['image_url'] == absolutify(
            first_preview.image_url)
        assert result_preview['thumbnail_url'] == absolutify(
            first_preview.thumbnail_url)
        assert result_preview['image_size'] == first_preview.image_size
        assert result_preview['thumbnail_size'] == first_preview.thumbnail_size
        result_preview = result['previews'][1]
        assert result_preview['id'] == second_preview.pk
        assert result_preview['caption'] == {
            'en-US': u'My câption',
            'fr': u'Mön tîtré'
        }
        assert result_preview['image_url'] == absolutify(
            second_preview.image_url)
        assert result_preview['thumbnail_url'] == absolutify(
            second_preview.thumbnail_url)
        assert (result_preview['image_size'] == second_preview.image_size ==
                [567, 780])
        assert (result_preview['thumbnail_size'] ==
                second_preview.thumbnail_size == [199, 99])
        assert result['ratings'] == {
            'average': self.addon.average_rating,
            'bayesian_average': self.addon.bayesian_rating,
            'count': self.addon.total_ratings,
            'text_count': self.addon.text_ratings_count,
        }
        assert (
            result['ratings_url'] == absolutify(self.addon.ratings_url) ==
            absolutify(reverse('addons.ratings.list', args=[self.addon.slug])))
        assert result['public_stats'] == self.addon.public_stats
        assert result['requires_payment'] == self.addon.requires_payment
        assert result['review_url'] == absolutify(
            reverse('reviewers.review', args=[self.addon.pk]))
        assert result['slug'] == self.addon.slug
        assert result['status'] == 'public'
        assert result['summary'] == {'en-US': self.addon.summary}
        assert result['support_email'] == {'en-US': self.addon.support_email}
        assert result['support_url'] == {
            'en-US': unicode(self.addon.support_url),
        }
        assert 'theme_data' not in result
        assert set(result['tags']) == set(['some_tag', 'some_other_tag'])
        assert result['type'] == 'extension'
        assert result['url'] == absolutify(self.addon.get_url_path())
        assert result['weekly_downloads'] == self.addon.weekly_downloads
        return result
    def test_wrap_outgoing_links(self):
        """With ?wrap_outgoing_links=1, contributions/homepage/support urls
        are wrapped through the outgoing redirector."""
        self.addon = addon_factory(
            contributions=u'https://paypal.me/fôobar',
            homepage='http://support.example.com/',
            support_url=u'https://support.example.org/support/my-âddon/')
        self.request = APIRequestFactory().get('/', {'wrap_outgoing_links': 1})
        result = self.serialize()
        assert result['contributions_url'] == (
            get_outgoing_url(unicode(self.addon.contributions)))
        assert result['homepage'] == {
            'en-US': get_outgoing_url(unicode(self.addon.homepage)),
        }
        assert result['support_url'] == {
            'en-US': get_outgoing_url(unicode(self.addon.support_url)),
        }
        # Try a single translation.
        self.request = APIRequestFactory().get('/', {
            'lang': 'en-US', 'wrap_outgoing_links': 1})
        result = self.serialize()
        assert result['contributions_url'] == (
            get_outgoing_url(unicode(self.addon.contributions)))
        assert result['homepage'] == (
            get_outgoing_url(unicode(self.addon.homepage))
        )
        assert result['support_url'] == (
            get_outgoing_url(unicode(self.addon.support_url))
        )
        # Try with empty strings/None. Annoyingly, contribution model field
        # does not let us set it to None, so use a translated field for that
        # part of the test.
        self.addon.update(contributions='', homepage=None)
        result = self.serialize()
        assert result['contributions_url'] == ''
        assert result['homepage'] is None
def test_latest_unlisted_version(self):
    """Regular serializer omits latest_unlisted_version even when one
    exists (it is restricted to users with specific rights)."""
    self.addon = addon_factory()
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
        version='1.1')
    assert self.addon.latest_unlisted_version

    result = self.serialize()
    # In this serializer latest_unlisted_version is omitted even if there
    # is one, because it's limited to users with specific rights.
    assert 'latest_unlisted_version' not in result
def test_latest_unlisted_version_with_rights(self):
    """The unlisted-data serializer variant does expose
    latest_unlisted_version, serialized like any other version."""
    self.serializer_class = self.serializer_class_with_unlisted_data

    self.addon = addon_factory()
    version_factory(
        addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED,
        version='1.1')
    assert self.addon.latest_unlisted_version

    result = self.serialize()
    assert result['latest_unlisted_version']
    self._test_version(
        self.addon.latest_unlisted_version,
        result['latest_unlisted_version'])
    # Unlisted versions have no public detail page, hence the empty path.
    assert result['latest_unlisted_version']['url'] == absolutify('')
def test_is_disabled(self):
    """An add-on disabled by its developer serializes is_disabled=True."""
    self.addon = addon_factory(disabled_by_user=True)
    data = self.serialize()
    assert data['is_disabled'] is True
def test_is_source_public(self):
    """view_source on the add-on maps to is_source_public in the output."""
    self.addon = addon_factory(view_source=True)
    data = self.serialize()
    assert data['is_source_public'] is True
def test_is_experimental(self):
    """is_experimental passes straight through to the serialized output."""
    self.addon = addon_factory(is_experimental=True)
    data = self.serialize()
    assert data['is_experimental'] is True
def test_requires_payment(self):
    """requires_payment passes straight through to the serialized output."""
    self.addon = addon_factory(requires_payment=True)
    data = self.serialize()
    assert data['requires_payment'] is True
def test_icon_url_without_icon_type_set(self):
    """Without an icon_type, icon_url/icons fall back to whatever
    get_icon_url() returns for the default icon, at 32 and 64 px."""
    self.addon = addon_factory()
    result = self.serialize()

    assert result['id'] == self.addon.pk
    assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
    assert result['icons'] == {
        '32': absolutify(self.addon.get_icon_url(32)),
        '64': absolutify(self.addon.get_icon_url(64))
    }
def test_no_current_version(self):
    """Deleting the only version leaves current_version as None in the
    serialized output instead of raising."""
    self.addon = addon_factory(name='lol')
    self.addon.current_version.delete()
    data = self.serialize()
    assert data['id'] == self.addon.pk
    assert data['current_version'] is None
def test_no_current_version_files(self):
    """A current version with no File rows serializes with files == [] and
    empty compatibility, instead of blowing up."""
    self.addon = addon_factory(name='lol')
    self.addon.current_version.delete()
    # versions.create() makes a bare Version with no File attached.
    version = self.addon.versions.create(version='0.42')
    self.addon._current_version = version
    self.addon.save()
    result = self.serialize()

    assert result['id'] == self.addon.pk
    assert result['current_version']
    result_version = result['current_version']
    assert result_version['reviewed'] == version.reviewed
    assert result_version['version'] == version.version
    assert result_version['files'] == []
    assert result_version['is_strict_compatibility_enabled'] is False
    assert result_version['compatibility'] == {}
def test_deleted(self):
    """A deleted add-on is still serializable and reports status
    'deleted' (the test base serializes from the unfiltered manager)."""
    self.addon = addon_factory(name=u'My Deleted Addôn')
    self.addon.delete()
    data = self.serialize()
    assert data['id'] == self.addon.pk
    assert data['status'] == 'deleted'
def test_has_policies(self):
    """Setting an EULA and a privacy policy flips the has_eula /
    has_privacy_policy booleans (the texts themselves are not exposed)."""
    self.addon = addon_factory()
    self.addon.eula = {
        'en-US': u'My Addôn EULA in english',
        'fr': u'Houlalà',
    }
    self.addon.privacy_policy = 'lol'
    self.addon.save()

    result = self.serialize()
    assert result['has_eula'] is True
    assert result['has_privacy_policy'] is True
def test_is_featured(self):
    """Membership in a FeaturedCollection makes is_featured True."""
    self.addon = addon_factory()
    collection = collection_factory()
    FeaturedCollection.objects.create(collection=collection,
                                      application=collection.application)
    collection.add_addon(self.addon)
    assert self.addon.is_featured()

    result = self.serialize()
    assert result['is_featured'] is True
def test_translations(self):
    """Translated fields serialize as {lang: value} dicts by default and
    as a flat string when a specific ?lang is requested."""
    translated_descriptions = {
        'en-US': u'My Addôn description in english',
        'fr': u'Description de mon Addôn',
    }
    translated_homepages = {
        'en-US': u'http://www.google.com/',
        'fr': u'http://www.googlé.fr/',
    }
    self.addon = addon_factory()
    self.addon.description = translated_descriptions
    self.addon.homepage = translated_homepages
    self.addon.save()

    result = self.serialize()
    assert result['description'] == translated_descriptions
    assert result['homepage'] == translated_homepages

    # Try a single translation. The locale activation is normally done by
    # LocaleAndAppURLMiddleware, but since we're directly calling the
    # serializer we need to do it ourselves.
    self.request = APIRequestFactory().get('/', {'lang': 'fr'})
    with override('fr'):
        result = self.serialize()
    assert result['description'] == translated_descriptions['fr']
    assert result['homepage'] == translated_homepages['fr']
def test_persona_with_persona_id(self):
    """An "old" style Persona (non-zero persona_id) exposes theme_data."""
    self.addon = addon_factory(type=amo.ADDON_PERSONA)
    persona = self.addon.persona
    persona.persona_id = 42  # Non-zero means getpersonas.com-era Persona.
    persona.header = u'myheader.jpg'
    persona.footer = u'myfooter.jpg'
    persona.accentcolor = u'336699'
    persona.textcolor = u'f0f0f0'
    persona.author = u'Me-me-me-Myself'
    persona.display_username = u'my-username'
    persona.save()
    assert not persona.is_new()

    result = self.serialize()
    assert result['theme_data'] == persona.theme_data
def test_persona(self):
    """A "new" style Persona (persona_id == 0) exposes theme_data with an
    HTML-escaped description, reports popularity as average_daily_users,
    and omits weekly_downloads."""
    self.addon = addon_factory(
        name=u'My Personâ',
        description=u'<script>alert(42)</script>My Personä description',
        type=amo.ADDON_PERSONA)
    persona = self.addon.persona
    persona.persona_id = 0  # For "new" style Personas this is always 0.
    persona.header = u'myheader.png'
    persona.footer = u'myfooter.png'
    persona.accentcolor = u'336699'
    persona.textcolor = u'f0f0f0'
    persona.author = u'Me-me-me-Myself'
    persona.display_username = u'my-username'
    persona.popularity = 123456
    persona.save()
    assert persona.is_new()

    result = self.serialize()
    assert result['theme_data'] == persona.theme_data
    # The raw <script> tag must be escaped away in the serialized
    # description. (The original pair of asserts was contradictory —
    # it checked both "not in" and "in" for the same raw string; the
    # second assert is meant to check the escaped form.)
    assert '<script>' not in result['theme_data']['description']
    assert '&lt;script&gt;' in result['theme_data']['description']
    assert result['average_daily_users'] == persona.popularity
    assert 'weekly_downloads' not in result
def test_handle_persona_without_persona_data_in_db(self):
    """A Persona add-on whose Persona row was deleted must still
    serialize: theme_data is omitted and the icon falls back to the
    default add-on icon."""
    self.addon = addon_factory(type=amo.ADDON_PERSONA)
    Persona.objects.get(addon=self.addon).delete()
    # .reload() does not clear self.addon.persona, so do it manually.
    self.addon = Addon.objects.get(pk=self.addon.pk)
    result = self.serialize()

    assert result['id'] == self.addon.pk
    assert result['type'] == 'persona'
    # theme_data should be missing, which sucks, but is better than a 500.
    assert 'theme_data' not in result
    # icon url should just be a default icon instead of the Persona icon.
    assert result['icon_url'] == (
        'http://testserver/static/img/addon-icons/default-64.png')
    assert result['icons'] == {
        '32': 'http://testserver/static/img/addon-icons/default-32.png',
        '64': 'http://testserver/static/img/addon-icons/default-64.png'
    }
def test_webextension(self):
    """WebExtension files carry their declared permissions through
    serialization."""
    self.addon = addon_factory(file_kw={'is_webextension': True})
    # Give one of the versions some webext permissions to test that.
    WebextPermission.objects.create(
        file=self.addon.current_version.all_files[0],
        permissions=['bookmarks', 'random permission']
    )

    result = self.serialize()

    self._test_version(
        self.addon.current_version, result['current_version'])
    # Double check the permissions got correctly set.
    assert result['current_version']['files'][0]['permissions'] == ([
        'bookmarks', 'random permission'])
def test_is_restart_required(self):
    """File-level is_restart_required is covered by the shared
    version-serialization assertions."""
    self.addon = addon_factory(file_kw={'is_restart_required': True})
    result = self.serialize()

    self._test_version(
        self.addon.current_version, result['current_version'])
def test_special_compatibility_cases(self):
    """Edge cases of the compatibility dict: strict compatibility, no
    compatibility info at all, and the faked data for NO_COMPAT types."""
    # Test an add-on with strict compatibility enabled.
    self.addon = addon_factory(file_kw={'strict_compatibility': True})
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {
        'firefox': {'max': u'5.0.99', 'min': u'4.0.99'}
    }
    assert result_version['is_strict_compatibility_enabled'] is True

    # Test an add-on with no compatibility info.
    self.addon = addon_factory()
    ApplicationsVersions.objects.filter(
        version=self.addon.current_version).delete()
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {}
    assert result_version['is_strict_compatibility_enabled'] is False

    # Test an add-on with some compatibility info but that should be
    # ignored because its type is in NO_COMPAT.
    self.addon = addon_factory(type=amo.ADDON_SEARCH)
    av_min = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='2.0.99')[0]
    av_max = AppVersion.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version='3.0.99')[0]
    ApplicationsVersions.objects.get_or_create(
        application=amo.THUNDERBIRD.id, version=self.addon.current_version,
        min=av_min, max=av_max)
    result_version = self.serialize()['current_version']
    assert result_version['compatibility'] == {
        'android': {'max': '9999', 'min': '11.0'},
        'firefox': {'max': '9999', 'min': '4.0'},
        'seamonkey': {'max': '9999', 'min': '2.1'},
        # No thunderbird: it does not support that type, and when we return
        # fake compatibility data for NO_COMPAT add-ons we do obey that.
    }
    assert result_version['is_strict_compatibility_enabled'] is False
def test_static_theme_preview(self):
    """Static themes use VersionPreview of the current version for
    'previews', ignoring Preview objects attached to the add-on."""
    self.addon = addon_factory(type=amo.ADDON_STATICTHEME)
    # Attach some Preview instances to the add-on, they should be ignored
    # since it's a static theme.
    Preview.objects.create(
        addon=self.addon, position=1,
        caption={'en-US': u'My câption', 'fr': u'Mön tîtré'},
        sizes={'thumbnail': [123, 45], 'image': [678, 910]})
    result = self.serialize()
    assert result['previews'] == []

    # Add a VersionPreview to each of two versions; only the current
    # version's preview must be serialized.
    first_version = self.addon.current_version
    VersionPreview.objects.create(
        version=first_version,
        sizes={'thumbnail': [12, 34], 'image': [56, 78]})
    second_version = version_factory(addon=self.addon)
    current_preview = VersionPreview.objects.create(
        version=second_version,
        sizes={'thumbnail': [56, 78], 'image': [91, 234]})
    assert self.addon.reload().current_version == second_version
    result = self.serialize()
    assert len(result['previews']) == 1
    assert result['previews'][0]['id'] == current_preview.pk
    # VersionPreview has no caption, so it is always None.
    assert result['previews'][0]['caption'] is None
    assert result['previews'][0]['image_url'] == absolutify(
        current_preview.image_url)
    assert result['previews'][0]['thumbnail_url'] == absolutify(
        current_preview.thumbnail_url)
    assert result['previews'][0]['image_size'] == (
        current_preview.image_size)
    assert result['previews'][0]['thumbnail_size'] == (
        current_preview.thumbnail_size)

    # With no current version there is nothing to pull previews from.
    self.addon.update(_current_version=None)
    result = self.serialize()
    assert result['current_version'] is None
    assert result['previews'] == []
class TestAddonSerializerOutput(AddonSerializerOutputTestMixin, TestCase):
    """Runs the shared serializer-output assertions against the plain,
    database-backed AddonSerializer."""
    serializer_class = AddonSerializer
    serializer_class_with_unlisted_data = AddonSerializerWithUnlistedData

    def serialize(self):
        """Serialize self.addon, re-fetched from the db to drop caches."""
        self.serializer = self.serializer_class(
            context={'request': self.request})
        # Manually reload the add-on first to clear any cached properties.
        self.addon = Addon.unfiltered.get(pk=self.addon.pk)
        return self.serializer.to_representation(self.addon)
class TestESAddonSerializerOutput(AddonSerializerOutputTestMixin, ESTestCase):
    """Runs the shared assertions against the Elasticsearch-backed
    ESAddonSerializer, checking serialization needs zero db queries."""
    serializer_class = ESAddonSerializer
    serializer_class_with_unlisted_data = ESAddonSerializerWithUnlistedData

    def tearDown(self):
        super(TestESAddonSerializerOutput, self).tearDown()
        self.empty_index('default')
        self.refresh()

    def search(self):
        """Reindex and fetch self.addon back out of the search index."""
        self.reindex(Addon)

        view = AddonSearchView()
        view.request = self.request
        qs = view.get_queryset()
        return qs.filter('term', id=self.addon.pk).execute()[0]

    def serialize(self):
        """Serialize the ES hit; must not hit the database at all."""
        self.serializer = self.serializer_class(
            context={'request': self.request})

        obj = self.search()

        with self.assertNumQueries(0):
            result = self.serializer.to_representation(obj)
        return result

    def _test_author(self, author, data):
        """ES results expose a trimmed author dict (no picture fields)."""
        assert data == {
            'id': author.pk,
            'name': author.name,
            'url': absolutify(author.get_url_path()),
            'username': author.username,
        }

    def _test_version_license_and_release_notes(self, version, data):
        """License and release notes are not indexed, so not serialized."""
        assert 'license' not in data
        assert 'release_notes' not in data
class TestVersionSerializerOutput(TestCase):
    """Output tests for the full VersionSerializer: files, license
    (including builtin licenses), release notes, channel and urls."""

    def setUp(self):
        super(TestVersionSerializerOutput, self).setUp()
        self.request = APIRequestFactory().get('/')

    def serialize(self):
        """Serialize self.version through VersionSerializer."""
        serializer = VersionSerializer(context={'request': self.request})
        return serializer.to_representation(self.version)

    def test_basic(self):
        """Fully populated version: two files on different platforms, a
        license with text, translated release notes, compatibility."""
        now = self.days_ago(0)
        license = License.objects.create(
            name={
                'en-US': u'My License',
                'fr': u'Mä Licence',
            },
            text={
                'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
            },
            url='http://license.example.com/'
        )
        addon = addon_factory(
            file_kw={
                'hash': 'fakehash',
                'is_webextension': True,
                'is_mozilla_signed_extension': True,
                'platform': amo.PLATFORM_WIN.id,
                'size': 42,
            },
            version_kw={
                'license': license,
                'min_app_version': '50.0',
                'max_app_version': '*',
                'releasenotes': {
                    'en-US': u'Release notes in english',
                    'fr': u'Notes de version en français',
                },
                'reviewed': now,
            }
        )

        self.version = addon.current_version
        first_file = self.version.files.latest('pk')
        file_factory(
            version=self.version, platform=amo.PLATFORM_MAC.id)
        second_file = self.version.files.latest('pk')
        # Force reload of all_files cached property.
        del self.version.all_files

        result = self.serialize()
        assert result['id'] == self.version.pk

        assert result['compatibility'] == {
            'firefox': {'max': u'*', 'min': u'50.0'}
        }

        assert result['files']
        assert len(result['files']) == 2

        assert result['files'][0]['id'] == first_file.pk
        assert result['files'][0]['created'] == (
            first_file.created.replace(microsecond=0).isoformat() + 'Z')
        assert result['files'][0]['hash'] == first_file.hash
        assert result['files'][0]['is_webextension'] == (
            first_file.is_webextension)
        assert result['files'][0]['is_mozilla_signed_extension'] == (
            first_file.is_mozilla_signed_extension)
        assert result['files'][0]['platform'] == 'windows'
        assert result['files'][0]['size'] == first_file.size
        assert result['files'][0]['status'] == 'public'
        assert result['files'][0]['url'] == first_file.get_url_path(src='')

        assert result['files'][1]['id'] == second_file.pk
        assert result['files'][1]['created'] == (
            second_file.created.replace(microsecond=0).isoformat() + 'Z')
        assert result['files'][1]['hash'] == second_file.hash
        assert result['files'][1]['is_webextension'] == (
            second_file.is_webextension)
        assert result['files'][1]['is_mozilla_signed_extension'] == (
            second_file.is_mozilla_signed_extension)
        assert result['files'][1]['platform'] == 'mac'
        assert result['files'][1]['size'] == second_file.size
        assert result['files'][1]['status'] == 'public'
        assert result['files'][1]['url'] == second_file.get_url_path(src='')

        assert result['channel'] == 'listed'
        assert result['edit_url'] == absolutify(addon.get_dev_url(
            'versions.edit', args=[self.version.pk], prefix_only=True))
        assert result['release_notes'] == {
            'en-US': u'Release notes in english',
            'fr': u'Notes de version en français',
        }
        assert result['license']
        assert dict(result['license']) == {
            'id': license.pk,
            'name': {'en-US': u'My License', 'fr': u'Mä Licence'},
            'text': {
                'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
            },
            'url': 'http://license.example.com/',
        }
        assert result['reviewed'] == (
            now.replace(microsecond=0).isoformat() + 'Z')
        assert result['url'] == absolutify(self.version.get_url_path())

    def test_unlisted(self):
        """Unlisted versions report channel == 'unlisted'."""
        addon = addon_factory()
        self.version = version_factory(
            addon=addon, channel=amo.RELEASE_CHANNEL_UNLISTED)
        result = self.serialize()
        assert result['channel'] == 'unlisted'

    def test_no_license(self):
        """A version without a license serializes license as None."""
        addon = addon_factory()
        self.version = addon.current_version
        self.version.update(license=None)
        result = self.serialize()
        assert result['id'] == self.version.pk
        assert result['license'] is None

    def test_license_no_url(self):
        """A custom license with no url falls back to the version's
        license page; builtin licenses with no url keep None."""
        addon = addon_factory()
        self.version = addon.current_version
        license = self.version.license
        license.update(url=None, builtin=license.OTHER)
        result = self.serialize()
        assert result['id'] == self.version.pk
        assert result['license']
        assert result['license']['id'] == license.pk
        assert result['license']['url'] == absolutify(
            self.version.license_url())

        license.update(builtin=1)
        result = self.serialize()
        # Builtin licenses with no url shouldn't get the version license url.
        assert result['license']['url'] is None

    def test_license_serializer_no_url_no_parent(self):
        """A License with no url and no parent Version shouldn't occur in
        practice, but the serializer must not blow up if that does happens."""
        addon = addon_factory()
        self.version = addon.current_version
        license = self.version.license
        license.update(url=None)
        result = LicenseSerializer(
            context={'request': self.request}).to_representation(license)
        assert result['id'] == license.pk
        # LicenseSerializer is unable to find the Version, so it falls back to
        # None.
        assert result['url'] is None

    def test_builtin_license(self):
        """Builtin license names come from constants and honor the lang
        negotiation (?lang flattens, Accept-Language changes the default)."""
        addon = addon_factory()
        self.version = addon.current_version
        license = self.version.license
        license.update(builtin=18)
        assert license._constant == LICENSES_BY_BUILTIN[18]

        result = LicenseSerializer(
            context={'request': self.request}).to_representation(license)
        assert result['id'] == license.pk
        # A request with no ?lang gets you the site default l10n in a dict to
        # match how non-constant values are returned.
        assert result['name'] == {
            'en-US': unicode(LICENSES_BY_BUILTIN[18].name)}

        accept_request = APIRequestFactory().get('/')
        accept_request.LANG = 'de'
        result = LicenseSerializer(
            context={'request': accept_request}).to_representation(license)
        # An Accept-Language should result in a different default though.
        assert result['name'] == {
            'de': unicode(LICENSES_BY_BUILTIN[18].name)}

        # But a requested lang returns a flat string
        lang_request = APIRequestFactory().get('/?lang=fr')
        result = LicenseSerializer(
            context={'request': lang_request}).to_representation(license)
        assert result['name'] == unicode(LICENSES_BY_BUILTIN[18].name)

    def test_file_webext_permissions(self):
        """Files expose webext permissions; empty list when none exist."""
        self.version = addon_factory().current_version
        result = self.serialize()
        # No permissions.
        assert result['files'][0]['permissions'] == []

        self.version = addon_factory(
            file_kw={'is_webextension': True}).current_version
        permissions = ['dangerdanger', 'high', 'voltage']
        WebextPermission.objects.create(
            permissions=permissions, file=self.version.all_files[0])
        result = self.serialize()
        assert result['files'][0]['permissions'] == permissions
class TestSimpleVersionSerializerOutput(TestCase):
    """SimpleVersionSerializer includes the license but strips its text."""

    def setUp(self):
        self.request = APIRequestFactory().get('/')

    def serialize(self):
        """Serialize self.version through SimpleVersionSerializer."""
        serializer = SimpleVersionSerializer(context={'request': self.request})
        return serializer.to_representation(self.version)

    def test_license_included_without_text(self):
        """License id/name/url are serialized, the (potentially huge)
        license text is omitted."""
        now = self.days_ago(0)
        license = License.objects.create(
            name={
                'en-US': u'My License',
                'fr': u'Mä Licence',
            },
            text={
                'en-US': u'Lorem ipsum dolor sit amet, has nemore patrioqué',
            },
            url='http://license.example.com/'
        )
        addon = addon_factory(
            version_kw={
                'license': license,
                'reviewed': now,
            }
        )

        self.version = addon.current_version
        result = self.serialize()
        assert result['id'] == self.version.pk
        assert result['license'] is not None
        assert result['license']['id'] == license.pk
        assert result['license']['name']['en-US'] == 'My License'
        assert result['license']['name']['fr'] == u'Mä Licence'
        assert result['license']['url'] == 'http://license.example.com/'
        assert 'text' not in result['license']
class TestLanguageToolsSerializerOutput(TestCase):
    """Output tests for LanguageToolsSerializer (language packs and
    dictionaries)."""

    def setUp(self):
        self.request = APIRequestFactory().get('/')

    def serialize(self):
        """Serialize self.addon through LanguageToolsSerializer."""
        serializer = LanguageToolsSerializer(context={'request': self.request})
        return serializer.to_representation(self.addon)

    def test_basic(self):
        """Basic fields for a language pack; no compatible version is
        exposed without the app/appversion parameters."""
        self.addon = addon_factory(
            type=amo.ADDON_LPAPP, target_locale='fr',
            locale_disambiguation=u'lolé')
        result = self.serialize()

        assert result['id'] == self.addon.pk
        assert result['default_locale'] == self.addon.default_locale
        assert result['guid'] == self.addon.guid
        assert result['locale_disambiguation'] == (
            self.addon.locale_disambiguation)
        assert result['name'] == {'en-US': self.addon.name}
        assert result['slug'] == self.addon.slug
        assert result['target_locale'] == self.addon.target_locale
        assert result['type'] == 'language'
        assert result['url'] == absolutify(self.addon.get_url_path())
        assert 'current_compatible_version' not in result

    def test_basic_dict(self):
        """Dictionaries serialize with type 'dictionary'."""
        self.addon = addon_factory(type=amo.ADDON_DICT)
        result = self.serialize()
        assert result['type'] == 'dictionary'
        assert 'current_compatible_version' not in result

    def test_current_compatible_version(self):
        """current_compatible_version comes from the view-prefetched
        compatible_versions list, not from current_version."""
        # Set a filename to make sure the file actually exists.
        # file_factory (used via addon_factory) copies files that exists
        # as fixtures in src/olympia/files/fixtures/files to their rightful
        # place. We need that to test the localepicker properly.
        file_kw = {'filename': 'langpack-localepicker.xpi'}
        self.addon = addon_factory(type=amo.ADDON_LPAPP, file_kw=file_kw)
        # compatible_versions is set by the view through prefetch, it
        # looks like a list.
        self.addon.compatible_versions = [self.addon.current_version]
        self.addon.compatible_versions[0].update(created=self.days_ago(1))

        # Create a new current version, just to prove that
        # current_compatible_version does not use that.
        version_factory(addon=self.addon, file_kw=file_kw)
        # Bug fix: the original had a bare `self.addon.reload` attribute
        # access (a no-op); reload() must be *called* to refresh the
        # add-on from the database (compare usage elsewhere:
        # `self.addon.reload().current_version`).
        self.addon.reload()
        assert (
            self.addon.compatible_versions[0] !=
            self.addon.current_version)

        self.request = APIRequestFactory().get('/?app=firefox&appversion=57.0')
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is not None
        assert set(result['current_compatible_version'].keys()) == set(
            ['id', 'files', 'reviewed', 'version'])

        # Both None and an empty prefetch list mean "no compatible version".
        self.addon.compatible_versions = None
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is None

        self.addon.compatible_versions = []
        result = self.serialize()
        assert 'current_compatible_version' in result
        assert result['current_compatible_version'] is None
class TestESAddonAutoCompleteSerializer(ESTestCase):
    """ESAddonAutoCompleteSerializer returns only id/name/icon_url/url,
    with no database queries."""

    def setUp(self):
        super(TestESAddonAutoCompleteSerializer, self).setUp()
        self.request = APIRequestFactory().get('/')

    def tearDown(self):
        super(TestESAddonAutoCompleteSerializer, self).tearDown()
        self.empty_index('default')
        self.refresh()

    def search(self):
        """Reindex and fetch self.addon back from the autocomplete view."""
        self.reindex(Addon)

        view = AddonAutoCompleteSearchView()
        view.request = self.request
        qs = view.get_queryset()
        return qs.filter('term', id=self.addon.pk).execute()[0]

    def serialize(self):
        """Serialize the ES hit; must not hit the database."""
        self.serializer = ESAddonAutoCompleteSerializer(
            context={'request': self.request})

        obj = self.search()

        with self.assertNumQueries(0):
            result = self.serializer.to_representation(obj)
        return result

    def test_basic(self):
        """Only the four autocomplete keys are returned."""
        self.addon = addon_factory()
        result = self.serialize()

        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['id'] == self.addon.pk
        assert result['name'] == {'en-US': unicode(self.addon.name)}
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
        assert result['url'] == absolutify(self.addon.get_url_path())

    def test_translations(self):
        """Name serializes as a translations dict, flattened with ?lang."""
        translated_name = {
            'en-US': u'My Addôn name in english',
            'fr': u'Nom de mon Addôn',
        }
        self.addon = addon_factory()
        self.addon.name = translated_name
        self.addon.save()

        result = self.serialize()
        assert result['name'] == translated_name

        # Try a single translation. The locale activation is normally done by
        # LocaleAndAppURLMiddleware, but since we're directly calling the
        # serializer we need to do it ourselves.
        self.request = APIRequestFactory().get('/', {'lang': 'fr'})
        with override('fr'):
            result = self.serialize()
        assert result['name'] == translated_name['fr']

    def test_icon_url_with_persona_id(self):
        """Old-style Persona (non-zero persona_id): icon_url still uses
        get_icon_url()."""
        self.addon = addon_factory(type=amo.ADDON_PERSONA)
        persona = self.addon.persona
        persona.persona_id = 42
        persona.header = u'myheader.jpg'
        persona.footer = u'myfooter.jpg'
        persona.accentcolor = u'336699'
        persona.textcolor = u'f0f0f0'
        persona.author = u'Me-me-me-Myself'
        persona.display_username = u'my-username'
        persona.save()
        assert not persona.is_new()

        result = self.serialize()
        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))

    def test_icon_url_persona_with_no_persona_id(self):
        """New-style Persona (persona_id == 0): same icon_url behavior."""
        self.addon = addon_factory(
            name=u'My Personâ',
            description=u'<script>alert(42)</script>My Personä description',
            type=amo.ADDON_PERSONA)
        persona = self.addon.persona
        persona.persona_id = 0  # For "new" style Personas this is always 0.
        persona.header = u'myheader.png'
        persona.footer = u'myfooter.png'
        persona.accentcolor = u'336699'
        persona.textcolor = u'f0f0f0'
        persona.author = u'Me-me-me-Myself'
        persona.display_username = u'my-username'
        persona.save()
        assert persona.is_new()

        result = self.serialize()
        assert set(result.keys()) == set(['id', 'name', 'icon_url', u'url'])
        assert result['icon_url'] == absolutify(self.addon.get_icon_url(64))
class TestAddonDeveloperSerializer(TestBaseUserSerializer):
    """AddonDeveloperSerializer adds picture_url on top of the base user
    serializer."""
    serializer_class = AddonDeveloperSerializer

    def test_picture(self):
        """picture_url is None without a picture, and an absolute .png url
        once a picture_type is set."""
        serialized = self.serialize()
        assert serialized['picture_url'] is None

        self.user.update(picture_type='image/jpeg')
        serialized = self.serialize()
        assert serialized['picture_url'] == absolutify(self.user.picture_url)
        assert '%s.png' % self.user.id in serialized['picture_url']
class TestReplacementAddonSerializer(TestCase):
    """ReplacementAddonSerializer resolves a replacement path (add-on or
    collection) to the guids of the public add-ons it points to."""

    def serialize(self, replacement):
        serializer = ReplacementAddonSerializer()
        return serializer.to_representation(replacement)

    def test_valid_addon_path(self):
        """Both /addon/<slug>/ and /addon/<id>/ paths resolve to the guid."""
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla')

        rep = ReplacementAddon.objects.create(
            guid='legacy@mozilla', path=u'/addon/stuff/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

        # Addon path can also be the id.
        rep.update(path=u'/addon/%s/' % addon.id)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

    def test_invalid_addons(self):
        """Missing or non-public add-ons, and absolute urls, give an empty
        replacement list rather than an error."""
        rep = ReplacementAddon.objects.create(
            guid='legacy@mozilla', path=u'/addon/stuff/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

        # Add the add-on but make it not public
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla',
                              status=amo.STATUS_NULL)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

        # Double check that the test is good and it will work once public.
        addon.update(status=amo.STATUS_PUBLIC)
        result = self.serialize(rep)
        assert result['replacement'] == [u'newstuff@mozilla']

        # But urls aren't resolved - and don't break everything
        rep.update(path=absolutify(addon.get_url_path()))
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

    def test_valid_collection_path(self):
        """Collection paths resolve to the guids of their add-ons, with
        either username or numeric user id in the path."""
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla')
        me = user_factory(username=u'me')
        collection = collection_factory(slug=u'bag', author=me)
        collection.add_addon(addon)

        rep = ReplacementAddon.objects.create(
            guid=u'legacy@mozilla', path=u'/collections/me/bag/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

        # Edge case, but should accept numeric user IDs too
        rep.update(path=u'/collections/%s/bag/' % me.id)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == [u'newstuff@mozilla']

    def test_invalid_collections(self):
        """Missing collections or non-public members yield an empty
        replacement list."""
        rep = ReplacementAddon.objects.create(
            guid=u'legacy@mozilla', path=u'/collections/me/bag/')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

        # Create the user but not the collection.
        me = user_factory(username=u'me')
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

        # Create the collection but make the add-on invalid.
        addon = addon_factory(slug=u'stuff', guid=u'newstuff@mozilla',
                              status=amo.STATUS_NULL)
        collection = collection_factory(slug=u'bag', author=me)
        collection.add_addon(addon)
        result = self.serialize(rep)
        assert result['guid'] == u'legacy@mozilla'
        assert result['replacement'] == []

        # Double check that the test is good and it will work once public.
        addon.update(status=amo.STATUS_PUBLIC)
        result = self.serialize(rep)
        assert result['replacement'] == [u'newstuff@mozilla']
class TestCompatOverrideSerializer(TestCase):
    """CompatOverrideSerializer output: guid, optional linked add-on, and
    version_ranges with per-application min/max versions."""

    def serialize(self, override):
        serializer = CompatOverrideSerializer()
        return serializer.to_representation(override)

    def test_linked_addon(self):
        """An override linked to an existing add-on exposes its id."""
        addon = addon_factory(guid='extrabad@thing')
        override = CompatOverride.objects.create(
            name='override with addon', guid=addon.guid, addon=addon)
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id)
        result = self.serialize(override)

        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'extrabad@thing'
        assert result['addon_id'] == addon.id
        assert result['name'] == 'override with addon'
        version_range = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert result['version_ranges'] == [version_range]

    def test_no_addon(self):
        """An override for a guid with no matching add-on has addon_id
        None."""
        override = CompatOverride.objects.create(
            name='override', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id)
        result = self.serialize(override)

        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override'
        version_range = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert result['version_ranges'] == [version_range]

    def test_multiple_ranges(self):
        """Ranges with different add-on version bounds stay separate
        entries in version_ranges."""
        override = CompatOverride.objects.create(
            name='override with multiple ranges', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id, min_version='23.4',
            max_version='56.7.*')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.THUNDERBIRD.id, min_app_version='1.35',
            max_app_version='90.*')
        result = self.serialize(override)

        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override with multiple ranges'
        assert len(result['version_ranges']) == 2
        version_range_firefox = {
            'addon_min_version': '23.4',
            'addon_max_version': '56.7.*',
            'applications': [{
                'name': amo.FIREFOX.pretty,
                'id': amo.FIREFOX.id,
                'min_version': '0',
                'max_version': '*',
                'guid': amo.FIREFOX.guid
            }]
        }
        assert version_range_firefox in result['version_ranges']
        version_range_thunderbird = {
            'addon_min_version': '0',
            'addon_max_version': '*',
            'applications': [{
                'name': amo.THUNDERBIRD.pretty,
                'id': amo.THUNDERBIRD.id,
                'min_version': '1.35',
                'max_version': '90.*',
                'guid': amo.THUNDERBIRD.guid
            }]
        }
        assert version_range_thunderbird in result['version_ranges']

    def test_collapsed_ranges(self):
        """Ranges sharing the same add-on version bounds collapse into one
        version_ranges entry listing multiple applications."""
        override = CompatOverride.objects.create(
            name='override with single version range', guid='foo@baa')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.FIREFOX.id,
            min_version='23.4', max_version='56.7.*')
        CompatOverrideRange.objects.create(
            compat=override, app=amo.THUNDERBIRD.id,
            min_version='23.4', max_version='56.7.*',
            min_app_version='1.35', max_app_version='90.*')
        result = self.serialize(override)

        assert ['addon_guid', 'addon_id', 'name', 'version_ranges'] == sorted(
            result.keys())
        assert result['addon_guid'] == 'foo@baa'
        assert result['addon_id'] is None
        assert result['name'] == 'override with single version range'
        assert len(result['version_ranges']) == 1
        assert result['version_ranges'][0]['addon_min_version'] == '23.4'
        assert result['version_ranges'][0]['addon_max_version'] == '56.7.*'
        applications = result['version_ranges'][0]['applications']
        assert len(applications) == 2
        application_firefox = {
            'name': amo.FIREFOX.pretty,
            'id': amo.FIREFOX.id,
            'min_version': '0',
            'max_version': '*',
            'guid': amo.FIREFOX.guid
        }
        assert application_firefox in applications
        application_thunderbird = {
            'name': amo.THUNDERBIRD.pretty,
            'id': amo.THUNDERBIRD.id,
            'min_version': '1.35',
            'max_version': '90.*',
            'guid': amo.THUNDERBIRD.guid
        }
        assert application_thunderbird in applications
| true | true |
f7f55aa87cdbd866f9fd82c3b26476625521f82b | 735 | py | Python | pr1081m/smallest_subsequence.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | 1 | 2020-02-20T12:04:46.000Z | 2020-02-20T12:04:46.000Z | pr1081m/smallest_subsequence.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | pr1081m/smallest_subsequence.py | l33tdaima/l33tdaima | 0a7a9573dc6b79e22dcb54357493ebaaf5e0aa90 | [
"MIT"
] | null | null | null | from collections import Counter
class Solution:
    def smallestSubsequence(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of ``s`` that
        contains every distinct character exactly once (LeetCode 1081/316).

        Replaces the original O(n^2) recursive rebuild (string slicing and
        ``replace`` on every step) with the standard monotonic-stack scan:
        O(n) time, O(k) extra space for k distinct characters.  Behaviour is
        unchanged for all inputs, including the empty string.
        """
        last_index = {c: i for i, c in enumerate(s)}  # last occurrence of each char
        stack = []        # answer under construction, kept lexicographically minimal
        on_stack = set()  # characters currently placed in `stack`
        for i, c in enumerate(s):
            if c in on_stack:
                continue  # already placed; a later duplicate can never improve things
            # Pop larger characters that still occur later -- they can be re-added
            # after `c`, which yields a smaller result.
            while stack and stack[-1] > c and last_index[stack[-1]] > i:
                on_stack.remove(stack.pop())
            stack.append(c)
            on_stack.add(c)
        return "".join(stack)
# TESTS
tests = [
    ("a", "a"),
    ("bca", "bca"),
    ("abacb", "abc"),
    ("bcabc", "abc"),
    ("cbacdcbc", "acdb"),
]
# Smoke-test the solver against known (input, expected) pairs.
for case_input, want in tests:
    got = Solution().smallestSubsequence(case_input)
    print("Removing duplicates in", case_input, "->", got)
    assert got == want
| 22.96875 | 82 | 0.491156 | from collections import Counter
class Solution:
    def smallestSubsequence(self, s: str) -> str:
        """Return the lexicographically smallest subsequence of ``s`` that
        contains every distinct character exactly once (LeetCode 1081/316)."""
        if s == "":
            return ""
        counter = Counter(s)
        pos = 0
        # Greedily pick the smallest character whose suffix still contains all
        # remaining distinct characters, then recurse on the rest of the string.
        for i, c in enumerate(s):
            if c < s[pos]:
                pos = i
            counter[c] -= 1
            if counter[c] == 0:
                # Cannot scan past the last occurrence of c, or c would be lost.
                break
        return s[pos] + self.smallestSubsequence(s[pos + 1 :].replace(s[pos], ""))
tests = [
    ("a", "a"),
    ("bca", "bca"),
    ("abacb", "abc"),
    ("bcabc", "abc"),
    ("cbacdcbc", "acdb"),
]
# Smoke-test the solver against known (input, expected) pairs.
for s, expected in tests:
    sol = Solution()
    actual = sol.smallestSubsequence(s)
    print("Removing duplicates in", s, "->", actual)
    assert actual == expected
| true | true |
f7f55c219086eb02779b767c8218ad18348f5b41 | 1,239 | py | Python | 2020/day07-handy-haversacks/bags.py | rajitbanerjee/advent-of-code | 9eb68d59a593433b972af7002893ff3c631b34f9 | [
"CC0-1.0"
] | null | null | null | 2020/day07-handy-haversacks/bags.py | rajitbanerjee/advent-of-code | 9eb68d59a593433b972af7002893ff3c631b34f9 | [
"CC0-1.0"
] | null | null | null | 2020/day07-handy-haversacks/bags.py | rajitbanerjee/advent-of-code | 9eb68d59a593433b972af7002893ff3c631b34f9 | [
"CC0-1.0"
] | null | null | null | #!/usr/bin/env python
def parseBags(lines: list) -> dict:
    """Parse AoC 2020 day-7 rule lines into {outer colour: {inner colour: count}}.

    A rule looks like ``light red bags contain 1 bright white bag, 2 muted
    yellow bags.``; a ``no other bags`` rule yields an empty inner mapping.
    """
    rules = {}
    for raw in lines:
        parts = raw.split(' contain ')
        outer_part = parts[0]
        contents = parts[1]
        # The outer colour is everything before the word 'bags'.
        outer_colour = outer_part[:outer_part.index('bags') - 1]
        contained = {}
        if 'no other' not in contents:
            for item in contents.split(','):
                item = item.strip()
                count_end = item.index(' ')       # end of the leading count
                colour_end = item.index(' bag')   # colour runs up to ' bag(s)'
                contained[item[count_end + 1:colour_end]] = int(item[:count_end])
        rules[outer_colour] = contained
    return rules
def countShiny(bags: dict) -> int:
    """Count bag colours that can (transitively) contain a 'shiny gold' bag.

    ``sum`` accepts booleans directly, and a generator expression avoids
    materialising a temporary list of int casts (same result as before).
    """
    return sum(containsShiny(bags, colour) for colour in bags)
def containsShiny(bags: dict, colour: str) -> bool:
    """Return True when *colour* can hold a 'shiny gold' bag at any depth."""
    # any() short-circuits exactly like the original early-return loop.
    return any(inner == 'shiny gold' or containsShiny(bags, inner)
               for inner in bags[colour])
def countInside(bags: dict, colour: str) -> int:
    """Total number of bags required inside one *colour* bag."""
    total = 0
    for inner_colour, count in bags[colour].items():
        # Each inner bag counts itself plus everything it contains:
        # count * (1 + recursive total)  ==  count + count * recursive total.
        total += count * (1 + countInside(bags, inner_colour))
    return total
if __name__ == '__main__':
    # Read the puzzle input from disk and print both daily answers.
    with open('day7.in') as f:
        bags = parseBags(f.readlines())
    print(f"Part 1 = {countShiny(bags)}")
    print(f"Part 2 = {countInside(bags, 'shiny gold')}")
| 28.813953 | 79 | 0.557708 |
def parseBags(lines: list) -> dict:
    """Parse AoC day-7 rule lines into {outer colour: {inner colour: count}}."""
    lines = [l.split(' contain ') for l in lines]
    bags = {}
    for line in lines:
        # Outer colour is everything before the word 'bags'.
        outer = line[0][:line[0].index('bags') - 1]
        inner = {}
        if 'no other' not in line[1]:
            for each in line[1].split(','):
                each = each.strip()
                first_space = each.index(' ')   # end of the leading count
                last_space = each.index(' bag') # colour runs up to ' bag(s)'
                colour = each[first_space + 1:last_space]
                num = int(each[:first_space])
                inner[colour] = num
        bags[outer] = inner
    return bags
def countShiny(bags: dict) -> int:
    """Count bag colours that can (transitively) contain a 'shiny gold' bag."""
    return sum([int(containsShiny(bags, colour)) for colour in bags])
def containsShiny(bags: dict, colour: str) -> bool:
    """Return True when *colour* can hold a 'shiny gold' bag at any depth."""
    for each in bags[colour]:
        if each == 'shiny gold' or containsShiny(bags, each):
            return True
    return False
def countInside(bags: dict, colour: str) -> int:
    """Total number of bags required inside one *colour* bag."""
    return sum([v + v * countInside(bags, k) for k, v in bags[colour].items()])
if __name__ == '__main__':
    # Read the puzzle input from disk and print both daily answers.
    with open('day7.in') as f:
        bags = parseBags(f.readlines())
    print(f"Part 1 = {countShiny(bags)}")
    print(f"Part 2 = {countInside(bags, 'shiny gold')}")
| true | true |
f7f55d2d0dcbcebe5e5202565cd95ffe342d1afd | 198 | py | Python | designkit_test/test_example.py | DavidErzmann/designkit_test | 4756c0812c1a881ae506845dcc8b5ef2eee62468 | [
"Apache-2.0"
] | null | null | null | designkit_test/test_example.py | DavidErzmann/designkit_test | 4756c0812c1a881ae506845dcc8b5ef2eee62468 | [
"Apache-2.0"
] | null | null | null | designkit_test/test_example.py | DavidErzmann/designkit_test | 4756c0812c1a881ae506845dcc8b5ef2eee62468 | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED! DO NOT EDIT! File to edit: test_example.ipynb (unless otherwise specified).
__all__ = ['say_hello']
# Cell
def say_hello(to):
    "Say hello to somebody"
    # str.format applies the same default conversion as the original f-string.
    return 'Hello {}!'.format(to)
__all__ = ['say_hello']
def say_hello(to):
    "Say hello to somebody"
    # str.format applies the same default conversion as the original f-string.
    return 'Hello {}!'.format(to)
f7f55d9333a27cbbdb4479ed9582b215f8d298a0 | 1,857 | py | Python | setup.py | linewalks/flask-sqlacodegen | 4429732fca6e2a78f78c6946dcebfe4554c47ef1 | [
"MIT"
] | 1 | 2021-01-24T12:12:10.000Z | 2021-01-24T12:12:10.000Z | setup.py | linewalks/flask-sqlacodegen | 4429732fca6e2a78f78c6946dcebfe4554c47ef1 | [
"MIT"
] | null | null | null | setup.py | linewalks/flask-sqlacodegen | 4429732fca6e2a78f78c6946dcebfe4554c47ef1 | [
"MIT"
] | 1 | 2019-11-07T20:07:45.000Z | 2019-11-07T20:07:45.000Z | import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sqlacodegen
class PyTest(TestCommand):
    """``python setup.py test`` command that runs the suite through pytest."""

    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Let pytest discover everything; no explicit args or suite name.
        self.test_args = []
        self.test_suite = True

    def run_tests(self):
        # Imported here so setup.py itself does not require pytest.
        import pytest
        # sys.exit(code) just raises SystemExit(code); raise it directly.
        raise SystemExit(pytest.main(self.test_args))
# argparse joined the standard library in Python 2.7; depend on the backport
# package when installing on older interpreters.
extra_requirements = ()
if sys.version_info < (2, 7):
    extra_requirements = ('argparse',)
setup(
    name='flask-sqlacodegen',
    description='Automatic model code generator for SQLAlchemy with Flask support',
    long_description=open('README.rst').read(),
    version=sqlacodegen.version,
    author='Kamil Sindi',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Topic :: Database',
        'Topic :: Software Development :: Code Generators',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ],
    keywords=['sqlalchemy', 'sqlacodegen', 'flask'],
    license='MIT',
    packages=find_packages(exclude=['tests']),
    # Runtime dependencies; extra_requirements adds argparse on Python < 2.7.
    install_requires=(
        'SQLAlchemy >= 0.6.0',
        'inflect >= 0.2.0'
    ) + extra_requirements,
    tests_require=['pytest', 'pytest-pep8'],
    # 'python setup.py test' delegates to the pytest-based PyTest command above.
    cmdclass={'test': PyTest},
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'flask-sqlacodegen=sqlacodegen.main:main'
        ]
    }
)
| 29.47619 | 83 | 0.620894 | import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import sqlacodegen
class PyTest(TestCommand):
    """setuptools ``test`` command that delegates test running to pytest."""
    def finalize_options(self):
        TestCommand.finalize_options(self)
        # Let pytest discover everything; no explicit args or suite name.
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        # Imported lazily so setup.py loads even without pytest installed.
        import pytest
        errno = pytest.main(self.test_args)
        sys.exit(errno)
# argparse joined the standard library in Python 2.7; depend on the backport
# package when installing on older interpreters.
extra_requirements = ()
if sys.version_info < (2, 7):
    extra_requirements = ('argparse',)
setup(
    name='flask-sqlacodegen',
    description='Automatic model code generator for SQLAlchemy with Flask support',
    long_description=open('README.rst').read(),
    version=sqlacodegen.version,
    author='Kamil Sindi',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Topic :: Database',
        'Topic :: Software Development :: Code Generators',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5'
    ],
    keywords=['sqlalchemy', 'sqlacodegen', 'flask'],
    license='MIT',
    packages=find_packages(exclude=['tests']),
    install_requires=(
        'SQLAlchemy >= 0.6.0',
        'inflect >= 0.2.0'
    ) + extra_requirements,
    tests_require=['pytest', 'pytest-pep8'],
    # 'python setup.py test' delegates to the pytest-based PyTest command above.
    cmdclass={'test': PyTest},
    zip_safe=False,
    entry_points={
        'console_scripts': [
            'flask-sqlacodegen=sqlacodegen.main:main'
        ]
    }
)
| true | true |
f7f55e00c3e17e76b13448703d6dac67cae6ca4f | 23 | py | Python | trafficgenerator/__init__.py | p-sherratt/PyTrafficGenerator | 61ddd0465b9ecdd72134e18668756d2a6ccbf646 | [
"Apache-2.0"
] | null | null | null | trafficgenerator/__init__.py | p-sherratt/PyTrafficGenerator | 61ddd0465b9ecdd72134e18668756d2a6ccbf646 | [
"Apache-2.0"
] | null | null | null | trafficgenerator/__init__.py | p-sherratt/PyTrafficGenerator | 61ddd0465b9ecdd72134e18668756d2a6ccbf646 | [
"Apache-2.0"
] | null | null | null |
# Package version string.
__version__ = '1.6.3'
| 7.666667 | 21 | 0.608696 |
# Package version string.
__version__ = '1.6.3'
| true | true |
f7f55e99303f127d316ea6a2fd21c514ee669cd9 | 215 | py | Python | food_reference_listing/users/tests/test_models.py | bfssi-forest-dussault/food_reference_listing | 85372a81a9201dda02797ab0c11b1bd710f9b70d | [
"MIT"
] | null | null | null | food_reference_listing/users/tests/test_models.py | bfssi-forest-dussault/food_reference_listing | 85372a81a9201dda02797ab0c11b1bd710f9b70d | [
"MIT"
] | null | null | null | food_reference_listing/users/tests/test_models.py | bfssi-forest-dussault/food_reference_listing | 85372a81a9201dda02797ab0c11b1bd710f9b70d | [
"MIT"
] | null | null | null | import pytest
from food_reference_listing.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| 21.5 | 64 | 0.786047 | import pytest
from food_reference_listing.users.models import User
pytestmark = pytest.mark.django_db
def test_user_get_absolute_url(user: User):
assert user.get_absolute_url() == f"/users/{user.username}/"
| true | true |
f7f55eeb2efdd9d503782a38ce90a064128fcdb5 | 7,403 | py | Python | testes/testes_fase.py | lgcarvalhoDEV/pythonbirds | 580d95a0ecb5dcc625c53b13196edf9e1803344c | [
"MIT"
] | null | null | null | testes/testes_fase.py | lgcarvalhoDEV/pythonbirds | 580d95a0ecb5dcc625c53b13196edf9e1803344c | [
"MIT"
] | null | null | null | testes/testes_fase.py | lgcarvalhoDEV/pythonbirds | 580d95a0ecb5dcc625c53b13196edf9e1803344c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import sys
from os import path
from unittest.case import TestCase
project_dir = path.dirname(__file__)
project_dir = path.join('..')
sys.path.append(project_dir)
from placa_grafica_tkinter import rodar_fase
project_dir = os.path.join(os.path.dirname(__file__), '..')
project_dir = os.path.normpath(project_dir)
sys.path.append(project_dir)
from atores import (Obstaculo, Porco, PassaroVermelho, PassaroAmarelo,
DESTRUIDO, ATIVO, DuploLancamentoExcecao)
from fase import Fase, Ponto, EM_ANDAMENTO, VITORIA, DERROTA
class AtorFake:
    """Test double for a game actor; records which callbacks were invoked."""
    def __init__(self, x=0, y=0):
        self.y = y
        self.x = x
        self.status = ATIVO
        # Flags flipped by the methods below so tests can assert they ran.
        self.colidir_executado = False
        self.calcular_posicao_executado = False
        self.intervalo_colisao = None
    def calcular_posicao(self, tempo):
        self.calcular_posicao_executado = True
    def colidir(self, outro_ator, intervalo):
        # Marks both participants and records the collision interval used.
        self.colidir_executado = outro_ator.colidir_executado = True
        self.intervalo_colisao = outro_ator.intervalo_colisao = intervalo
    def caracter(self):
        return ' '
class ObstaculoFake(AtorFake):
    """Obstacle stand-in; behaviour fully inherited from AtorFake."""
    pass
class PorcoFake(AtorFake):
    """Pig stand-in; behaviour fully inherited from AtorFake."""
    pass
class PassaroFake(AtorFake):
    """Bird stand-in; tracks launch state and ground collisions."""
    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self._lancado = False
        self.colidir_com_chao_executado = False
    def foi_lancado(self):
        return self._lancado
    def lancar(self, angulo, tempo):
        # A second launch of the same bird raises DuploLancamentoExcecao.
        if self._lancado:
            raise DuploLancamentoExcecao()
        self._lancado = True
    def colidir_com_chao(self):
        self.colidir_com_chao_executado = True
class FaseTestes(TestCase):
    """Unit tests for Fase (game level) behaviour, using the fake actors above."""
    def teste_adicionar_obstaculo(self):
        # Obstacles can be added one at a time or several at once.
        fase = Fase()
        self.assertListEqual([], fase._obstaculos)
        obstaculo = ObstaculoFake()
        fase.adicionar_obstaculo(obstaculo)
        self.assertListEqual([obstaculo], fase._obstaculos)
        obstaculo1, obstaculo2 = ObstaculoFake(), ObstaculoFake()
        fase.adicionar_obstaculo(obstaculo1, obstaculo2)
        self.assertListEqual([obstaculo, obstaculo1, obstaculo2],
                             fase._obstaculos)
    def teste_adicionar_porco(self):
        # Pigs can be added one at a time or several at once.
        fase = Fase()
        self.assertListEqual([], fase._porcos)
        porco = PorcoFake()
        fase.adicionar_porco(porco)
        self.assertListEqual([porco], fase._porcos)
        porco1, porco2 = PorcoFake(), PorcoFake()
        fase.adicionar_porco(porco1, porco2)
        self.assertListEqual([porco, porco1, porco2], fase._porcos)
    def teste_adicionar_passaro(self):
        # Birds can be added one at a time or several at once.
        fase = Fase()
        self.assertListEqual([], fase._passaros)
        passaro = PassaroFake()
        fase.adicionar_passaro(passaro)
        self.assertListEqual([passaro], fase._passaros)
        passaro1, passaro2 = PassaroFake(), PassaroFake()
        fase.adicionar_passaro(passaro1, passaro2)
        self.assertListEqual([passaro, passaro1, passaro2], fase._passaros)
    def teste_acabou_sem_porcos(self):
        # A level with no pigs at all is immediately a victory.
        fase = Fase()
        self.assertEqual(VITORIA, fase.status())
    def teste_acabou_com_porcos_e_passaros(self):
        fase = Fase()
        porcos = [PorcoFake(1, 1) for _ in range(2)]  # create 2 pigs - the list variable is unused, hence the underscore (_).
        passaros = [PassaroFake(1, 1) for _ in range(2)]  # create 2 birds
        fase.adicionar_porco(*porcos)
        fase.adicionar_passaro(*passaros)
        self.assertEqual(EM_ANDAMENTO, fase.status())
        for ator in porcos + passaros:
            ator.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status())
        # NOTE(review): this test adds the real Obstaculo while teste_status
        # uses ObstaculoFake -- presumably intentional; confirm.
        fase.adicionar_obstaculo(Obstaculo())
        self.assertEqual(VITORIA, fase.status(),
                         'Obstáculo não interfere no fim do jogo')
        fase.adicionar_porco(PorcoFake())
        self.assertEqual(DERROTA, fase.status(),
                         'Com Porco ativo e sem pássaro para lançar, o jogo '
                         'deveria acabar')
        fase.adicionar_passaro(PassaroFake())
        self.assertEqual(EM_ANDAMENTO, fase.status(),
                         'Com Porco ativo e com pássaro para lançar, o jogo '
                         'não deveria acabar')
    def teste_status(self):
        # Walks the level through every status transition.
        fase = Fase()
        porcos = [PorcoFake(1, 1) for _ in range(2)]
        passaros = [PassaroFake(1, 1) for _ in range(2)]
        fase.adicionar_porco(*porcos)
        fase.adicionar_passaro(*passaros)
        self.assertEqual(EM_ANDAMENTO, fase.status())
        for ator in porcos + passaros:
            ator.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status(),
                         'Sem porcos ativos o jogo deveria terminar com '
                         'vitória')
        fase.adicionar_obstaculo(ObstaculoFake())
        self.assertEqual(VITORIA, fase.status(),
                         'Obstáculo não interfere para definir vitória')
        porco = PorcoFake()
        fase.adicionar_porco(porco)
        self.assertEqual(DERROTA, fase.status(),
                         'Com Porco ativo e sem pássaro para lançar, o jogo '
                         'deveria acabar em derrota')
        fase.adicionar_passaro(PassaroFake())
        self.assertEqual(EM_ANDAMENTO, fase.status(),
                         'Com Porco ativo e com pássaro para lançar, o jogo '
                         'não deveria acabar')
        porco.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status(),
                         'Sem porco ativo, o jogo deveria acabar com vitória')
    def teste_lancar_passaro_sem_erro_quando_nao_existe_passaro(self):
        passaros = [PassaroFake(1, 1) for _ in range(2)]
        fase = Fase()
        fase.adicionar_passaro(*passaros)
        self.assertFalse(passaros[0].foi_lancado())
        self.assertFalse(passaros[1].foi_lancado())
        fase.lancar(90, 1)
        fase.lancar(45, 3)
        fase.lancar(31,
                    5)  # check that launching after all birds
        # are already launched does not raise an error
        self.assertTrue(passaros[0].foi_lancado())
        self.assertTrue(passaros[1].foi_lancado())
    def teste_intervalo_de_colisao_padrao(self):
        '''
        Checks that the Fase collision interval is passed on to the actors.
        The default interval is 1.
        '''
        fase = Fase()
        passaro = PassaroFake(1, 1)
        fase.adicionar_passaro(passaro)
        porco = PorcoFake(2, 2)
        fase.adicionar_porco(porco)
        fase.calcular_pontos(0)
        self.assertTrue(passaro.colidir_executado)
        self.assertTrue(porco.colidir_executado)
        self.assertTrue(passaro.calcular_posicao_executado)
        self.assertTrue(passaro.colidir_com_chao_executado)
        self.assertEqual(1, passaro.intervalo_colisao)
        self.assertEqual(1, porco.intervalo_colisao)
    def teste_intervalo_de_colisao_nao_padrao(self):
        '''
        Checks that the Fase collision interval is passed on to the actors.
        Interval tested: 30.
        '''
        fase = Fase(30)
        passaro = PassaroFake(1, 1)
        fase.adicionar_passaro(passaro)
        porco = PorcoFake(31, 31)
        fase.adicionar_porco(porco)
        fase.calcular_pontos(0)
        self.assertEqual(30, passaro.intervalo_colisao)
        self.assertEqual(30, porco.intervalo_colisao)
| 34.755869 | 147 | 0.635688 |
import os
import sys
from os import path
from unittest.case import TestCase
project_dir = path.dirname(__file__)
project_dir = path.join('..')
sys.path.append(project_dir)
from placa_grafica_tkinter import rodar_fase
project_dir = os.path.join(os.path.dirname(__file__), '..')
project_dir = os.path.normpath(project_dir)
sys.path.append(project_dir)
from atores import (Obstaculo, Porco, PassaroVermelho, PassaroAmarelo,
DESTRUIDO, ATIVO, DuploLancamentoExcecao)
from fase import Fase, Ponto, EM_ANDAMENTO, VITORIA, DERROTA
class AtorFake:
    """Test double for a game actor; records which callbacks were invoked."""
    def __init__(self, x=0, y=0):
        self.y = y
        self.x = x
        self.status = ATIVO
        # Flags flipped by the methods below so tests can assert they ran.
        self.colidir_executado = False
        self.calcular_posicao_executado = False
        self.intervalo_colisao = None
    def calcular_posicao(self, tempo):
        self.calcular_posicao_executado = True
    def colidir(self, outro_ator, intervalo):
        # Marks both participants and records the collision interval used.
        self.colidir_executado = outro_ator.colidir_executado = True
        self.intervalo_colisao = outro_ator.intervalo_colisao = intervalo
    def caracter(self):
        return ' '
class ObstaculoFake(AtorFake):
    """Obstacle stand-in; behaviour fully inherited from AtorFake."""
    pass
class PorcoFake(AtorFake):
    """Pig stand-in; behaviour fully inherited from AtorFake."""
    pass
class PassaroFake(AtorFake):
    """Bird stand-in; tracks launch state and ground collisions."""
    def __init__(self, x=0, y=0):
        super().__init__(x, y)
        self._lancado = False
        self.colidir_com_chao_executado = False
    def foi_lancado(self):
        return self._lancado
    def lancar(self, angulo, tempo):
        # A second launch of the same bird raises DuploLancamentoExcecao.
        if self._lancado:
            raise DuploLancamentoExcecao()
        self._lancado = True
    def colidir_com_chao(self):
        self.colidir_com_chao_executado = True
class FaseTestes(TestCase):
    """Unit tests for Fase (game level) behaviour, using the fake actors above."""
    def teste_adicionar_obstaculo(self):
        """Obstacles can be added one at a time or several at once."""
        fase = Fase()
        self.assertListEqual([], fase._obstaculos)
        obstaculo = ObstaculoFake()
        fase.adicionar_obstaculo(obstaculo)
        self.assertListEqual([obstaculo], fase._obstaculos)
        obstaculo1, obstaculo2 = ObstaculoFake(), ObstaculoFake()
        fase.adicionar_obstaculo(obstaculo1, obstaculo2)
        self.assertListEqual([obstaculo, obstaculo1, obstaculo2],
                             fase._obstaculos)
    def teste_adicionar_porco(self):
        """Pigs can be added one at a time or several at once."""
        fase = Fase()
        self.assertListEqual([], fase._porcos)
        porco = PorcoFake()
        fase.adicionar_porco(porco)
        self.assertListEqual([porco], fase._porcos)
        porco1, porco2 = PorcoFake(), PorcoFake()
        fase.adicionar_porco(porco1, porco2)
        self.assertListEqual([porco, porco1, porco2], fase._porcos)
    def teste_adicionar_passaro(self):
        """Birds can be added one at a time or several at once."""
        fase = Fase()
        self.assertListEqual([], fase._passaros)
        passaro = PassaroFake()
        fase.adicionar_passaro(passaro)
        self.assertListEqual([passaro], fase._passaros)
        passaro1, passaro2 = PassaroFake(), PassaroFake()
        fase.adicionar_passaro(passaro1, passaro2)
        self.assertListEqual([passaro, passaro1, passaro2], fase._passaros)
    def teste_acabou_sem_porcos(self):
        """A level with no pigs at all is immediately a victory."""
        fase = Fase()
        self.assertEqual(VITORIA, fase.status())
    def teste_acabou_com_porcos_e_passaros(self):
        fase = Fase()
        porcos = [PorcoFake(1, 1) for _ in range(2)]
        passaros = [PassaroFake(1, 1) for _ in range(2)]
        fase.adicionar_porco(*porcos)
        fase.adicionar_passaro(*passaros)
        self.assertEqual(EM_ANDAMENTO, fase.status())
        for ator in porcos + passaros:
            ator.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status())
        # NOTE(review): this test adds the real Obstaculo while teste_status
        # uses ObstaculoFake -- presumably intentional; confirm.
        fase.adicionar_obstaculo(Obstaculo())
        self.assertEqual(VITORIA, fase.status(),
                         'Obstáculo não interfere no fim do jogo')
        fase.adicionar_porco(PorcoFake())
        self.assertEqual(DERROTA, fase.status(),
                         'Com Porco ativo e sem pássaro para lançar, o jogo '
                         'deveria acabar')
        fase.adicionar_passaro(PassaroFake())
        self.assertEqual(EM_ANDAMENTO, fase.status(),
                         'Com Porco ativo e com pássaro para lançar, o jogo '
                         'não deveria acabar')
    def teste_status(self):
        """Walks the level through every status transition."""
        fase = Fase()
        porcos = [PorcoFake(1, 1) for _ in range(2)]
        passaros = [PassaroFake(1, 1) for _ in range(2)]
        fase.adicionar_porco(*porcos)
        fase.adicionar_passaro(*passaros)
        self.assertEqual(EM_ANDAMENTO, fase.status())
        for ator in porcos + passaros:
            ator.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status(),
                         'Sem porcos ativos o jogo deveria terminar com '
                         'vitória')
        fase.adicionar_obstaculo(ObstaculoFake())
        self.assertEqual(VITORIA, fase.status(),
                         'Obstáculo não interfere para definir vitória')
        porco = PorcoFake()
        fase.adicionar_porco(porco)
        self.assertEqual(DERROTA, fase.status(),
                         'Com Porco ativo e sem pássaro para lançar, o jogo '
                         'deveria acabar em derrota')
        fase.adicionar_passaro(PassaroFake())
        self.assertEqual(EM_ANDAMENTO, fase.status(),
                         'Com Porco ativo e com pássaro para lançar, o jogo '
                         'não deveria acabar')
        porco.status = DESTRUIDO
        self.assertEqual(VITORIA, fase.status(),
                         'Sem porco ativo, o jogo deveria acabar com vitória')
    def teste_lancar_passaro_sem_erro_quando_nao_existe_passaro(self):
        """Launching more times than there are birds must not raise."""
        passaros = [PassaroFake(1, 1) for _ in range(2)]
        fase = Fase()
        fase.adicionar_passaro(*passaros)
        self.assertFalse(passaros[0].foi_lancado())
        self.assertFalse(passaros[1].foi_lancado())
        fase.lancar(90, 1)
        fase.lancar(45, 3)
        # Third launch: all birds are gone; this must be a silent no-op.
        fase.lancar(31,
                    5)
        self.assertTrue(passaros[0].foi_lancado())
        self.assertTrue(passaros[1].foi_lancado())
    def teste_intervalo_de_colisao_padrao(self):
        """The default collision interval (1) is passed on to the actors."""
        fase = Fase()
        passaro = PassaroFake(1, 1)
        fase.adicionar_passaro(passaro)
        porco = PorcoFake(2, 2)
        fase.adicionar_porco(porco)
        fase.calcular_pontos(0)
        self.assertTrue(passaro.colidir_executado)
        self.assertTrue(porco.colidir_executado)
        self.assertTrue(passaro.calcular_posicao_executado)
        self.assertTrue(passaro.colidir_com_chao_executado)
        self.assertEqual(1, passaro.intervalo_colisao)
        self.assertEqual(1, porco.intervalo_colisao)
    def teste_intervalo_de_colisao_nao_padrao(self):
        """A custom collision interval (30) is passed on to the actors."""
        fase = Fase(30)
        passaro = PassaroFake(1, 1)
        fase.adicionar_passaro(passaro)
        porco = PorcoFake(31, 31)
        fase.adicionar_porco(porco)
        fase.calcular_pontos(0)
        self.assertEqual(30, passaro.intervalo_colisao)
        self.assertEqual(30, porco.intervalo_colisao)
| true | true |
f7f55f48d058423058b5a93edf2e41ae0f0f7055 | 610 | py | Python | md2pdf/renderer.py | billweasley/md2pdf | 161bba679f947758e6436e47ce078b4d0e1943d9 | [
"BSD-3-Clause"
] | 7 | 2015-01-20T14:17:55.000Z | 2018-07-10T13:46:24.000Z | md2pdf/renderer.py | billweasley/md2pdf | 161bba679f947758e6436e47ce078b4d0e1943d9 | [
"BSD-3-Clause"
] | 3 | 2015-01-20T11:49:45.000Z | 2017-12-18T07:41:49.000Z | md2pdf/renderer.py | billweasley/md2pdf | 161bba679f947758e6436e47ce078b4d0e1943d9 | [
"BSD-3-Clause"
] | 5 | 2015-01-20T10:09:03.000Z | 2019-01-17T18:41:00.000Z | # coding=utf8
"""
md2pdf.renderer
~~~~~~~~~~~~~~~
Usage::
>>> from md2pdf.renderer import renderer
>>> renderer(**kwargs)
"""
from jinja2 import Environment, FileSystemLoader
from . import template
from .utils import path_to
class Renderer(object):
def __init__(self, pdf_template, templates_folder):
self.env = Environment(loader=FileSystemLoader(templates_folder))
self.pdf_template = pdf_template
def render(self, **kwargs):
return self.env.get_template(self.pdf_template).render(**kwargs)
renderer = Renderer(template, path_to('res'))
| 20.333333 | 73 | 0.672131 |
from jinja2 import Environment, FileSystemLoader
from . import template
from .utils import path_to
class Renderer(object):
def __init__(self, pdf_template, templates_folder):
self.env = Environment(loader=FileSystemLoader(templates_folder))
self.pdf_template = pdf_template
def render(self, **kwargs):
return self.env.get_template(self.pdf_template).render(**kwargs)
renderer = Renderer(template, path_to('res'))
| true | true |
f7f55f6ca19ee957793245d136155fa6385a468e | 1,170 | py | Python | python/sb_cardToCamera.py | simonbjork/sb-nuke-tools | b8ec375199700e77bff7cc50b489a3ac755a6b6a | [
"MIT"
] | null | null | null | python/sb_cardToCamera.py | simonbjork/sb-nuke-tools | b8ec375199700e77bff7cc50b489a3ac755a6b6a | [
"MIT"
] | null | null | null | python/sb_cardToCamera.py | simonbjork/sb-nuke-tools | b8ec375199700e77bff7cc50b489a3ac755a6b6a | [
"MIT"
] | null | null | null | ################
"""
sb_cardToCamera
Simon Bjork
March 2014
bjork.simon@gmail.com
To install the script:
- Add the script to your Nuke pluginPath.
- Add the following to your menu.py:
import sb_cardToCamera
sb_tools = nuke.toolbar("Nodes").addMenu( "sb_Tools", icon = "sb_tools.png" )
sb_tools.addCommand("Python/sb CardToCamera", 'sb_cardToCamera.sb_cardToCamera()', '')
"""
################
import nuke
################
def sb_cardToCamera():
    """Create a Card2 node matched to every selected Camera node.

    For each selected Camera/Camera2, a Card2 is created, placed just below
    the camera in the node graph, and its transform/lens knobs are copied
    from the camera so the card lines up with the camera view.
    """
    selected_cameras = [node for node in nuke.selectedNodes()
                        if node.Class() in ("Camera", "Camera2")]
    if not selected_cameras:
        nuke.message("Select a camera node.")
        return
    for camera in selected_cameras:
        card = nuke.createNode("Card2", inpanel=False)
        card["selected"].setValue(False)
        # Position the card 100 px below the source camera in the DAG.
        card["xpos"].setValue(camera["xpos"].value())
        card["ypos"].setValue(camera["ypos"].value() + 100)
        # Copy transform and lens knobs from the camera.
        card["translate"].fromScript(camera["translate"].toScript())
        card["rotate"].fromScript(camera["rotate"].toScript())
        card["lens_in_focal"].fromScript(camera["focal"].toScript())
        card["lens_in_haperture"].fromScript(camera["haperture"].toScript())
        card["z"].setValue(1000)
card["ypos"].setValue(i["ypos"].value() + 100)
card["translate"].fromScript(i["translate"].toScript())
card["rotate"].fromScript(i["rotate"].toScript())
card["lens_in_focal"].fromScript(i["focal"].toScript())
card["lens_in_haperture"].fromScript(i["haperture"].toScript())
card["z"].setValue(1000) | true | true |
f7f55f95dfc881a7f41f33855f1c5508367323a0 | 14,253 | py | Python | angr/simos/linux.py | zeroSteiner/angr | 90dc141c424a2c4dba5551baf0beac7ce7bc8837 | [
"BSD-2-Clause"
] | 2 | 2018-12-03T23:14:56.000Z | 2018-12-03T23:15:57.000Z | angr/simos/linux.py | zeroSteiner/angr | 90dc141c424a2c4dba5551baf0beac7ce7bc8837 | [
"BSD-2-Clause"
] | null | null | null | angr/simos/linux.py | zeroSteiner/angr | 90dc141c424a2c4dba5551baf0beac7ce7bc8837 | [
"BSD-2-Clause"
] | null | null | null | import os
import logging
import claripy
from cle import MetaELF
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger('angr.simos.linux')
class SimLinux(SimUserland):
"""
OS-specific configuration for \\*nix-y OSes.
"""
    def __init__(self, project, **kwargs):
        """Configure the Linux userland SimOS for *project*.

        The ``_*_addr`` attributes below are placeholders for extern-object
        addresses; they are allocated and hooked in ``configure_project``.
        """
        super(SimLinux, self).__init__(project,
                                       syscall_library=L['linux'],
                                       syscall_addr_alignment=project.arch.instruction_alignment,
                                       name="Linux",
                                       **kwargs)
        self._loader_addr = None
        self._loader_lock_addr = None
        self._loader_unlock_addr = None
        self._error_catch_tsd_addr = None
        self._vsyscall_addr = None
    def configure_project(self): # pylint: disable=arguments-differ
        """Hook loader/kernel SimProcedures at freshly allocated extern
        addresses, patch static loader and TLS data, set up GNU ifunc
        resolution on x86/AMD64 ELF binaries, and select the syscall ABIs
        for the project's architecture."""
        self._loader_addr = self.project.loader.extern_object.allocate()
        self._loader_lock_addr = self.project.loader.extern_object.allocate()
        self._loader_unlock_addr = self.project.loader.extern_object.allocate()
        self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
        self._vsyscall_addr = self.project.loader.extern_object.allocate()
        self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
        self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
        self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
        self.project.hook(self._error_catch_tsd_addr,
                          P['linux_loader']['_dl_initial_error_catch_tsd'](
                              static_addr=self.project.loader.extern_object.allocate()
                          )
                          )
        self.project.hook(self._vsyscall_addr, P['linux_kernel']['_vsyscall']())
        ld_obj = self.project.loader.linux_loader_object
        if ld_obj is not None:
            # there are some functions we MUST use the simprocedures for, regardless of what the user wants
            self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
            self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
            # set up some static data in the loader object...
            # (the hardcoded _rtld_global field offsets below are
            # AMD64-specific -- presumably matched to glibc's layout; confirm
            # against the targeted glibc version)
            _rtld_global = ld_obj.get_symbol('_rtld_global')
            if _rtld_global is not None:
                if isinstance(self.project.arch, ArchAMD64):
                    self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
                    self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
                    self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
            # TODO: what the hell is this
            _rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
            if _rtld_global_ro is not None:
                pass
        libc_obj = self.project.loader.find_object('libc.so.6')
        if libc_obj:
            self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch), libc_obj)
        # Architecture-specific TLS/vsyscall fixups.
        tls_obj = self.project.loader.tls_object
        if tls_obj is not None:
            if isinstance(self.project.arch, ArchAMD64):
                self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
                self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
            elif isinstance(self.project.arch, ArchX86):
                self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
            elif isinstance(self.project.arch, ArchARM):
                self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
        # Only set up ifunc resolution if we are using the ELF backend on AMD64
        if isinstance(self.project.loader.main_object, MetaELF):
            if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
                for binary in self.project.loader.all_objects:
                    if not isinstance(binary, MetaELF):
                        continue
                    for reloc in binary.relocs:
                        if reloc.symbol is None or reloc.resolvedby is None:
                            continue
                        try:
                            if reloc.resolvedby.elftype != 'STT_GNU_IFUNC':
                                continue
                        except AttributeError:
                            continue
                        gotaddr = reloc.rebased_addr
                        gotvalue = self.project.loader.memory.unpack_word(gotaddr)
                        if self.project.is_hooked(gotvalue):
                            continue
                        # Replace it with a ifunc-resolve simprocedure!
                        kwargs = {
                            'funcaddr': gotvalue,
                            'gotaddr': gotaddr,
                            'funcname': reloc.symbol.name
                        }
                        # TODO: should this be replaced with hook_symbol?
                        randaddr = self.project.loader.extern_object.allocate()
                        self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
                        self.project.loader.memory.pack_word(gotaddr, randaddr)
        # maybe move this into archinfo?
        if self.arch.name == 'X86':
            syscall_abis = ['i386']
        elif self.arch.name == 'AMD64':
            syscall_abis = ['i386', 'amd64']
        elif self.arch.name.startswith('ARM'):
            syscall_abis = ['arm']
            if self.arch.name == 'ARMHF':
                syscall_abis.append('armhf')
        elif self.arch.name == 'AARCH64':
            syscall_abis = ['aarch64']
        # https://www.linux-mips.org/wiki/WhatsWrongWithO32N32N64
        elif self.arch.name == 'MIPS32':
            syscall_abis = ['mips-o32']
        elif self.arch.name == 'MIPS64':
            syscall_abis = ['mips-n32', 'mips-n64']
        elif self.arch.name == 'PPC32':
            syscall_abis = ['ppc']
        elif self.arch.name == 'PPC64':
            syscall_abis = ['ppc64']
        else:
            syscall_abis = [] # ?
        super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
if state.arch.name != 'AMD64':
return None
if state.history.jumpkind == 'Ijk_Sys_int128':
return 'i386'
elif state.history.jumpkind == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
    # pylint: disable=arguments-differ
    def state_blank(self, fs=None, concrete_fs=False, chroot=None,
                    cwd=b'/home/user', pathsep=b'/', **kwargs):
        """Create a blank state with Linux TLS registers and a filesystem.

        :param fs: mapping of file name to preloaded content; each value may
            be str, bytes, a claripy bitvector, or a SimFile
        :param concrete_fs: if True, mount the host filesystem at the root
        :param chroot: host directory to use as the root when concrete_fs is set
        :param cwd: initial working directory for the fs plugin
        :param pathsep: path separator byte
        :raises TypeError: if an fs value cannot be converted to a SimFile
        """
        state = super(SimLinux, self).state_blank(**kwargs)
        # Point the architecture's thread-local-storage register at the
        # loaded TLS block's user thread pointer.
        if self.project.loader.tls_object is not None:
            if isinstance(state.arch, ArchAMD64):
                state.regs.fs = self.project.loader.tls_object.user_thread_pointer
            elif isinstance(state.arch, ArchX86):
                state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
            elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
                state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
            elif isinstance(state.arch, ArchPPC32):
                state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
            elif isinstance(state.arch, ArchPPC64):
                state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
            elif isinstance(state.arch, ArchAArch64):
                state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
        # Normalize fs initializers: str -> bytes -> bitvector -> SimFile.
        if fs is None: fs = {}
        for name in fs:
            if type(fs[name]) is str:
                fs[name] = fs[name].encode('utf-8')
            if type(fs[name]) is bytes:
                fs[name] = claripy.BVV(fs[name])
            if isinstance(fs[name], claripy.Bits):
                fs[name] = SimFile(name, content=fs[name])
            if not isinstance(fs[name], SimFileBase):
                raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
        mounts = {}
        if concrete_fs:
            mounts[pathsep] = SimHostFilesystem(chroot if chroot is not None else os.path.sep)
        state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
        # Record the PPC64 ELF ABIv1 flavour on the libc plugin.
        if self.project.loader.main_object.is_ppc64_abiv1:
            state.libc.ppc64_abiv = 'ppc64_1'
        return state
    def state_entry(self, args=None, env=None, argc=None, **kwargs):
        """Create a state at the program entry point with an initialized
        Linux userland stack (argc, argv, envp, auxv).

        :param args: argv strings; defaults to [project filename].
        :param env: environment mapping; defaults to empty.
        :param argc: argument count as an int or bitvector; defaults to len(args).
        """
        state = super(SimLinux, self).state_entry(**kwargs)
        # Handle default values
        filename = self.project.filename or 'dummy_filename'
        if args is None:
            args = [filename]
        if env is None:
            env = {}
        # Prepare argc
        if argc is None:
            argc = claripy.BVV(len(args), state.arch.bits)
        elif type(argc) is int: # pylint: disable=unidiomatic-typecheck
            argc = claripy.BVV(argc, state.arch.bits)
        # Make string table for args/env/auxv
        table = StringTableSpec()
        # Add args to string table
        table.append_args(args)
        # Add environment to string table
        table.append_env(env)
        # Prepare the auxiliary vector and add it to the end of the string table
        # TODO: Actually construct a real auxiliary vector
        # current vector is an AT_RANDOM entry where the "random" value is 0xaec0aec0aec0...
        aux = [(25, b"\xAE\xC0" * 8)]  # 25 == AT_RANDOM
        for a, b in aux:
            table.add_pointer(a)
            if isinstance(b, bytes):
                table.add_string(b)
            else:
                table.add_pointer(b)
        table.add_null()
        table.add_null()
        # Dump the table onto the stack, calculate pointers to args, env, and auxv
        state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
        argv = table.dump(state, state.regs.sp - 16)
        # envp starts after argv's len(args) pointers plus its NULL terminator;
        # auxv starts after envp's len(env) pointers plus its NULL terminator.
        envp = argv + ((len(args) + 1) * state.arch.bytes)
        auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
        # Put argc on stack and fix the stack pointer
        newsp = argv - state.arch.bytes
        state.memory.store(newsp, argc, endness=state.arch.memory_endness)
        state.regs.sp = newsp
        if state.arch.name in ('PPC32',):
            # PPC32 only: push four extra zero words below argc.
            # NOTE(review): presumably reserving the ABI's initial stack
            # frame / back-chain area -- confirm the exact rationale.
            state.stack_push(claripy.BVV(0, 32))
            state.stack_push(claripy.BVV(0, 32))
            state.stack_push(claripy.BVV(0, 32))
            state.stack_push(claripy.BVV(0, 32))
        # store argc argv envp auxv in the posix plugin
        state.posix.argv = argv
        state.posix.argc = argc
        state.posix.environ = envp
        state.posix.auxv = auxv
        self.set_entry_register_values(state)
        return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.items():
if isinstance(val, int):
state.registers.store(reg, val, size=state.arch.bytes)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# a pointer to the dynamic linker's destructor routine, to be called at exit
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
_l.error('What the ass kind of default value is %s?', val)
    def state_full_init(self, **kwargs):
        """Create a state that begins execution at the simulated dynamic
        loader stub (hooked in configure_project) instead of the binary's
        own entry point. Any caller-supplied ``addr`` is overridden.
        """
        kwargs['addr'] = self._loader_addr
        return super(SimLinux, self).state_full_init(**kwargs)
    def prepare_function_symbol(self, symbol_name, basic_addr=None):
        """
        Prepare the address space with the data necessary to perform relocations pointing to the given symbol.
        Returns a 2-tuple. The first item is the address of the function code, the second is the address of the
        relocation target.
        """
        if self.project.loader.main_object.is_ppc64_abiv1:
            # PPC64 ABIv1: relocations point at a function *descriptor*, whose
            # first word is the actual code entry point.
            if basic_addr is not None:
                # An existing descriptor was supplied; dereference it.
                pointer = self.project.loader.memory.unpack_word(basic_addr)
                return pointer, basic_addr
            # Fabricate a descriptor in the extern object: allocate 0x18 bytes
            # (three words -- NOTE(review): presumably entry/TOC/environment
            # per the ABIv1 descriptor layout; only the entry word is filled)
            # and write the pseudo hook address into its first word.
            pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
            pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
            self.project.loader.extern_object.memory.pack_word(
                AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
            return pseudo_hookaddr, pseudo_toc
        else:
            # Everywhere else the code address and the relocation target coincide.
            if basic_addr is None:
                basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
            return basic_addr, basic_addr
| 45.977419 | 119 | 0.604434 | import os
import logging
import claripy
from cle import MetaELF
from cle.address_translator import AT
from archinfo import ArchX86, ArchAMD64, ArchARM, ArchAArch64, ArchMIPS32, ArchMIPS64, ArchPPC32, ArchPPC64
from ..tablespecs import StringTableSpec
from ..procedures import SIM_PROCEDURES as P, SIM_LIBRARIES as L
from ..state_plugins import SimFilesystem, SimHostFilesystem
from ..storage.file import SimFile, SimFileBase
from ..errors import AngrSyscallError
from .userland import SimUserland
_l = logging.getLogger('angr.simos.linux')
class SimLinux(SimUserland):
def __init__(self, project, **kwargs):
super(SimLinux, self).__init__(project,
syscall_library=L['linux'],
syscall_addr_alignment=project.arch.instruction_alignment,
name="Linux",
**kwargs)
self._loader_addr = None
self._loader_lock_addr = None
self._loader_unlock_addr = None
self._error_catch_tsd_addr = None
self._vsyscall_addr = None
def configure_project(self):
self._loader_addr = self.project.loader.extern_object.allocate()
self._loader_lock_addr = self.project.loader.extern_object.allocate()
self._loader_unlock_addr = self.project.loader.extern_object.allocate()
self._error_catch_tsd_addr = self.project.loader.extern_object.allocate()
self._vsyscall_addr = self.project.loader.extern_object.allocate()
self.project.hook(self._loader_addr, P['linux_loader']['LinuxLoader']())
self.project.hook(self._loader_lock_addr, P['linux_loader']['_dl_rtld_lock_recursive']())
self.project.hook(self._loader_unlock_addr, P['linux_loader']['_dl_rtld_unlock_recursive']())
self.project.hook(self._error_catch_tsd_addr,
P['linux_loader']['_dl_initial_error_catch_tsd'](
static_addr=self.project.loader.extern_object.allocate()
)
)
self.project.hook(self._vsyscall_addr, P['linux_kernel']['_vsyscall']())
ld_obj = self.project.loader.linux_loader_object
if ld_obj is not None:
self._weak_hook_symbol('__tls_get_addr', L['ld.so'].get('__tls_get_addr', self.arch), ld_obj)
self._weak_hook_symbol('___tls_get_addr', L['ld.so'].get('___tls_get_addr', self.arch), ld_obj)
_rtld_global = ld_obj.get_symbol('_rtld_global')
if _rtld_global is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF08, self._loader_lock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0xF10, self._loader_unlock_addr)
self.project.loader.memory.pack_word(_rtld_global.rebased_addr + 0x990, self._error_catch_tsd_addr)
_rtld_global_ro = ld_obj.get_symbol('_rtld_global_ro')
if _rtld_global_ro is not None:
pass
libc_obj = self.project.loader.find_object('libc.so.6')
if libc_obj:
self._weak_hook_symbol('_dl_vdso_vsym', L['libc.so.6'].get('_dl_vdso_vsym', self.arch), libc_obj)
tls_obj = self.project.loader.tls_object
if tls_obj is not None:
if isinstance(self.project.arch, ArchAMD64):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x28, 0x5f43414e4152595f)
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x30, 0x5054524755415244)
elif isinstance(self.project.arch, ArchX86):
self.project.loader.memory.pack_word(tls_obj.thread_pointer + 0x10, self._vsyscall_addr)
elif isinstance(self.project.arch, ArchARM):
self.project.hook(0xffff0fe0, P['linux_kernel']['_kernel_user_helper_get_tls']())
if isinstance(self.project.loader.main_object, MetaELF):
if isinstance(self.project.arch, (ArchAMD64, ArchX86)):
for binary in self.project.loader.all_objects:
if not isinstance(binary, MetaELF):
continue
for reloc in binary.relocs:
if reloc.symbol is None or reloc.resolvedby is None:
continue
try:
if reloc.resolvedby.elftype != 'STT_GNU_IFUNC':
continue
except AttributeError:
continue
gotaddr = reloc.rebased_addr
gotvalue = self.project.loader.memory.unpack_word(gotaddr)
if self.project.is_hooked(gotvalue):
continue
kwargs = {
'funcaddr': gotvalue,
'gotaddr': gotaddr,
'funcname': reloc.symbol.name
}
randaddr = self.project.loader.extern_object.allocate()
self.project.hook(randaddr, P['linux_loader']['IFuncResolver'](**kwargs))
self.project.loader.memory.pack_word(gotaddr, randaddr)
if self.arch.name == 'X86':
syscall_abis = ['i386']
elif self.arch.name == 'AMD64':
syscall_abis = ['i386', 'amd64']
elif self.arch.name.startswith('ARM'):
syscall_abis = ['arm']
if self.arch.name == 'ARMHF':
syscall_abis.append('armhf')
elif self.arch.name == 'AARCH64':
syscall_abis = ['aarch64']
elif self.arch.name == 'MIPS32':
syscall_abis = ['mips-o32']
elif self.arch.name == 'MIPS64':
syscall_abis = ['mips-n32', 'mips-n64']
elif self.arch.name == 'PPC32':
syscall_abis = ['ppc']
elif self.arch.name == 'PPC64':
syscall_abis = ['ppc64']
else:
syscall_abis = []
super(SimLinux, self).configure_project(syscall_abis)
def syscall_abi(self, state):
if state.arch.name != 'AMD64':
return None
if state.history.jumpkind == 'Ijk_Sys_int128':
return 'i386'
elif state.history.jumpkind == 'Ijk_Sys_syscall':
return 'amd64'
else:
raise AngrSyscallError("Unknown syscall jumpkind %s" % state.history.jumpkind)
def state_blank(self, fs=None, concrete_fs=False, chroot=None,
cwd=b'/home/user', pathsep=b'/', **kwargs):
state = super(SimLinux, self).state_blank(**kwargs)
if self.project.loader.tls_object is not None:
if isinstance(state.arch, ArchAMD64):
state.regs.fs = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchX86):
state.regs.gs = self.project.loader.tls_object.user_thread_pointer >> 16
elif isinstance(state.arch, (ArchMIPS32, ArchMIPS64)):
state.regs.ulr = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC32):
state.regs.r2 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchPPC64):
state.regs.r13 = self.project.loader.tls_object.user_thread_pointer
elif isinstance(state.arch, ArchAArch64):
state.regs.tpidr_el0 = self.project.loader.tls_object.user_thread_pointer
if fs is None: fs = {}
for name in fs:
if type(fs[name]) is str:
fs[name] = fs[name].encode('utf-8')
if type(fs[name]) is bytes:
fs[name] = claripy.BVV(fs[name])
if isinstance(fs[name], claripy.Bits):
fs[name] = SimFile(name, content=fs[name])
if not isinstance(fs[name], SimFileBase):
raise TypeError("Provided fs initializer with unusable type %r" % type(fs[name]))
mounts = {}
if concrete_fs:
mounts[pathsep] = SimHostFilesystem(chroot if chroot is not None else os.path.sep)
state.register_plugin('fs', SimFilesystem(files=fs, pathsep=pathsep, cwd=cwd, mountpoints=mounts))
if self.project.loader.main_object.is_ppc64_abiv1:
state.libc.ppc64_abiv = 'ppc64_1'
return state
def state_entry(self, args=None, env=None, argc=None, **kwargs):
state = super(SimLinux, self).state_entry(**kwargs)
filename = self.project.filename or 'dummy_filename'
if args is None:
args = [filename]
if env is None:
env = {}
if argc is None:
argc = claripy.BVV(len(args), state.arch.bits)
elif type(argc) is int:
argc = claripy.BVV(argc, state.arch.bits)
table = StringTableSpec()
table.append_args(args)
table.append_env(env)
aux = [(25, b"\xAE\xC0" * 8)]
for a, b in aux:
table.add_pointer(a)
if isinstance(b, bytes):
table.add_string(b)
else:
table.add_pointer(b)
table.add_null()
table.add_null()
state.memory.store(state.regs.sp - 16, claripy.BVV(0, 8 * 16))
argv = table.dump(state, state.regs.sp - 16)
envp = argv + ((len(args) + 1) * state.arch.bytes)
auxv = argv + ((len(args) + len(env) + 2) * state.arch.bytes)
newsp = argv - state.arch.bytes
state.memory.store(newsp, argc, endness=state.arch.memory_endness)
state.regs.sp = newsp
if state.arch.name in ('PPC32',):
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.stack_push(claripy.BVV(0, 32))
state.posix.argv = argv
state.posix.argc = argc
state.posix.environ = envp
state.posix.auxv = auxv
self.set_entry_register_values(state)
return state
def set_entry_register_values(self, state):
for reg, val in state.arch.entry_register_values.items():
if isinstance(val, int):
state.registers.store(reg, val, size=state.arch.bytes)
elif isinstance(val, (str,)):
if val == 'argc':
state.registers.store(reg, state.posix.argc, size=state.arch.bytes)
elif val == 'argv':
state.registers.store(reg, state.posix.argv)
elif val == 'envp':
state.registers.store(reg, state.posix.environ)
elif val == 'auxv':
state.registers.store(reg, state.posix.auxv)
elif val == 'ld_destructor':
# or NULL. We like NULL. It makes things easier.
state.registers.store(reg, 0)
elif val == 'toc':
if self.project.loader.main_object.is_ppc64_abiv1:
state.registers.store(reg, self.project.loader.main_object.ppc64_initial_rtoc)
elif val == 'thread_pointer':
state.registers.store(reg, self.project.loader.tls_object.user_thread_pointer)
else:
_l.warning('Unknown entry point register value indicator "%s"', val)
else:
_l.error('What the ass kind of default value is %s?', val)
def state_full_init(self, **kwargs):
kwargs['addr'] = self._loader_addr
return super(SimLinux, self).state_full_init(**kwargs)
def prepare_function_symbol(self, symbol_name, basic_addr=None):
if self.project.loader.main_object.is_ppc64_abiv1:
if basic_addr is not None:
pointer = self.project.loader.memory.unpack_word(basic_addr)
return pointer, basic_addr
pseudo_hookaddr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
pseudo_toc = self.project.loader.extern_object.allocate(size=0x18)
self.project.loader.extern_object.memory.pack_word(
AT.from_mva(pseudo_toc, self.project.loader.extern_object).to_rva(), pseudo_hookaddr)
return pseudo_hookaddr, pseudo_toc
else:
if basic_addr is None:
basic_addr = self.project.loader.extern_object.get_pseudo_addr(symbol_name)
return basic_addr, basic_addr
| true | true |
f7f55fb14b1e7b6bcf5f26d2c969912641e7deeb | 289 | py | Python | wagtailcommerce/stores/middleware.py | theplusagency/wagtail-commerce | 6047170f29199ccaf2778534976ab0970c2877e7 | [
"BSD-3-Clause"
] | 3 | 2019-04-12T15:38:43.000Z | 2019-09-22T10:23:20.000Z | wagtailcommerce/stores/middleware.py | wagtailcommerce/wagtailcommerce | 308ed8348483806c16062d09a7e69ec44d9a2e73 | [
"BSD-3-Clause"
] | null | null | null | wagtailcommerce/stores/middleware.py | wagtailcommerce/wagtailcommerce | 308ed8348483806c16062d09a7e69ec44d9a2e73 | [
"BSD-3-Clause"
] | null | null | null | from .utils import get_store
class StoreMiddleware(object):
    """Middleware that resolves the active store and attaches it to each request.

    Follows the one-time-construction / per-request-call middleware protocol:
    built once with the ``get_response`` callable, invoked for every request.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # Expose the current store to downstream views as ``request.store``.
        request.store = get_store(request)
        return self.get_response(request)
| 22.230769 | 45 | 0.692042 | from .utils import get_store
class StoreMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.store = get_store(request)
response = self.get_response(request)
return response
| true | true |
f7f560d95fea1e072301e9cf6f2d400ad15c09a6 | 5,859 | py | Python | src/cleanShpNet.py | ashish-code/gps-denied-geospatial-positioning | 5006b963e0b8fe50b0cabd5e3a9deb6aeb2416f2 | [
"MIT"
] | 2 | 2021-09-07T14:15:50.000Z | 2021-12-27T06:04:54.000Z | src/cleanShpNet.py | ashish-code/gps-denied-geospatial-positioning | 5006b963e0b8fe50b0cabd5e3a9deb6aeb2416f2 | [
"MIT"
] | null | null | null | src/cleanShpNet.py | ashish-code/gps-denied-geospatial-positioning | 5006b963e0b8fe50b0cabd5e3a9deb6aeb2416f2 | [
"MIT"
] | null | null | null | '''
Created on Aug 31, 2015
@author: ash
'''
"""
cleanNetShp -- Tools to clean spatial Network Shapefiles.
"""
import pysal
import numpy
__author__ = "Charles R. Schmidt <schmidtc@gmail.com>"
__all__ = ['snap_verts', 'find_nodes', 'split_at_nodes']
# urlShpFile = "/home/ash/Data/tl_2014_39049_roads/tl_2014_39049_roads.shp"
# outShpFile = "/home/ash/Data/fixedRoads/tl_2014_39049_roads.shp"
# urlShpFile = "/home/ash/Data/tl_2014_11001_roads/tl_2014_11001_roads.shp"
# outShpFile = "/home/ash/Data/fixedRoads/tl_2014_11001_roads.shp"
urlShpFile = "/home/ash/Data/haiti_all_roads/Haiti_all_roads.shp"
outShpFile = "/home/ash/Data/fixedRoads/haiti_all_roads.shp"
def snap_verts(shp,tolerance=0.001,arc=True):
    """
    snap_verts -- Snap verts that are within tolerance meters of each other.

    Description -- Snapping should be performed with a very small tolerance.
        The goal is not to change the network, but to ensure rounding
        errors don't prevent edges from being split at proper intersections.
        The default of 1mm should be adequate if the input is of decent quality.
        Higher snapping values can be used to correct digitizing errors, but care
        should be taken.

    Arguments
    ---------
    tolerance -- float -- snapping tolerance in meters
    arc       -- bool  -- If true, Arc Distance will be used instead of Euclidean

    Returns
    -------
    generator -- each element is a new pysal.cg.Chain with corrected vertices.
    """
    # The Arc KDTree works in kilometers (RADIUS_EARTH_KM), so convert.
    kmtol = tolerance/1000.
    data = numpy.concatenate([rec.vertices for rec in shp])
    if arc:
        kd = pysal.cg.KDTree(data,distance_metric="Arc",radius = pysal.cg.sphere.RADIUS_EARTH_KM)
    else:
        kd = pysal.cg.KDTree(data)
    # For every vertex, the indices of all vertices (itself included) within tolerance.
    q = kd.query_ball_tree(kd,kmtol)
    ### Next three lines assert that snappings are mutual... if 1 snaps to 8, 8 must snap to 1.
    for r,a in enumerate(q):
        for o in a:
            assert a==q[o]
    ### non-mutual snapping can happen.
    ### consider the three points, A (-1,0), B (0,0), C (1,0) and a snapping tolerance of 1.
    ### A-> B
    ### B-> A,C
    ### C-> B
    ### For now, try lowering adjusting the tolerance to avoid this.
    # Replace each vertex with the centroid of its snap group.
    data2 = numpy.empty_like(data)
    for i,r in enumerate(q):
        data2[i] = data[r].mean(0)
    # Re-walk the features, handing each one its block of snapped vertices.
    # NOTE(review): this iterates `shp` a second time -- assumes the pysal
    # shapefile object supports re-iteration; verify for non-file inputs.
    pos=0
    for rec in shp:
        vrts = rec.vertices
        n = len(vrts)
        nrec = pysal.cg.Chain(map(tuple,data2[pos:pos+n]))
        pos+=n
        yield nrec
def find_nodes(shp):
    """
    find_nodes -- Finds vertices in a line type shapefile that appear more than once and/or are end points of a line

    Both end points of every chain are counted one extra time, so end points
    always qualify as nodes; interior vertices qualify only when they occur
    more than once across all features.

    Arguments
    ---------
    shp -- Shapefile Object -- Should be of type Line.

    Returns
    -------
    set
    """
    node_count = {}
    for road in shp:
        vrts = road.vertices
        for node in vrts:
            if node not in node_count:
                node_count[node] = 0
            node_count[node] += 1
        # Double-count the end points so they always exceed the threshold.
        node_count[vrts[0]] += 1
        node_count[vrts[-1]] += 1
    # BUGFIX: dict.iteritems() is Python-2-only and raises AttributeError on
    # Python 3; .items() behaves identically on both versions.
    return set([node for node, c in node_count.items() if c > 1])
def split_at_nodes(shp):
    """
    split_at_nodes -- Split line features at nodes

    Arguments
    ---------
    shp -- list or shapefile -- Chain features to be split at common nodes.

    Returns
    -------
    generator -- yields (pysal.cg.Chain, (from_node_id, to_node_id, oneway))
        tuples suitable for writing to a shapefile + dbf; oneway is always False.
    """
    nodes = find_nodes(shp)
    # Give every node a stable integer id, ordered by coordinate.
    nodeIds = list(nodes)
    nodeIds.sort()
    nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])
    for road in shp:
        vrts = road.vertices
        midVrts = set(road.vertices[1:-1]) #we know end points are nodes
        midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.
        # NOTE(review): list.index() returns only the FIRST occurrence, so a
        # feature passing through the same node twice (a self-loop) would be
        # split at only one of the positions -- confirm inputs exclude these.
        midIdx = [vrts.index(node) for node in midNodes] # Get their indices
        midIdx.sort()
        if midIdx:
            # Pieces are vrts[0:i0+1], vrts[i0:i1+1], ..., vrts[ik:]; stop is
            # index+1 and the next start is index, so consecutive pieces share
            # the node vertex.
            starts = [0]+midIdx
            stops = [x+1 for x in midIdx]+[None]
            for start,stop in zip(starts,stops):
                feat = pysal.cg.Chain(vrts[start:stop])
                rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)
                yield feat,rec
        else:
            # No interior nodes: the feature passes through unchanged.
            rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)
            yield road,rec
def readShpFile(_urlShpFile):
    """Load a line shapefile as a graph and plot its edges and its nodes.

    NOTE(review): `nx` (networkx) and `plt` (matplotlib.pyplot) are used here
    but never imported in this module (only pysal and numpy are) -- calling
    this function raises NameError until those imports are added. The code is
    also Python-2 only (print statements, xrange).
    """
    roadGraph = nx.read_shp(_urlShpFile)
    roadGraph = roadGraph.to_undirected()
    print "graph has been read"
    nodeList = roadGraph.nodes(data=True)
    nNode = len(nodeList)
    # Node keys are coordinate tuples, so they double as plot positions.
    pos = []
    for i in xrange(nNode):
        pos.append(nodeList[i][0])
        pass
    shpLayout = dict(zip(roadGraph,pos))
    print "number of nodes: " + str(nx.number_of_nodes(roadGraph))
    print "number of edges: " + str(nx.number_of_edges(roadGraph))
    # nx.draw_networkx(roadGraph, pos=shpLayout, with_labels=False, node_size = 1, edge_color="r")
    nx.draw_networkx_edges(roadGraph, pos=shpLayout, edgelist=None, width=1, edge_color='r')
    plt.show()
    nx.draw_networkx_nodes(roadGraph, pos=shpLayout, nodelist=None, node_size=1, node_color='b', node_shape='d')
    plt.show()
def createSpatialNetworkShapefile(inshp,outshp):
assert inshp.lower().endswith('.shp')
assert outshp.lower().endswith('.shp')
shp = pysal.open(inshp,'r')
snapped = list(snap_verts(shp,.001))
o = pysal.open(outshp,'w')
odb = pysal.open(outshp[:-4]+'.dbf','w')
odb.header = ["FNODE","TNODE","ONEWAY"]
odb.field_spec = [('N',20,0),('N',20,0),('L',1,0)]
new = list(split_at_nodes(snapped))
for feat,rec in new:
o.write(feat)
odb.write(rec)
o.close()
odb.close()
print "Split %d roads in %d network edges"%(len(shp),len(new))
readShpFile(outShpFile)
if __name__=='__main__':
createSpatialNetworkShapefile(urlShpFile,outShpFile) | 32.370166 | 116 | 0.624509 | '''
Created on Aug 31, 2015
@author: ash
'''
"""
cleanNetShp -- Tools to clean spatial Network Shapefiles.
"""
import pysal
import numpy
__author__ = "Charles R. Schmidt <schmidtc@gmail.com>"
__all__ = ['snap_verts', 'find_nodes', 'split_at_nodes']
urlShpFile = "/home/ash/Data/haiti_all_roads/Haiti_all_roads.shp"
outShpFile = "/home/ash/Data/fixedRoads/haiti_all_roads.shp"
def snap_verts(shp,tolerance=0.001,arc=True):
"""
snap_verts -- Snap verts that are within tolerance meters of each other.
Description -- Snapping should be performed with a very small tolerance.
The goal is not to change the network, but to ensure rounding
errors don't prevent edges from being split at proper intersections.
The default of 1mm should be adequate if the input is of decent quality.
Higher snapping values can be used to correct digitizing errors, but care
should be taken.
Arguments
---------
tolerance -- float -- snapping tolerance in meters
arc -- bool -- If true, Ard Distance will be used instead of Euclidean
Returns
-------
generator -- each element is a new pysal.cg.Chain with corrected vertices.
"""
kmtol = tolerance/1000.
data = numpy.concatenate([rec.vertices for rec in shp])
if arc:
kd = pysal.cg.KDTree(data,distance_metric="Arc",radius = pysal.cg.sphere.RADIUS_EARTH_KM)
else:
kd = pysal.cg.KDTree(data)
q = kd.query_ball_tree(kd,kmtol)
### Next three lines assert that snappings are mutual... if 1 snaps to 8, 8 must snap to 1.
for r,a in enumerate(q):
for o in a:
assert a==q[o]
### non-mutual snapping can happen.
### consider the three points, A (-1,0), B (0,0), C (1,0) and a snapping tolerance of 1.
### A-> B
### B-> A,C
### C-> B
### For now, try lowering adjusting the tolerance to avoid this.
data2 = numpy.empty_like(data)
for i,r in enumerate(q):
data2[i] = data[r].mean(0)
pos=0
for rec in shp:
vrts = rec.vertices
n = len(vrts)
nrec = pysal.cg.Chain(map(tuple,data2[pos:pos+n]))
pos+=n
yield nrec
def find_nodes(shp):
"""
find_nodes -- Finds vertices in a line type shapefile that appear more than once and/or are end points of a line
Arguments
---------
shp -- Shapefile Object -- Should be of type Line.
Returns
-------
set
"""
node_count = {}
for road in shp:
vrts = road.vertices
for node in vrts:
if node not in node_count:
node_count[node] = 0
node_count[node] += 1
node_count[vrts[0]] += 1
node_count[vrts[-1]] += 1
return set([node for node,c in node_count.iteritems() if c > 1])
def split_at_nodes(shp):
"""
split_at_nodes -- Split line features at nodes
Arguments
---------
shp -- list or shapefile -- Chain features to be split at common nodes.
Returns
-------
generator -- yields pysal.cg.Chain objects
"""
nodes = find_nodes(shp)
nodeIds = list(nodes)
nodeIds.sort()
nodeIds = dict([(node,i) for i,node in enumerate(nodeIds)])
for road in shp:
vrts = road.vertices
midVrts = set(road.vertices[1:-1]) #we know end points are nodes
midNodes = midVrts.intersection(nodes) # find any nodes in the middle of the feature.
midIdx = [vrts.index(node) for node in midNodes] # Get their indices
midIdx.sort()
if midIdx:
#print vrts
starts = [0]+midIdx
stops = [x+1 for x in midIdx]+[None]
for start,stop in zip(starts,stops):
feat = pysal.cg.Chain(vrts[start:stop])
rec = (nodeIds[feat.vertices[0]],nodeIds[feat.vertices[-1]],False)
yield feat,rec
else:
rec = (nodeIds[road.vertices[0]],nodeIds[road.vertices[-1]],False)
yield road,rec
def readShpFile(_urlShpFile):
roadGraph = nx.read_shp(_urlShpFile)
roadGraph = roadGraph.to_undirected()
print "graph has been read"
nodeList = roadGraph.nodes(data=True)
nNode = len(nodeList)
pos = []
for i in xrange(nNode):
pos.append(nodeList[i][0])
pass
shpLayout = dict(zip(roadGraph,pos))
print "number of nodes: " + str(nx.number_of_nodes(roadGraph))
print "number of edges: " + str(nx.number_of_edges(roadGraph))
# nx.draw_networkx(roadGraph, pos=shpLayout, with_labels=False, node_size = 1, edge_color="r")
nx.draw_networkx_edges(roadGraph, pos=shpLayout, edgelist=None, width=1, edge_color='r')
plt.show()
nx.draw_networkx_nodes(roadGraph, pos=shpLayout, nodelist=None, node_size=1, node_color='b', node_shape='d')
plt.show()
def createSpatialNetworkShapefile(inshp,outshp):
assert inshp.lower().endswith('.shp')
assert outshp.lower().endswith('.shp')
shp = pysal.open(inshp,'r')
snapped = list(snap_verts(shp,.001))
o = pysal.open(outshp,'w')
odb = pysal.open(outshp[:-4]+'.dbf','w')
odb.header = ["FNODE","TNODE","ONEWAY"]
odb.field_spec = [('N',20,0),('N',20,0),('L',1,0)]
new = list(split_at_nodes(snapped))
for feat,rec in new:
o.write(feat)
odb.write(rec)
o.close()
odb.close()
print "Split %d roads in %d network edges"%(len(shp),len(new))
readShpFile(outShpFile)
if __name__=='__main__':
createSpatialNetworkShapefile(urlShpFile,outShpFile) | false | true |
f7f561d0dcdf8be2fa2cc5ae7fe2885f40fc763d | 1,248 | py | Python | triggerflow/libs/cloudevents/sdk/converters/base.py | Dahk/triggerflow-examples | c492942ab911615d144a1375f4bc7933831203bd | [
"Apache-2.0"
] | 38 | 2020-06-11T08:05:21.000Z | 2022-03-17T10:21:18.000Z | triggerflow/libs/cloudevents/sdk/converters/base.py | Dahk/triggerflow-examples | c492942ab911615d144a1375f4bc7933831203bd | [
"Apache-2.0"
] | 1 | 2020-07-07T15:47:56.000Z | 2020-07-07T15:47:56.000Z | triggerflow/libs/cloudevents/sdk/converters/base.py | Dahk/triggerflow-examples | c492942ab911615d144a1375f4bc7933831203bd | [
"Apache-2.0"
] | 7 | 2020-05-18T16:32:06.000Z | 2021-11-30T17:11:12.000Z | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
from ..event import base
class Converter(object):
TYPE = None
def read(
self,
event,
headers: dict,
body: typing.IO,
data_unmarshaller: typing.Callable
) -> base.BaseEvent:
raise Exception("not implemented")
def event_supported(self, event: object) -> bool:
raise Exception("not implemented")
def can_read(self, content_type: str) -> bool:
raise Exception("not implemented")
def write(
self,
event: base.BaseEvent,
data_marshaller: typing.Callable
) -> (dict, object):
raise Exception("not implemented")
| 27.733333 | 78 | 0.66266 |
import typing
from ..event import base
class Converter(object):
TYPE = None
def read(
self,
event,
headers: dict,
body: typing.IO,
data_unmarshaller: typing.Callable
) -> base.BaseEvent:
raise Exception("not implemented")
def event_supported(self, event: object) -> bool:
raise Exception("not implemented")
def can_read(self, content_type: str) -> bool:
raise Exception("not implemented")
def write(
self,
event: base.BaseEvent,
data_marshaller: typing.Callable
) -> (dict, object):
raise Exception("not implemented")
| true | true |
f7f563231b83c189e52431f80f627683fd215e19 | 4,443 | py | Python | sdk/python/pulumi_azure_native/dbforpostgresql/v20171201preview/get_firewall_rule.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/dbforpostgresql/v20171201preview/get_firewall_rule.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/dbforpostgresql/v20171201preview/get_firewall_rule.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetFirewallRuleResult',
'AwaitableGetFirewallRuleResult',
'get_firewall_rule',
]
@pulumi.output_type
class GetFirewallRuleResult:
    """
    Represents a server firewall rule.
    """
    def __init__(__self__, end_ip_address=None, id=None, name=None, start_ip_address=None, type=None):
        # Each field is defensively validated to be a str when supplied, then
        # stored via pulumi.set so the @pulumi.output_type machinery can back
        # the @pulumi.getter properties declared below.
        if end_ip_address and not isinstance(end_ip_address, str):
            raise TypeError("Expected argument 'end_ip_address' to be a str")
        pulumi.set(__self__, "end_ip_address", end_ip_address)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if start_ip_address and not isinstance(start_ip_address, str):
            raise TypeError("Expected argument 'start_ip_address' to be a str")
        pulumi.set(__self__, "start_ip_address", start_ip_address)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="endIpAddress")
    def end_ip_address(self) -> str:
        """
        The end IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "end_ip_address")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="startIpAddress")
    def start_ip_address(self) -> str:
        """
        The start IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "start_ip_address")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
        """
        return pulumi.get(self, "type")
class AwaitableGetFirewallRuleResult(GetFirewallRuleResult):
    """Awaitable wrapper so the invoke result can be consumed with ``await``
    as well as synchronously."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, as the
        # await protocol requires; the generator then immediately returns a
        # plain GetFirewallRuleResult copy of this object's fields.
        if False:
            yield self
        return GetFirewallRuleResult(
            end_ip_address=self.end_ip_address,
            id=self.id,
            name=self.name,
            start_ip_address=self.start_ip_address,
            type=self.type)
def get_firewall_rule(firewall_rule_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      server_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallRuleResult:
    """
    Represents a server firewall rule.

    :param str firewall_rule_name: The name of the server firewall rule.
    :param str resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
    :param str server_name: The name of the server.
    """
    # The invoke payload uses the provider's camelCase argument names.
    __args__ = {
        'firewallRuleName': firewall_rule_name,
        'resourceGroupName': resource_group_name,
        'serverName': server_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    result = pulumi.runtime.invoke('azure-native:dbforpostgresql/v20171201preview:getFirewallRule', __args__, opts=opts, typ=GetFirewallRuleResult).value
    return AwaitableGetFirewallRuleResult(
        end_ip_address=result.end_ip_address,
        id=result.id,
        name=result.name,
        start_ip_address=result.start_ip_address,
        type=result.type)
| 36.719008 | 193 | 0.66284 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetFirewallRuleResult',
'AwaitableGetFirewallRuleResult',
'get_firewall_rule',
]
@pulumi.output_type
class GetFirewallRuleResult:
    """
    Represents a server firewall rule.
    """
    def __init__(__self__, end_ip_address=None, id=None, name=None, start_ip_address=None, type=None):
        if end_ip_address and not isinstance(end_ip_address, str):
            raise TypeError("Expected argument 'end_ip_address' to be a str")
        pulumi.set(__self__, "end_ip_address", end_ip_address)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if start_ip_address and not isinstance(start_ip_address, str):
            raise TypeError("Expected argument 'start_ip_address' to be a str")
        pulumi.set(__self__, "start_ip_address", start_ip_address)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="endIpAddress")
    def end_ip_address(self) -> str:
        """
        The end IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "end_ip_address")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Fully qualified resource ID for the resource.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The name of the resource.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="startIpAddress")
    def start_ip_address(self) -> str:
        """
        The start IP address of the server firewall rule. Must be IPv4 format.
        """
        return pulumi.get(self, "start_ip_address")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        The type of the resource.
        """
        return pulumi.get(self, "type")
class AwaitableGetFirewallRuleResult(GetFirewallRuleResult):
    # Awaitable wrapper: the unreachable ``yield`` makes __await__ a
    # generator, and awaiting returns a plain (non-awaitable) copy.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetFirewallRuleResult(
            end_ip_address=self.end_ip_address,
            id=self.id,
            name=self.name,
            start_ip_address=self.start_ip_address,
            type=self.type)
def get_firewall_rule(firewall_rule_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      server_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetFirewallRuleResult:
    """
    Represents a server firewall rule.

    :param str firewall_rule_name: The name of the server firewall rule.
    :param str resource_group_name: The name of the resource group that contains the resource.
    :param str server_name: The name of the server.
    """
    # The invoke payload uses the provider's camelCase argument names.
    __args__ = dict()
    __args__['firewallRuleName'] = firewall_rule_name
    __args__['resourceGroupName'] = resource_group_name
    __args__['serverName'] = server_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to this SDK's own version when the caller did not pin one.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:dbforpostgresql/v20171201preview:getFirewallRule', __args__, opts=opts, typ=GetFirewallRuleResult).value
    return AwaitableGetFirewallRuleResult(
        end_ip_address=__ret__.end_ip_address,
        id=__ret__.id,
        name=__ret__.name,
        start_ip_address=__ret__.start_ip_address,
        type=__ret__.type)
| true | true |
f7f563c54c87bf8b7c7ccebdcb8840be30d1a427 | 135,605 | py | Python | lib/galaxy/model/mapping.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | 1 | 2021-02-05T13:19:58.000Z | 2021-02-05T13:19:58.000Z | lib/galaxy/model/mapping.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/model/mapping.py | globusgenomics/galaxy | 7caf74d9700057587b3e3434c64e82c5b16540f1 | [
"CC-BY-3.0"
] | null | null | null | """
Details of how the data model objects are mapped onto the relational database
are encapsulated here.
"""
import logging
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
false,
ForeignKey,
func,
Integer,
MetaData,
not_,
Numeric,
select,
String, Table,
TEXT,
Text,
true,
Unicode,
UniqueConstraint,
VARCHAR
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import backref, class_mapper, column_property, deferred, mapper, object_session, relation
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.sql import exists
from sqlalchemy.types import BigInteger
from galaxy import model
from galaxy.model.base import ModelMapping
from galaxy.model.custom_types import JSONType, MetadataType, TrimmedString, UUIDType
from galaxy.model.orm.engine_factory import build_engine
from galaxy.model.orm.now import now
from galaxy.model.security import GalaxyRBACAgent
log = logging.getLogger(__name__)
# Shared MetaData instance that all mapped tables below register against.
metadata = MetaData()
# Tracks running Galaxy server processes (one row per server_name/host pair).
model.WorkerProcess.table = Table(
    'worker_process',
    metadata,
    Column("id", Integer, primary_key=True),
    Column("server_name", String(255), index=True),
    Column("hostname", String(255)),
    Column("update_time", DateTime, default=now, onupdate=now),
    UniqueConstraint('server_name', 'hostname'),
)
# Core user account table.
model.User.table = Table(
    "galaxy_user", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("email", TrimmedString(255), index=True, nullable=False),
    Column("username", TrimmedString(255), index=True, unique=True),
    Column("password", TrimmedString(255), nullable=False),
    Column("last_password_change", DateTime, default=now),
    Column("external", Boolean, default=False),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("disk_usage", Numeric(15, 0), index=True),
    Column("active", Boolean, index=True, default=True, nullable=False),
    Column("activation_token", TrimmedString(64), nullable=True, index=True))
# Postal addresses attached to a user account.
model.UserAddress.table = Table(
    "user_address", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("desc", TrimmedString(255)),
    Column("name", TrimmedString(255), nullable=False),
    Column("institution", TrimmedString(255)),
    Column("address", TrimmedString(255), nullable=False),
    Column("city", TrimmedString(255), nullable=False),
    Column("state", TrimmedString(255), nullable=False),
    Column("postal_code", TrimmedString(255), nullable=False),
    Column("country", TrimmedString(255), nullable=False),
    Column("phone", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# PSA (python-social-auth) backing tables used for external authentication.
model.PSAAssociation.table = Table(
    "psa_association", metadata,
    Column('id', Integer, primary_key=True),
    Column('server_url', VARCHAR(255)),
    Column('handle', VARCHAR(255)),
    Column('secret', VARCHAR(255)),
    Column('issued', Integer),
    Column('lifetime', Integer),
    Column('assoc_type', VARCHAR(64)))
model.PSACode.table = Table(
    "psa_code", metadata,
    Column('id', Integer, primary_key=True),
    Column('email', VARCHAR(200)),
    Column('code', VARCHAR(32)))
model.PSANonce.table = Table(
    "psa_nonce", metadata,
    Column('id', Integer, primary_key=True),
    Column('server_url', VARCHAR(255)),
    Column('timestamp', Integer),
    Column('salt', VARCHAR(40)))
model.PSAPartial.table = Table(
    "psa_partial", metadata,
    Column('id', Integer, primary_key=True),
    Column('token', VARCHAR(32)),
    Column('data', TEXT),
    Column('next_step', Integer),
    Column('backend', VARCHAR(32)))
# OIDC tokens obtained for a user from an external identity provider.
model.UserAuthnzToken.table = Table(
    "oidc_user_authnz_tokens", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
    Column('uid', VARCHAR(255)),
    Column('provider', VARCHAR(32)),
    Column('extra_data', JSONType, nullable=True),
    Column('lifetime', Integer),
    Column('assoc_type', VARCHAR(64)))
# Custos-issued tokens; uniqueness enforced per user/provider pairing.
model.CustosAuthnzToken.table = Table(
    "custos_authnz_token", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id")),
    Column('external_user_id', String(64)),
    Column('provider', String(255)),
    Column('access_token', Text),
    Column('id_token', Text),
    Column('refresh_token', Text),
    Column("expiration_time", DateTime),
    Column("refresh_expiration_time", DateTime),
    UniqueConstraint("user_id", "external_user_id", "provider"),
    UniqueConstraint("external_user_id", "provider"),
)
# Cloud authorization configs (optionally linked to an OIDC token row).
model.CloudAuthz.table = Table(
    "cloudauthz", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
    Column('provider', String(255)),
    Column('config', JSONType),
    Column('authn_id', Integer, ForeignKey("oidc_user_authnz_tokens.id"), index=True),
    Column('tokens', JSONType),
    Column('last_update', DateTime),
    Column('last_activity', DateTime),
    Column('description', TEXT),
    Column('create_time', DateTime, default=now))
# One-time tokens for the password-reset flow (token itself is the PK).
model.PasswordResetToken.table = Table(
    "password_reset_token", metadata,
    Column("token", String(32), primary_key=True, unique=True, index=True),
    Column("expiration_time", DateTime),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Dynamically-defined tools (tool definitions stored in the database).
model.DynamicTool.table = Table(
    "dynamic_tool", metadata,
    Column("id", Integer, primary_key=True),
    Column("uuid", UUIDType()),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("tool_id", Unicode(255)),
    Column("tool_version", Unicode(255)),
    Column("tool_format", Unicode(255)),
    Column("tool_path", Unicode(255)),
    Column("tool_directory", Unicode(255)),
    Column("hidden", Boolean, default=True),
    Column("active", Boolean, default=True),
    Column("value", JSONType()),
)
# A user's analysis history; hid_counter numbers the datasets within it.
model.History.table = Table(
    "history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("hid_counter", Integer, default=1),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("importing", Boolean, index=True, default=False),
    Column("genome_build", TrimmedString(40)),
    Column("importable", Boolean, default=False),
    Column("slug", TEXT, index=True),
    Column("published", Boolean, index=True, default=False))
# Which users a history has been shared with.
model.HistoryUserShareAssociation.table = Table(
    "history_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# HDA: a dataset's appearance within a history (annotated state/metadata).
# Note the "_state"/"_metadata" key= aliases, which let the model class wrap
# these columns with property logic.
model.HistoryDatasetAssociation.table = Table(
    "history_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True, key="_state"),
    Column("copied_from_history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id"), nullable=True),
    Column("copied_from_library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"), nullable=True),
    Column("name", TrimmedString(255)),
    Column("info", TrimmedString(255)),
    Column("blurb", TrimmedString(255)),
    Column("peek", TEXT),
    Column("tool_version", TEXT),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("parent_id", Integer, ForeignKey("history_dataset_association.id"), nullable=True),
    Column("designation", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("visible", Boolean),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
    Column("version", Integer, default=1, nullable=True, index=True),
    Column("hid", Integer),
    Column("purged", Boolean, index=True, default=False),
    Column("hidden_beneath_collection_instance_id",
        ForeignKey("history_dataset_collection_association.id"), nullable=True))
# Versioned snapshots of HDA name/extension/metadata over time.
model.HistoryDatasetAssociationHistory.table = Table(
    "history_dataset_association_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("update_time", DateTime, default=now),
    Column("version", Integer),
    Column("name", TrimmedString(255)),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
    )
# The physical dataset: on-disk file state, size, and object store location.
model.Dataset.table = Table(
    "dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("purgable", Boolean, default=True),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("external_filename", TEXT),
    Column("_extra_files_path", TEXT),
    Column('file_size', Numeric(15, 0)),
    Column('total_size', Numeric(15, 0)),
    Column('uuid', UUIDType()))
# Provenance: where a dataset's bytes came from (URI plus any transforms).
model.DatasetSource.table = Table(
    "dataset_source", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("source_uri", TEXT),
    Column("extra_files_path", TEXT),
    Column("transform", JSONType)
)
# Recorded content hashes for a dataset (and for its source, below).
model.DatasetHash.table = Table(
    "dataset_hash", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("hash_function", TEXT),
    Column("hash_value", TEXT),
    Column("extra_files_path", TEXT),
)
model.DatasetSourceHash.table = Table(
    "dataset_source_hash", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_source_id", Integer, ForeignKey("dataset_source.id"), index=True),
    Column("hash_function", TEXT),
    Column("hash_value", TEXT)
)
# hda read access permission given by a user to a specific site (gen. for external display applications)
model.HistoryDatasetAssociationDisplayAtAuthorization.table = Table(
    "history_dataset_association_display_at_authorization", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("site", TrimmedString(255)))
# Relates an HDA to a subset of itself (e.g. a genomic-region slice).
model.HistoryDatasetAssociationSubset.table = Table(
    "history_dataset_association_subset", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("history_dataset_association_subset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("location", Unicode(255), index=True))
# Links a parent dataset to the HDA/LDDA produced by implicit datatype conversion.
model.ImplicitlyConvertedDatasetAssociation.table = Table(
    "implicitly_converted_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("hda_parent_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("ldda_parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("metadata_safe", Boolean, index=True, default=True),
    Column("type", TrimmedString(255)))
# Validation failures recorded against an HDA.
model.ValidationError.table = Table(
    "validation_error", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("message", TrimmedString(255)),
    Column("err_type", TrimmedString(64)),
    Column("attributes", TEXT))
# RBAC: groups, roles, and the association tables linking them to users.
model.Group.table = Table(
    "galaxy_group", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("deleted", Boolean, index=True, default=False))
model.UserGroupAssociation.table = Table(
    "user_group_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.UserRoleAssociation.table = Table(
    "user_role_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.GroupRoleAssociation.table = Table(
    "group_role_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.Role.table = Table(
    "role", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("description", TEXT),
    Column("type", String(40), index=True),
    Column("deleted", Boolean, index=True, default=False))
# Disk quotas and their per-user / per-group / default associations.
model.UserQuotaAssociation.table = Table(
    "user_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.GroupQuotaAssociation.table = Table(
    "group_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.Quota.table = Table(
    "quota", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("description", TEXT),
    Column("bytes", BigInteger),
    Column("operation", String(8)),
    Column("deleted", Boolean, index=True, default=False))
model.DefaultQuotaAssociation.table = Table(
    "default_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("type", String(32), index=True, unique=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True))
# Permission tables: each grants a role an action on a specific object type.
model.DatasetPermissions.table = Table(
    "dataset_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryPermissions.table = Table(
    "library_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryFolderPermissions.table = Table(
    "library_folder_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetPermissions.table = Table(
    "library_dataset_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetDatasetAssociationPermissions.table = Table(
    "library_dataset_dataset_association_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"),
        nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
# Default permissions applied to a user's new datasets / a history's new datasets.
model.DefaultUserPermissions.table = Table(
    "default_user_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("action", TEXT),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.DefaultHistoryPermissions.table = Table(
    "default_history_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("action", TEXT),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
# A dataset slot inside a library folder; points at its current LDDA version.
model.LibraryDataset.table = Table(
    "library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    # current version of dataset, if null, there is not a current version selected
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk"),
        nullable=True, index=True),
    Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    # not currently being used, but for possible future use
    Column("order_id", Integer),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    # when not None/null this will supercede display in library (but not when imported into user's history?)
    Column("name", TrimmedString(255), key="_name", index=True),
    # when not None/null this will supercede display in library (but not when imported into user's history?)
    Column("info", TrimmedString(255), key="_info"),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# LDDA: a dataset version within a library (library analogue of an HDA).
# ``use_alter`` breaks the circular FK between HDA and LDDA at create time.
model.LibraryDatasetDatasetAssociation.table = Table(
    "library_dataset_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True, key="_state"),
    Column("copied_from_history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id", use_alter=True, name='history_dataset_association_dataset_id_fkey'),
        nullable=True),
    Column("copied_from_library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id", use_alter=True, name='library_dataset_dataset_association_id_fkey'),
        nullable=True),
    Column("name", TrimmedString(255), index=True),
    Column("info", TrimmedString(255)),
    Column("blurb", TrimmedString(255)),
    Column("peek", TEXT),
    Column("tool_version", TEXT),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), nullable=True),
    Column("designation", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("visible", Boolean),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("message", TrimmedString(255)))
# Free-form JSON metadata, plus a path/value index over it for querying.
model.ExtendedMetadata.table = Table(
    "extended_metadata", metadata,
    Column("id", Integer, primary_key=True),
    Column("data", JSONType))
model.ExtendedMetadataIndex.table = Table(
    "extended_metadata_index", metadata,
    Column("id", Integer, primary_key=True),
    Column("extended_metadata_id", Integer,
        ForeignKey("extended_metadata.id", onupdate="CASCADE", ondelete="CASCADE"), index=True),
    Column("path", String(255)),
    Column("value", TEXT))
# A data library and its folder hierarchy.
model.Library.table = Table(
    "library", metadata,
    Column("id", Integer, primary_key=True),
    Column("root_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("description", TEXT),
    Column("synopsis", TEXT))
model.LibraryFolder.table = Table(
    "library_folder", metadata,
    Column("id", Integer, primary_key=True),
    Column("parent_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", TEXT, index=True),
    Column("description", TEXT),
    Column("order_id", Integer),  # not currently being used, but for possible future use
    Column("item_count", Integer),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("genome_build", TrimmedString(40)))
# Form-based extra info attached to libraries, folders, and LDDAs.
model.LibraryInfoAssociation.table = Table(
    "library_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_id", Integer, ForeignKey("library.id"), index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("inheritable", Boolean, index=True, default=False),
    Column("deleted", Boolean, index=True, default=False))
model.LibraryFolderInfoAssociation.table = Table(
    "library_folder_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("inheritable", Boolean, index=True, default=False),
    Column("deleted", Boolean, index=True, default=False))
model.LibraryDatasetDatasetInfoAssociation.table = Table(
    "library_dataset_dataset_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"), nullable=True, index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("deleted", Boolean, index=True, default=False))
# A single tool execution: command line, runner/destination info, and
# captured stdout/stderr for both the job wrapper and the tool itself.
model.Job.table = Table(
    "job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("tool_id", String(255)),
    Column("tool_version", TEXT, default="1.0.0"),
    Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
    Column("state", String(64), index=True),
    Column("info", TrimmedString(255)),
    Column("copied_from_job_id", Integer, nullable=True),
    Column("command_line", TEXT),
    Column("dependencies", JSONType, nullable=True),
    Column("job_messages", JSONType, nullable=True),
    Column("param_filename", String(1024)),
    Column("runner_name", String(255)),
    Column("job_stdout", TEXT),
    Column("job_stderr", TEXT),
    Column("tool_stdout", TEXT),
    Column("tool_stderr", TEXT),
    Column("exit_code", Integer, nullable=True),
    Column("traceback", TEXT),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("job_runner_name", String(255)),
    Column("job_runner_external_id", String(255)),
    Column("destination_id", String(255), nullable=True),
    Column("destination_params", JSONType, nullable=True),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("imported", Boolean, default=False, index=True),
    Column("params", TrimmedString(255), index=True),
    Column("handler", TrimmedString(255), index=True))
# Audit trail of job state transitions.
model.JobStateHistory.table = Table(
    "job_state_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("state", String(64), index=True),
    Column("info", TrimmedString(255)))
# Tool parameter values submitted with a job.
model.JobParameter.table = Table(
    "job_parameter", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("name", String(255)),
    Column("value", TEXT))
# Association tables mapping jobs to their input/output datasets and
# dataset collections (history-level and library-level).
model.JobToInputDatasetAssociation.table = Table(
    "job_to_input_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("dataset_version", Integer),
    Column("name", String(255)))
model.JobToOutputDatasetAssociation.table = Table(
    "job_to_output_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("name", String(255)))
model.JobToInputDatasetCollectionAssociation.table = Table(
    "job_to_input_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
model.JobToImplicitOutputDatasetCollectionAssociation.table = Table(
    "job_to_implicit_output_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("name", Unicode(255)))
model.JobToOutputDatasetCollectionAssociation.table = Table(
    "job_to_output_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
model.JobToInputLibraryDatasetAssociation.table = Table(
    "job_to_input_library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("name", String(255)))
model.JobToOutputLibraryDatasetAssociation.table = Table(
    "job_to_output_library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("name", String(255)))
# Records which input collections an implicitly-created collection came from.
model.ImplicitlyCreatedDatasetCollectionInput.table = Table(
    "implicitly_created_dataset_collection_inputs", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("input_dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
# Groups the jobs spawned by mapping a tool over a collection.
model.ImplicitCollectionJobs.table = Table(
    "implicit_collection_jobs", metadata,
    Column("id", Integer, primary_key=True),
    Column("populated_state", TrimmedString(64), default='new', nullable=False),
)
model.ImplicitCollectionJobsJobAssociation.table = Table(
"implicit_collection_jobs_job_association", metadata,
Column("id", Integer, primary_key=True),
Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True), # Consider making this nullable...
Column("order_index", Integer, nullable=False),
)
# Bookkeeping for setting dataset metadata in an external process: one row per
# (job, HDA-or-LDDA) pair, recording the scratch filenames exchanged with the
# external metadata-setting script.
model.JobExternalOutputMetadata.table = Table(
    "job_external_output_metadata", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("history_dataset_association_id", Integer,
           ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("library_dataset_dataset_association_id", Integer,
           ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("is_valid", Boolean, default=True),
    Column("filename_in", String(255)),
    Column("filename_out", String(255)),
    Column("filename_results_code", String(255)),
    Column("filename_kwds", String(255)),
    Column("filename_override_metadata", String(255)),
    Column("job_runner_external_pid", String(255)))
# Tracks a job that exports a history to an archive dataset.
model.JobExportHistoryArchive.table = Table(
    "job_export_history_archive", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("compressed", Boolean, index=True, default=False),
    Column("history_attrs_filename", TEXT))
# Tracks a job that imports a history from an unpacked archive directory.
model.JobImportHistoryArchive.table = Table(
    "job_import_history_archive", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("archive_dir", TEXT))
# Text-valued job metrics collected by metric plugins (value length capped at
# model.JOB_METRIC_MAX_LENGTH).
model.JobMetricText.table = Table(
    "job_metric_text", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("plugin", Unicode(255)),
    Column("metric_name", Unicode(255)),
    Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
# Same as job_metric_text, but attached to an individual task of a job.
model.TaskMetricText.table = Table(
    "task_metric_text", metadata,
    Column("id", Integer, primary_key=True),
    Column("task_id", Integer, ForeignKey("task.id"), index=True),
    Column("plugin", Unicode(255)),
    Column("metric_name", Unicode(255)),
    Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
# Numeric job metrics; precision/scale come from model-level constants so all
# metric tables agree on the numeric type.
model.JobMetricNumeric.table = Table(
    "job_metric_numeric", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("plugin", Unicode(255)),
    Column("metric_name", Unicode(255)),
    Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
# Same as job_metric_numeric, but attached to an individual task.
model.TaskMetricNumeric.table = Table(
    "task_metric_numeric", metadata,
    Column("id", Integer, primary_key=True),
    Column("task_id", Integer, ForeignKey("task.id"), index=True),
    Column("plugin", Unicode(255)),
    Column("metric_name", Unicode(255)),
    Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
# State for genome-index generation: ties together the indexing job, the
# deferred/transfer jobs that feed it, and the resulting dataset.
model.GenomeIndexToolData.table = Table(
    "genome_index_tool_data", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True),
    Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("fasta_path", String(255)),
    Column("created_time", DateTime, default=now),
    Column("modified_time", DateTime, default=now, onupdate=now),
    Column("indexer", String(64)),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# A sub-unit of work belonging to a job (job_id is NOT NULL); mirrors many of
# the job table's execution/bookkeeping columns at task granularity.
model.Task.table = Table(
    "task", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("execution_time", DateTime),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("command_line", TEXT),
    Column("param_filename", String(1024)),
    Column("runner_name", String(255)),
    Column("job_stdout", TEXT),  # job_stdout makes sense here because it is short for job script standard out.
    Column("job_stderr", TEXT),
    Column("tool_stdout", TEXT),
    Column("tool_stderr", TEXT),
    Column("exit_code", Integer, nullable=True),
    Column("job_messages", JSONType, nullable=True),
    Column("info", TrimmedString(255)),
    Column("traceback", TEXT),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
    Column("working_directory", String(1024)),
    Column("task_runner_name", String(255)),
    Column("task_runner_external_id", String(255)),
    Column("prepare_input_files_cmd", TEXT))
# An action (action_type + JSON arguments) attached to a workflow step, to be
# applied to the named output after the step's job completes.
model.PostJobAction.table = Table(
    "post_job_action", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("action_type", String(255), nullable=False),
    Column("output_name", String(255), nullable=True),
    Column("action_arguments", JSONType, nullable=True))
# Many-to-many link binding a concrete job to the post-job actions that must
# run for it; both sides are NOT NULL.
model.PostJobActionAssociation.table = Table(
    "post_job_action_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
    Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False))
# Plugin-driven deferred work item; "plugin" names the handler and "params"
# carries its JSON payload.
model.DeferredJob.table = Table(
    "deferred_job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("plugin", String(128), index=True),
    Column("params", JSONType))
# A file-transfer work item, including the pid/socket of the transfer process.
model.TransferJob.table = Table(
    "transfer_job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("path", String(1024)),
    Column("info", TEXT),
    Column("pid", Integer),
    Column("socket", Integer),
    Column("params", JSONType))
# A collection of datasets (or nested collections); collection_type describes
# its structure and populated_state tracks element creation progress.
model.DatasetCollection.table = Table(
    "dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_type", Unicode(255), nullable=False),
    Column("populated_state", TrimmedString(64), default='ok', nullable=False),
    Column("populated_state_message", TEXT),
    Column("element_count", Integer, nullable=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
# Places a dataset_collection into a history (the collection analogue of an
# HDA): hid/name/visibility plus copy provenance and the creating job or
# implicit-collection-jobs group.
model.HistoryDatasetCollectionAssociation.table = Table(
    "history_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("hid", Integer),
    Column("visible", Boolean),
    Column("deleted", Boolean, default=False),
    Column("copied_from_history_dataset_collection_association_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), nullable=True),
    Column("implicit_output_name", Unicode(255), nullable=True),
    Column("job_id", ForeignKey("job.id"), index=True, nullable=True),
    Column("implicit_collection_jobs_id", ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
)
# Places a dataset_collection into a library folder.
model.LibraryDatasetCollectionAssociation.table = Table(
    "library_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("deleted", Boolean, default=False))
# One element of a collection: exactly one of hda_id / ldda_id /
# child_collection_id identifies the child (child_collection_id allows nesting).
model.DatasetCollectionElement.table = Table(
    "dataset_collection_element", metadata,
    Column("id", Integer, primary_key=True),
    # Parent collection id describing what collection this element belongs to.
    Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=False),
    # Child defined by this association - HDA, LDDA, or another dataset association...
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("child_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=True),
    # Element index and identifier to define this parent-child relationship.
    Column("element_index", Integer),
    Column("element_identifier", Unicode(255), ))
# Free-form application event log; history/user/session references are all
# optional so events can be recorded in any context.
model.Event.table = Table(
    "event", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True, nullable=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("message", TrimmedString(1024)),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
    Column("tool_id", String(255)))
# A browser session; user_id is nullable to support anonymous sessions.
model.GalaxySession.table = Table(
    "galaxy_session", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("remote_host", String(255)),
    Column("remote_addr", String(255)),
    Column("referer", TEXT),
    Column("current_history_id", Integer, ForeignKey("history.id"), nullable=True),
    # unique 128 bit random number coerced to a string
    Column("session_key", TrimmedString(255), index=True, unique=True),
    Column("is_valid", Boolean, default=False),
    # saves a reference to the previous session so we have a way to chain them together
    Column("prev_session_id", Integer),
    Column("disk_usage", Numeric(15, 0), index=True),
    Column("last_action", DateTime))
# Many-to-many link between sessions and the histories used within them.
model.GalaxySessionToHistoryAssociation.table = Table(
    "galaxy_session_to_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
# User-owned container for workflow versions; latest_workflow_id points at the
# current version (use_alter breaks the circular FK with "workflow").
model.StoredWorkflow.table = Table(
    "stored_workflow", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    Column("latest_workflow_id", Integer,
           ForeignKey("workflow.id", use_alter=True, name='stored_workflow_latest_workflow_id_fk'), index=True),
    Column("name", TEXT),
    Column("deleted", Boolean, default=False),
    Column("importable", Boolean, default=False),
    Column("slug", TEXT, index=True),
    Column("from_path", TEXT, index=True),
    Column("published", Boolean, index=True, default=False))
# One immutable version of a workflow's structure.
model.Workflow.table = Table(
    "workflow", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    # workflows will belong to either a stored workflow or a parent/nesting workflow.
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True, nullable=True),
    Column("parent_workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
    Column("name", TEXT),
    Column("has_cycles", Boolean),
    Column("has_errors", Boolean),
    Column("uuid", UUIDType, nullable=True))
# A node in a workflow: a tool, a subworkflow, a dynamic tool, or an input
# module, identified by "type"; tool state and canvas layout are stored as JSON.
model.WorkflowStep.table = Table(
    "workflow_step", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
    Column("subworkflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
    Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
    Column("type", String(64)),
    Column("tool_id", TEXT),
    Column("tool_version", TEXT),
    Column("tool_inputs", JSONType),
    Column("tool_errors", JSONType),
    Column("position", JSONType),
    Column("config", JSONType),
    Column("order_index", Integer),
    Column("uuid", UUIDType),
    # Column( "input_connections", JSONType ),
    Column("label", Unicode(255)))
# A named input of a workflow step plus its default/runtime configuration;
# (workflow_step_id, name) is unique so each input appears at most once.
model.WorkflowStepInput.table = Table(
    "workflow_step_input", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("name", TEXT),
    Column("merge_type", TEXT),
    Column("scatter_type", TEXT),
    Column("value_from", JSONType),
    Column("value_from_type", TEXT),
    Column("default_value", JSONType),
    Column("default_value_set", Boolean, default=False),
    Column("runtime_value", Boolean, default=False),
    UniqueConstraint("workflow_step_id", "name"),
)
# Per-step JSON state captured for a workflow invocation request; rows cascade
# with their invocation.
model.WorkflowRequestStepState.table = Table(
    "workflow_request_step_states", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer,
           ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("value", JSONType))
# Typed name/value parameters supplied with an invocation request; rows cascade
# with their invocation.
model.WorkflowRequestInputParameter.table = Table(
    "workflow_request_input_parameters", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer,
           ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
    Column("name", Unicode(255)),
    Column("value", TEXT),
    Column("type", Unicode(255)))
# A JSON parameter value targeted at a specific step of an invocation.
model.WorkflowRequestInputStepParameter.table = Table(
    "workflow_request_input_step_parameter", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("parameter_value", JSONType),
)
# Binds a requested invocation input (by step and name) to a concrete HDA.
model.WorkflowRequestToInputDatasetAssociation.table = Table(
    "workflow_request_to_input_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(255)),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
# Binds a requested invocation input to a concrete dataset collection (HDCA).
model.WorkflowRequestToInputDatasetCollectionAssociation.table = Table(
    "workflow_request_to_input_collection_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(255)),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True))
# An edge of the workflow graph: a named output of one step feeding a
# workflow_step_input; input_subworkflow_step_id targets a step inside a
# subworkflow when the destination is nested.
model.WorkflowStepConnection.table = Table(
    "workflow_step_connection", metadata,
    Column("id", Integer, primary_key=True),
    Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("input_step_input_id", Integer, ForeignKey("workflow_step_input.id"), index=True),
    Column("output_name", TEXT),
    Column("input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
)
# Marks a step output as a labeled output of the workflow as a whole.
model.WorkflowOutput.table = Table(
    "workflow_output", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("output_name", String(255), nullable=True),
    Column("label", Unicode(255)),
    Column("uuid", UUIDType),
)
# One run of a workflow version into a target history; scheduler/handler
# record which processes manage the run.
model.WorkflowInvocation.table = Table(
    "workflow_invocation", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
    Column("state", TrimmedString(64), index=True),
    Column("scheduler", TrimmedString(255), index=True),
    Column("handler", TrimmedString(255), index=True),
    Column('uuid', UUIDType()),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
# Execution state of a single step within an invocation; the work is either a
# single job or an implicit-collection-jobs group (both nullable).
model.WorkflowInvocationStep.table = Table(
    "workflow_invocation_step", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True, nullable=False),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("state", TrimmedString(64), index=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=True),
    Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
    Column("action", JSONType, nullable=True))
# Maps a labeled workflow output of an invocation to the HDA produced.
model.WorkflowInvocationOutputDatasetAssociation.table = Table(
    "workflow_invocation_output_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("workflow_output_id", Integer, ForeignKey("workflow_output.id"), index=True),
)
# Maps a labeled workflow output of an invocation to the HDCA produced.
model.WorkflowInvocationOutputDatasetCollectionAssociation.table = Table(
    "workflow_invocation_output_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("workflow_output_id", Integer, ForeignKey("workflow_output.id"), index=True),
)
# Maps a step invocation's named output to the HDA it produced.
model.WorkflowInvocationStepOutputDatasetAssociation.table = Table(
    "workflow_invocation_step_output_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("output_name", String(255), nullable=True),
)
# Maps a step invocation's named output to the HDCA it produced.
model.WorkflowInvocationStepOutputDatasetCollectionAssociation.table = Table(
    "workflow_invocation_step_output_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("output_name", String(255), nullable=True),
)
# Self-referential link from a parent invocation to the invocation of a
# subworkflow step within it.
model.WorkflowInvocationToSubworkflowInvocationAssociation.table = Table(
    "workflow_invocation_to_subworkflow_invocation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("subworkflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
)
# Grants a user access to another user's stored workflow.
model.StoredWorkflowUserShareAssociation.table = Table(
    "stored_workflow_user_share_connection", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Ordered per-user menu of stored workflows.
model.StoredWorkflowMenuEntry.table = Table(
    "stored_workflow_menu_entry", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("order_index", Integer))
# A named auxiliary metadata file attached to either an HDA or an LDDA, with
# object-store placement and deletion/purge flags.
model.MetadataFile.table = Table(
    "metadata_file", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", TEXT),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("lda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("uuid", UUIDType(), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# Container pointing at the latest version of a form definition.
model.FormDefinitionCurrent.table = Table(
    "form_definition_current", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("latest_form_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("deleted", Boolean, index=True, default=False))
# One version of a form: field definitions and layout are stored as JSON
# (use_alter breaks the circular FK with form_definition_current).
model.FormDefinition.table = Table(
    "form_definition", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", TrimmedString(255), nullable=False),
    Column("desc", TEXT),
    Column("form_definition_current_id", Integer,
           ForeignKey("form_definition_current.id", name='for_def_form_def_current_id_fk', use_alter=True), index=True),
    Column("fields", JSONType()),
    Column("type", TrimmedString(255), index=True),
    Column("layout", JSONType()))
# A filled-in instance of a form; the submitted values live in "content".
model.FormValues.table = Table(
    "form_values", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("content", JSONType()))
# A user-authored page; latest_revision_id points at the current revision
# (use_alter breaks the circular FK with page_revision). Slug is globally
# unique here, unlike stored_workflow/visualization slugs.
model.Page.table = Table(
    "page", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    Column("latest_revision_id", Integer,
           ForeignKey("page_revision.id", use_alter=True, name='page_latest_revision_id_fk'), index=True),
    Column("title", TEXT),
    Column("deleted", Boolean, index=True, default=False),
    Column("importable", Boolean, index=True, default=False),
    Column("slug", TEXT, unique=True, index=True),
    Column("published", Boolean, index=True, default=False))
# An immutable revision of a page's title and content.
model.PageRevision.table = Table(
    "page_revision", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("page_id", Integer, ForeignKey("page.id"), index=True, nullable=False),
    Column("title", TEXT),
    Column("content", TEXT))
# Grants a user access to another user's page.
model.PageUserShareAssociation.table = Table(
    "page_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# A saved visualization; latest_revision_id points at the current revision
# (use_alter breaks the circular FK with visualization_revision).
model.Visualization.table = Table(
    "visualization", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    Column("latest_revision_id", Integer,
           ForeignKey("visualization_revision.id", use_alter=True, name='visualization_latest_revision_id_fk'), index=True),
    Column("title", TEXT),
    Column("type", TEXT),
    Column("dbkey", TEXT, index=True),
    Column("deleted", Boolean, default=False, index=True),
    Column("importable", Boolean, default=False, index=True),
    Column("slug", TEXT, index=True),
    Column("published", Boolean, default=False, index=True))
# An immutable revision of a visualization's title/dbkey/JSON config.
model.VisualizationRevision.table = Table(
    "visualization_revision", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True, nullable=False),
    Column("title", TEXT),
    Column("dbkey", TEXT, index=True),
    Column("config", JSONType))
# Grants a user access to another user's visualization.
model.VisualizationUserShareAssociation.table = Table(
    "visualization_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Data Manager tables
# Records which histories hold data-manager results for a user.
model.DataManagerHistoryAssociation.table = Table(
    "data_manager_history_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Links a job to the data manager (by textual id) that it ran.
model.DataManagerJobAssociation.table = Table(
    "data_manager_job_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("data_manager_id", TEXT, index=True))
# Tagging tables.
# Hierarchical tag vocabulary (parent_id allows nested tags); names are unique.
model.Tag.table = Table(
    "tag", metadata,
    Column("id", Integer, primary_key=True),
    Column("type", Integer),
    Column("parent_id", Integer, ForeignKey("tag.id")),
    Column("name", TrimmedString(255)),
    UniqueConstraint("name"))
# The *TagAssociation tables below all share the same shape: a FK to the
# tagged item, the tag, the tagging user, and both canonical (tname/value)
# and user-entered (user_tname/user_value) spellings of the tag.
model.HistoryTagAssociation.table = Table(
    "history_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on raw datasets.
model.DatasetTagAssociation.table = Table(
    "dataset_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on history datasets (HDAs).
model.HistoryDatasetAssociationTagAssociation.table = Table(
    "history_dataset_association_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on library datasets (LDDAs).
model.LibraryDatasetDatasetAssociationTagAssociation.table = Table(
    "library_dataset_dataset_association_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_dataset_association_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on stored workflows (NOTE: uses Unicode rather than TrimmedString,
# unlike most sibling tag tables).
model.StoredWorkflowTagAssociation.table = Table(
    "stored_workflow_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", Unicode(255), index=True),
    Column("value", Unicode(255), index=True),
    Column("user_value", Unicode(255), index=True))
# Tags on pages.
model.PageTagAssociation.table = Table(
    "page_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on workflow steps (Unicode columns, like stored_workflow tags).
model.WorkflowStepTagAssociation.table = Table(
    "workflow_step_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", Unicode(255), index=True),
    Column("value", Unicode(255), index=True),
    Column("user_value", Unicode(255), index=True))
# Tags on visualizations.
model.VisualizationTagAssociation.table = Table(
    "visualization_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on history dataset collections (HDCAs).
model.HistoryDatasetCollectionTagAssociation.table = Table(
    "history_dataset_collection_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on library dataset collections.
model.LibraryDatasetCollectionTagAssociation.table = Table(
    "library_dataset_collection_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
           ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Tags on tools; tool_id is a textual identifier, not a FK.
model.ToolTagAssociation.table = Table(
    "tool_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("tool_id", TrimmedString(255), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Annotation tables: one free-text annotation row per (annotated object,
# user).  The TEXT annotation column is indexed on every table.
model.HistoryAnnotationAssociation.table = Table(
    "history_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.HistoryDatasetAssociationAnnotationAssociation.table = Table(
    "history_dataset_association_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.StoredWorkflowAnnotationAssociation.table = Table(
    "stored_workflow_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.WorkflowStepAnnotationAssociation.table = Table(
    "workflow_step_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.PageAnnotationAssociation.table = Table(
    "page_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.VisualizationAnnotationAssociation.table = Table(
    "visualization_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.HistoryDatasetCollectionAssociationAnnotationAssociation.table = Table(
    "history_dataset_collection_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.LibraryDatasetCollectionAnnotationAssociation.table = Table(
    "library_dataset_collection_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
        ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
# Rating tables: one integer rating per (rated object, user).  The History
# mapper later in this file aggregates history ratings with a
# column_property over avg(rating).
model.HistoryRatingAssociation.table = Table("history_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.HistoryDatasetAssociationRatingAssociation.table = Table(
    "history_dataset_association_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.StoredWorkflowRatingAssociation.table = Table(
    "stored_workflow_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.PageRatingAssociation.table = Table(
    "page_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.VisualizationRatingAssociation.table = Table(
    "visualization_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.HistoryDatasetCollectionRatingAssociation.table = Table(
    "history_dataset_collection_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.LibraryDatasetCollectionRatingAssociation.table = Table(
    "library_dataset_collection_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
        ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
# User tables.
# Named per-user preference values; exposed as User.preferences through the
# association proxy configured after the User mapper below.
model.UserPreference.table = Table(
    "user_preference", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("name", Unicode(255), index=True),
    Column("value", Text))
# Records individual user actions (action/context/params) per session.
model.UserAction.table = Table(
    "user_action", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
    Column("action", Unicode(255)),
    Column("context", Unicode(512)),
    Column("params", Unicode(1024)))
# API keys; the key string itself is unique across all users.
model.APIKeys.table = Table(
    "api_keys", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("key", TrimmedString(32), index=True, unique=True))
# Cleanup-event bookkeeping tables.  These are plain Table objects bound to
# local names rather than model classes (no mapper is created for them in
# this section).  cleanup_event_id is nullable so association rows can exist
# without a recorded parent event.
CleanupEvent_table = Table("cleanup_event", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("message", TrimmedString(1024)))
CleanupEventDatasetAssociation_table = Table("cleanup_event_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True))
CleanupEventMetadataFileAssociation_table = Table("cleanup_event_metadata_file_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("metadata_file_id", Integer, ForeignKey("metadata_file.id"), index=True))
CleanupEventHistoryAssociation_table = Table("cleanup_event_history_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
CleanupEventHistoryDatasetAssociationAssociation_table = Table("cleanup_event_hda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
CleanupEventLibraryAssociation_table = Table("cleanup_event_library_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_id", Integer, ForeignKey("library.id"), index=True))
CleanupEventLibraryFolderAssociation_table = Table("cleanup_event_library_folder_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True))
CleanupEventLibraryDatasetAssociation_table = Table("cleanup_event_library_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True))
CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table("cleanup_event_ldda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True))
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table("cleanup_event_icda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("icda_id", Integer, ForeignKey("implicitly_converted_dataset_association.id"), index=True))
# With the tables defined we can define the mappers and setup the
# relationships between the model objects.
def simple_mapping(model, **kwds):
    """Register a classical mapping of *model* onto its declared ``table``.

    Any keyword arguments are forwarded verbatim as the mapper's
    relationship/column ``properties`` dict.
    """
    properties = dict(kwds)
    mapper(model, model.table, properties=properties)
# Mapper registrations.  simple_mapping() maps a class to its .table with no
# or simple extra properties; explicit mapper(...) calls spell out the
# relationship joins.
simple_mapping(model.WorkerProcess)
mapper(model.FormValues, model.FormValues.table, properties=dict(
    form_definition=relation(model.FormDefinition,
        primaryjoin=(model.FormValues.table.c.form_definition_id == model.FormDefinition.table.c.id))
))
mapper(model.FormDefinition, model.FormDefinition.table, properties=dict(
    current=relation(model.FormDefinitionCurrent,
        primaryjoin=(model.FormDefinition.table.c.form_definition_current_id == model.FormDefinitionCurrent.table.c.id))
))
mapper(model.FormDefinitionCurrent, model.FormDefinitionCurrent.table, properties=dict(
    forms=relation(model.FormDefinition,
        backref='form_definition_current',
        cascade="all, delete-orphan",
        primaryjoin=(model.FormDefinitionCurrent.table.c.id == model.FormDefinition.table.c.form_definition_current_id)),
    # post_update breaks the mutual FK dependency between the two tables by
    # issuing a second UPDATE for latest_form_id after the row INSERTs.
    latest_form=relation(model.FormDefinition,
        post_update=True,
        primaryjoin=(model.FormDefinitionCurrent.table.c.latest_form_id == model.FormDefinition.table.c.id))
))
mapper(model.UserAddress, model.UserAddress.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.UserAddress.table.c.user_id == model.User.table.c.id),
        backref='addresses',
        order_by=desc(model.UserAddress.table.c.update_time)),
))
# PSA storage models (presumably python-social-auth backing tables — confirm)
# need no relationship properties.
mapper(model.PSAAssociation, model.PSAAssociation.table, properties=None)
mapper(model.PSACode, model.PSACode.table, properties=None)
mapper(model.PSANonce, model.PSANonce.table, properties=None)
mapper(model.PSAPartial, model.PSAPartial.table, properties=None)
mapper(model.UserAuthnzToken, model.UserAuthnzToken.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.UserAuthnzToken.table.c.user_id == model.User.table.c.id),
        backref='social_auth')
))
mapper(model.CustosAuthnzToken, model.CustosAuthnzToken.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.CustosAuthnzToken.table.c.user_id == model.User.table.c.id),
        backref='custos_auth')
))
mapper(model.CloudAuthz, model.CloudAuthz.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.CloudAuthz.table.c.user_id == model.User.table.c.id),
        backref='cloudauthz'),
    authn=relation(model.UserAuthnzToken,
        primaryjoin=(model.CloudAuthz.table.c.authn_id == model.UserAuthnzToken.table.c.id),
        backref='cloudauthz')
))
mapper(model.ValidationError, model.ValidationError.table)
simple_mapping(model.DynamicTool)
# HistoryDatasetAssociation (HDA): a dataset as it appears inside a history.
simple_mapping(model.HistoryDatasetAssociation,
    # lazy=False: eager-load the backing Dataset together with the HDA.
    dataset=relation(model.Dataset,
        primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id), lazy=False),
    # .history defined in History mapper
    # Self-referential copy lineage: the single HDA this one was copied from
    # (remote_side pins the join direction) ...
    copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        remote_side=[model.HistoryDatasetAssociation.table.c.id],
        uselist=False),
    # ... and, over the same join, the HDAs that were copied from this one.
    copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    # Same copy lineage across the history/library boundary (LDDA side).
    copied_from_library_dataset_dataset_association=relation(
        model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        uselist=False),
    copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    # NOTE(review): backref name 'history_tag_associations' reads as if copied
    # from the History mapping (other backrefs here are 'hdas') — confirm it
    # is intentional before relying on it.
    tags=relation(model.HistoryDatasetAssociationTagAssociation,
        order_by=model.HistoryDatasetAssociationTagAssociation.table.c.id,
        backref='history_tag_associations'),
    annotations=relation(model.HistoryDatasetAssociationAnnotationAssociation,
        order_by=model.HistoryDatasetAssociationAnnotationAssociation.table.c.id,
        backref="hdas"),
    ratings=relation(model.HistoryDatasetAssociationRatingAssociation,
        order_by=model.HistoryDatasetAssociationRatingAssociation.table.c.id,
        backref="hdas"),
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.HistoryDatasetAssociation.table.c.extended_metadata_id ==
                      model.ExtendedMetadata.table.c.id))),
    hidden_beneath_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=((model.HistoryDatasetAssociation.table.c.hidden_beneath_collection_instance_id ==
                      model.HistoryDatasetCollectionAssociation.table.c.id)),
        uselist=False,
        backref="hidden_dataset_instances"),
    # deferred: the _metadata column is only loaded on attribute access.
    _metadata=deferred(model.HistoryDatasetAssociation.table.c._metadata)
)
# Dataset: the physical data, shared by any number of HDA/LDDA pointers.
simple_mapping(model.Dataset,
    # All HDAs referencing this dataset, plus filtered convenience views:
    # active = neither deleted nor purged; purged = purged only.
    history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id)),
    active_history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
            (model.HistoryDatasetAssociation.table.c.deleted == false()) &
            (model.HistoryDatasetAssociation.table.c.purged == false()))),
    purged_history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
            (model.HistoryDatasetAssociation.table.c.purged == true()))),
    # Same pairing for the library side (LDDAs); active = not deleted.
    library_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id)),
    active_library_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id) &
            (model.LibraryDatasetDatasetAssociation.table.c.deleted == false()))),
    tags=relation(model.DatasetTagAssociation,
        order_by=model.DatasetTagAssociation.table.c.id,
        backref='datasets')
)
# Auxiliary dataset records: content hashes and source descriptors.
mapper(model.DatasetHash, model.DatasetHash.table, properties=dict(
    dataset=relation(model.Dataset, backref='hashes')
))
mapper(model.DatasetSource, model.DatasetSource.table, properties=dict(
    dataset=relation(model.Dataset, backref='sources')
))
mapper(model.DatasetSourceHash, model.DatasetSourceHash.table, properties=dict(
    source=relation(model.DatasetSource, backref='hashes')
))
mapper(model.HistoryDatasetAssociationHistory, model.HistoryDatasetAssociationHistory.table)
mapper(model.HistoryDatasetAssociationDisplayAtAuthorization, model.HistoryDatasetAssociationDisplayAtAuthorization.table, properties=dict(
    history_dataset_association=relation(model.HistoryDatasetAssociation),
    user=relation(model.User)
))
# Links an HDA to another HDA that (per the naming) holds a subset of its
# data; both sides join against the HDA table.
mapper(model.HistoryDatasetAssociationSubset, model.HistoryDatasetAssociationSubset.table, properties=dict(
    hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    subset=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_subset_id ==
                     model.HistoryDatasetAssociation.table.c.id))
))
# Implicit dataset conversions: both the parent and the converted output may
# be either an HDA (hda_* columns) or an LDDA (ldda_* columns).
mapper(model.ImplicitlyConvertedDatasetAssociation, model.ImplicitlyConvertedDatasetAssociation.table, properties=dict(
    parent_hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    parent_ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    dataset_ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        backref="implicitly_converted_parent_datasets"),
    dataset=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        backref="implicitly_converted_parent_datasets")
))
mapper(model.History, model.History.table, properties=dict(
    galaxy_sessions=relation(model.GalaxySessionToHistoryAssociation),
    datasets=relation(model.HistoryDatasetAssociation,
        backref="history",
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid)),
    exports=relation(model.JobExportHistoryArchive,
        primaryjoin=(model.JobExportHistoryArchive.table.c.history_id == model.History.table.c.id),
        order_by=desc(model.JobExportHistoryArchive.table.c.id)),
    # active_* / visible_* are read-only (viewonly=True) filtered views over
    # the history's datasets and dataset collections, ordered by hid:
    # active = not deleted; visible additionally requires the visible flag.
    active_datasets=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetAssociation.table.c.deleted)
        ),
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
        viewonly=True),
    active_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(
            (model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetCollectionAssociation.table.c.deleted)
        ),
        order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
        viewonly=True),
    visible_datasets=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetAssociation.table.c.deleted) &
            model.HistoryDatasetAssociation.table.c.visible
        ),
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
        viewonly=True),
    visible_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(
            (model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetCollectionAssociation.table.c.deleted) &
            model.HistoryDatasetCollectionAssociation.table.c.visible
        ),
        order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
        viewonly=True),
    tags=relation(model.HistoryTagAssociation,
        order_by=model.HistoryTagAssociation.table.c.id,
        backref="histories"),
    annotations=relation(model.HistoryAnnotationAssociation,
        order_by=model.HistoryAnnotationAssociation.table.c.id,
        backref="histories"),
    ratings=relation(model.HistoryRatingAssociation,
        order_by=model.HistoryRatingAssociation.table.c.id,
        backref="histories"),
    # SQL-computed aggregates; deferred so the subqueries only run on access.
    average_rating=column_property(
        select([func.avg(model.HistoryRatingAssociation.table.c.rating)]).where(model.HistoryRatingAssociation.table.c.history_id == model.History.table.c.id),
        deferred=True
    ),
    users_shared_with_count=column_property(
        select([func.count(model.HistoryUserShareAssociation.table.c.id)]).where(model.History.table.c.id == model.HistoryUserShareAssociation.table.c.history_id),
        deferred=True
    )
))
# Proxy so that History.users_shared_with_dot_users yields the User objects
# behind the history's users_shared_with association rows.
model.History.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.HistoryUserShareAssociation, model.HistoryUserShareAssociation.table, properties=dict(
    user=relation(model.User, backref='histories_shared_by_others'),
    history=relation(model.History, backref='users_shared_with')
))
mapper(model.User, model.User.table, properties=dict(
    histories=relation(model.History,
        backref="user",
        order_by=desc(model.History.table.c.update_time)),
    # Read-only convenience view restricted to non-deleted histories.
    active_histories=relation(model.History,
        primaryjoin=(
            (model.History.table.c.user_id == model.User.table.c.id) &
            (not_(model.History.table.c.deleted))
        ),
        order_by=desc(model.History.table.c.update_time)),
    galaxy_sessions=relation(model.GalaxySession,
        order_by=desc(model.GalaxySession.table.c.update_time)),
    # Workflow menu entries keep a stable, user-chosen ordering via
    # ordering_list bound to the order_index column.
    stored_workflow_menu_entries=relation(model.StoredWorkflowMenuEntry,
        primaryjoin=(
            (model.StoredWorkflowMenuEntry.table.c.user_id == model.User.table.c.id) &
            (model.StoredWorkflowMenuEntry.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id) &
            not_(model.StoredWorkflow.table.c.deleted)
        ),
        backref="user",
        cascade="all, delete-orphan",
        collection_class=ordering_list('order_index')),
    # Preference rows keyed by name; surfaced as User.preferences through the
    # association proxy configured just after this mapper.
    _preferences=relation(model.UserPreference,
        backref="user",
        collection_class=attribute_mapped_collection('name')),
    # addresses=relation( UserAddress,
    #                     primaryjoin=( User.table.c.id == UserAddress.table.c.user_id ) ),
    values=relation(model.FormValues,
        primaryjoin=(model.User.table.c.form_values_id == model.FormValues.table.c.id)),
    api_keys=relation(model.APIKeys,
        backref="user",
        order_by=desc(model.APIKeys.table.c.create_time)),
    cloudauthzs=relation(model.CloudAuthz,
        primaryjoin=model.CloudAuthz.table.c.user_id == model.User.table.c.id),
))
mapper(model.PasswordResetToken, model.PasswordResetToken.table,
       properties=dict(user=relation(model.User, backref="reset_tokens")))
# Set up proxy so that this syntax is possible:
#   <user_obj>.preferences[pref_name] = pref_value
model.User.preferences = association_proxy('_preferences', 'value', creator=model.UserPreference)
# Group / role / quota membership mappers (classic many-to-many association
# rows), followed by the per-object permission mappers.
mapper(model.Group, model.Group.table, properties=dict(
    users=relation(model.UserGroupAssociation)
))
mapper(model.UserGroupAssociation, model.UserGroupAssociation.table, properties=dict(
    user=relation(model.User, backref="groups"),
    group=relation(model.Group, backref="members")
))
mapper(model.DefaultUserPermissions, model.DefaultUserPermissions.table, properties=dict(
    user=relation(model.User, backref="default_permissions"),
    role=relation(model.Role)
))
mapper(model.DefaultHistoryPermissions, model.DefaultHistoryPermissions.table, properties=dict(
    history=relation(model.History, backref="default_permissions"),
    role=relation(model.Role)
))
mapper(model.Role, model.Role.table, properties=dict(
    users=relation(model.UserRoleAssociation),
    groups=relation(model.GroupRoleAssociation)
))
mapper(model.UserRoleAssociation, model.UserRoleAssociation.table, properties=dict(
    user=relation(model.User, backref="roles"),
    # NOTE(review): filters out rows whose role name equals the user's email
    # address — i.e. it relies on the convention that a user's private role
    # is named after their email.  Confirm that convention holds everywhere.
    non_private_roles=relation(
        model.User,
        backref="non_private_roles",
        primaryjoin=(
            (model.User.table.c.id == model.UserRoleAssociation.table.c.user_id) &
            (model.UserRoleAssociation.table.c.role_id == model.Role.table.c.id) &
            not_(model.Role.table.c.name == model.User.table.c.email))
    ),
    role=relation(model.Role)
))
mapper(model.GroupRoleAssociation, model.GroupRoleAssociation.table, properties=dict(
    group=relation(model.Group, backref="roles"),
    role=relation(model.Role)
))
mapper(model.Quota, model.Quota.table, properties=dict(
    users=relation(model.UserQuotaAssociation),
    groups=relation(model.GroupQuotaAssociation)
))
mapper(model.UserQuotaAssociation, model.UserQuotaAssociation.table, properties=dict(
    user=relation(model.User, backref="quotas"),
    quota=relation(model.Quota)
))
mapper(model.GroupQuotaAssociation, model.GroupQuotaAssociation.table, properties=dict(
    group=relation(model.Group, backref="quotas"),
    quota=relation(model.Quota)
))
mapper(model.DefaultQuotaAssociation, model.DefaultQuotaAssociation.table, properties=dict(
    quota=relation(model.Quota, backref="default")
))
# Permission rows pair a securable object with the Role granted an action on
# it; the object side backref is uniformly named "actions".
mapper(model.DatasetPermissions, model.DatasetPermissions.table, properties=dict(
    dataset=relation(model.Dataset, backref="actions"),
    role=relation(model.Role, backref="dataset_actions")
))
mapper(model.LibraryPermissions, model.LibraryPermissions.table, properties=dict(
    library=relation(model.Library, backref="actions"),
    role=relation(model.Role, backref="library_actions")
))
mapper(model.LibraryFolderPermissions, model.LibraryFolderPermissions.table, properties=dict(
    folder=relation(model.LibraryFolder, backref="actions"),
    role=relation(model.Role, backref="library_folder_actions")
))
mapper(model.LibraryDatasetPermissions, model.LibraryDatasetPermissions.table, properties=dict(
    library_dataset=relation(model.LibraryDataset, backref="actions"),
    role=relation(model.Role, backref="library_dataset_actions")
))
mapper(model.LibraryDatasetDatasetAssociationPermissions, model.LibraryDatasetDatasetAssociationPermissions.table, properties=dict(
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, backref="actions"),
    role=relation(model.Role, backref="library_dataset_dataset_actions")
))
mapper(model.Library, model.Library.table, properties=dict(
    root_folder=relation(model.LibraryFolder, backref=backref("library_root"))
))
# ExtendedMetadata owns index entries; the backref "parent" points back to
# the owning ExtendedMetadata via the same join condition.
mapper(model.ExtendedMetadata, model.ExtendedMetadata.table, properties=dict(
    children=relation(model.ExtendedMetadataIndex,
        primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id),
        backref=backref("parent",
            primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
mapper(model.ExtendedMetadataIndex, model.ExtendedMetadataIndex.table, properties=dict(
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
# Attaches a form template (FormDefinition) plus its filled-in values
# (FormValues) to a library; deleted associations are excluded by the
# primaryjoin itself.
mapper(model.LibraryInfoAssociation, model.LibraryInfoAssociation.table, properties=dict(
    library=relation(model.Library,
        primaryjoin=(
            (model.LibraryInfoAssociation.table.c.library_id == model.Library.table.c.id) &
            (not_(model.LibraryInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.LibraryFolder, model.LibraryFolder.table, properties=dict(
    # Self-referential folder tree; remote_side on the backref makes "parent"
    # the many-to-one side.  Children are ordered by name.
    folders=relation(model.LibraryFolder,
        primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
        order_by=asc(model.LibraryFolder.table.c.name),
        backref=backref("parent",
            primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
            remote_side=[model.LibraryFolder.table.c.id])),
    # Read-only view of non-deleted subfolders.
    active_folders=relation(model.LibraryFolder,
        primaryjoin=(
            (model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryFolder.table.c.deleted))
        ),
        order_by=asc(model.LibraryFolder.table.c.name),
        # """sqlalchemy.exc.ArgumentError: Error creating eager relationship 'active_folders'
        # on parent class '<class 'galaxy.model.LibraryFolder'>' to child class '<class 'galaxy.model.LibraryFolder'>':
        # Cant use eager loading on a self referential relationship."""
        lazy=True,
        viewonly=True),
    datasets=relation(model.LibraryDataset,
        primaryjoin=((model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id)),
        order_by=asc(model.LibraryDataset.table.c._name),
        lazy=True,
        viewonly=True),
    active_datasets=relation(model.LibraryDataset,
        primaryjoin=(
            (model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryDataset.table.c.deleted))
        ),
        order_by=asc(model.LibraryDataset.table.c._name),
        lazy=True,
        viewonly=True)
))
# Folder counterpart of LibraryInfoAssociation: template + values per folder,
# with deleted associations excluded by the join.
mapper(model.LibraryFolderInfoAssociation, model.LibraryFolderInfoAssociation.table, properties=dict(
    folder=relation(model.LibraryFolder,
        primaryjoin=(
            (model.LibraryFolderInfoAssociation.table.c.library_folder_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryFolderInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.LibraryDataset, model.LibraryDataset.table, properties=dict(
    folder=relation(model.LibraryFolder),
    # The single "current" LDDA version referenced by the dataset row.
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    # Read-only list of all LDDA versions other than the current one.
    expired_datasets=relation(model.LibraryDatasetDatasetAssociation,
        foreign_keys=[model.LibraryDataset.table.c.id, model.LibraryDataset.table.c.library_dataset_dataset_association_id],
        primaryjoin=(
            (model.LibraryDataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id) &
            (not_(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
                  model.LibraryDatasetDatasetAssociation.table.c.id))
        ),
        viewonly=True,
        uselist=True)
))
# LDDA mapping: tracks copy lineage in both directions (library<->library and
# library<->history) via the copied_from_* foreign keys.
mapper(model.LibraryDatasetDatasetAssociation, model.LibraryDatasetDatasetAssociation.table, properties=dict(
    dataset=relation(model.Dataset),
    library_dataset=relation(model.LibraryDataset,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id == model.LibraryDataset.table.c.id)),
    # user=relation( model.User.mapper ),
    user=relation(model.User),
    copied_from_library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        remote_side=[model.LibraryDatasetDatasetAssociation.table.c.id],
        uselist=False),
    copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        uselist=False),
    copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    tags=relation(model.LibraryDatasetDatasetAssociationTagAssociation,
        order_by=model.LibraryDatasetDatasetAssociationTagAssociation.table.c.id,
        # NOTE(review): backref name 'history_tag_associations' on a *library*
        # dataset tag relation looks copy-pasted from the HDA mapping — confirm
        # before renaming, since the backref is a public attribute on the tag class.
        backref='history_tag_associations'),
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.LibraryDatasetDatasetAssociation.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id))
    ),
    # Deferred: metadata blobs can be large; load only on attribute access.
    _metadata=deferred(model.LibraryDatasetDatasetAssociation.table.c._metadata)
))
# LDDA metadata-template association (non-deleted rows only).
mapper(model.LibraryDatasetDatasetInfoAssociation, model.LibraryDatasetDatasetInfoAssociation.table, properties=dict(
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(
            (model.LibraryDatasetDatasetInfoAssociation.table.c.library_dataset_dataset_association_id ==
             model.LibraryDatasetDatasetAssociation.table.c.id) &
            (not_(model.LibraryDatasetDatasetInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
# Join models linking jobs to their input/output datasets and collections.
# lazy=False eager-loads the dataset side since it is read whenever the
# association is.
mapper(model.JobToInputDatasetAssociation, model.JobToInputDatasetAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset=relation(model.HistoryDatasetAssociation,
        lazy=False,
        backref="dependent_jobs")
))
mapper(model.JobToOutputDatasetAssociation, model.JobToOutputDatasetAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset=relation(model.HistoryDatasetAssociation,
        lazy=False)
))
mapper(model.JobToInputDatasetCollectionAssociation, model.JobToInputDatasetCollectionAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
        lazy=False)
))
mapper(model.JobToOutputDatasetCollectionAssociation, model.JobToOutputDatasetCollectionAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
        lazy=False,
        backref="output_dataset_collection_instances")
))
mapper(model.JobToImplicitOutputDatasetCollectionAssociation, model.JobToImplicitOutputDatasetCollectionAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset_collection=relation(model.DatasetCollection,
        backref="output_dataset_collections")
))
mapper(model.JobToInputLibraryDatasetAssociation, model.JobToInputLibraryDatasetAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset=relation(model.LibraryDatasetDatasetAssociation,
        lazy=False,
        backref="dependent_jobs")
))
mapper(model.JobToOutputLibraryDatasetAssociation, model.JobToOutputLibraryDatasetAssociation.table, properties=dict(
    job=relation(model.Job),
    dataset=relation(model.LibraryDatasetDatasetAssociation,
        lazy=False)
))
# Job/task state history and collected runtime metrics.
simple_mapping(model.JobStateHistory,
    job=relation(model.Job, backref="state_history"))
simple_mapping(model.JobMetricText,
    job=relation(model.Job, backref="text_metrics"))
simple_mapping(model.TaskMetricText,
    task=relation(model.Task, backref="text_metrics"))
simple_mapping(model.JobMetricNumeric,
    job=relation(model.Job, backref="numeric_metrics"))
simple_mapping(model.TaskMetricNumeric,
    task=relation(model.Task, backref="numeric_metrics"))
simple_mapping(model.ImplicitlyCreatedDatasetCollectionInput,
    input_dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
                      model.ImplicitlyCreatedDatasetCollectionInput.table.c.input_dataset_collection_id)),
        # backref="implicitly_created_dataset_collections",
    ),
)
# Grouping object for jobs created implicitly by mapping a tool over a
# collection; the commented-out mapping below appears to be a retired
# association route kept for reference.
simple_mapping(model.ImplicitCollectionJobs)
# simple_mapping(
#     model.ImplicitCollectionJobsHistoryDatasetCollectionAssociation,
#     history_dataset_collection_associations=relation(
#         model.HistoryDatasetCollectionAssociation,
#         backref=backref("implicit_collection_jobs_association", uselist=False),
#         uselist=True,
#     ),
# )
simple_mapping(
    model.ImplicitCollectionJobsJobAssociation,
    implicit_collection_jobs=relation(
        model.ImplicitCollectionJobs,
        backref=backref("jobs", uselist=True),
        uselist=False,
    ),
    job=relation(
        model.Job,
        backref=backref("implicit_collection_jobs_association", uselist=False),
        uselist=False,
    ),
)
# Auxiliary per-job tables: parameters, externally-set output metadata,
# history import/export archives, genome index builds, post-job actions.
mapper(model.JobParameter, model.JobParameter.table)
mapper(model.JobExternalOutputMetadata, model.JobExternalOutputMetadata.table, properties=dict(
    job=relation(model.Job),
    history_dataset_association=relation(model.HistoryDatasetAssociation, lazy=False),
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, lazy=False)
))
mapper(model.JobExportHistoryArchive, model.JobExportHistoryArchive.table, properties=dict(
    job=relation(model.Job),
    history=relation(model.History),
    dataset=relation(model.Dataset, backref='job_export_history_archive')
))
mapper(model.JobImportHistoryArchive, model.JobImportHistoryArchive.table, properties=dict(
    job=relation(model.Job),
    history=relation(model.History)
))
mapper(model.GenomeIndexToolData, model.GenomeIndexToolData.table, properties=dict(
    # NOTE(review): backref='job' puts a 'job' attribute on model.Job itself,
    # which looks unintended — confirm against users of model.Job before changing.
    job=relation(model.Job, backref='job'),
    dataset=relation(model.Dataset, backref='genome_index_tool_data'),
    user=relation(model.User),
    deferred=relation(model.DeferredJob, backref='deferred_job'),
    transfer=relation(model.TransferJob, backref='transfer_job')
))
mapper(model.PostJobAction, model.PostJobAction.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref='post_job_actions',
        primaryjoin=(model.WorkflowStep.table.c.id == model.PostJobAction.table.c.workflow_step_id))
))
mapper(model.PostJobActionAssociation, model.PostJobActionAssociation.table, properties=dict(
    job=relation(model.Job),
    post_job_action=relation(model.PostJobAction)
))
# Core Job mapping.  The two column_property EXISTS subqueries let callers
# test for deleted outputs without loading the output rows.
mapper(model.Job, model.Job.table, properties=dict(
    # user=relation( model.User.mapper ),
    user=relation(model.User),
    galaxy_session=relation(model.GalaxySession),
    history=relation(model.History, backref="jobs"),
    library_folder=relation(model.LibraryFolder, lazy=True),
    parameters=relation(model.JobParameter, lazy=True),
    input_datasets=relation(model.JobToInputDatasetAssociation),
    input_dataset_collections=relation(model.JobToInputDatasetCollectionAssociation, lazy=True),
    output_datasets=relation(model.JobToOutputDatasetAssociation, lazy=True),
    # True iff any HDA output of this job has been deleted.
    any_output_dataset_deleted=column_property(
        exists([model.HistoryDatasetAssociation],
               and_(model.Job.table.c.id == model.JobToOutputDatasetAssociation.table.c.job_id,
                    model.HistoryDatasetAssociation.table.c.id == model.JobToOutputDatasetAssociation.table.c.dataset_id,
                    model.HistoryDatasetAssociation.table.c.deleted == true())
               )
    ),
    # True iff any output collection instance of this job has been deleted.
    any_output_dataset_collection_instances_deleted=column_property(
        exists([model.HistoryDatasetCollectionAssociation.table.c.id],
               and_(model.Job.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.job_id,
                    model.HistoryDatasetCollectionAssociation.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.dataset_collection_id,
                    model.HistoryDatasetCollectionAssociation.table.c.deleted == true())
               )
    ),
    output_dataset_collection_instances=relation(model.JobToOutputDatasetCollectionAssociation, lazy=True),
    output_dataset_collections=relation(model.JobToImplicitOutputDatasetCollectionAssociation, lazy=True),
    post_job_actions=relation(model.PostJobActionAssociation, lazy=False),
    input_library_datasets=relation(model.JobToInputLibraryDatasetAssociation),
    output_library_datasets=relation(model.JobToOutputLibraryDatasetAssociation, lazy=True),
    external_output_metadata=relation(model.JobExternalOutputMetadata, lazy=True),
    tasks=relation(model.Task)
))
mapper(model.Task, model.Task.table, properties=dict(
    job=relation(model.Job)
))
mapper(model.DeferredJob, model.DeferredJob.table, properties={})
mapper(model.TransferJob, model.TransferJob.table, properties={})
# Dataset collection structure: a DatasetCollection owns ordered elements,
# each of which points at an HDA, an LDDA, or a nested child collection.
simple_mapping(model.DatasetCollection,
    elements=relation(model.DatasetCollectionElement,
        primaryjoin=(model.DatasetCollection.table.c.id == model.DatasetCollectionElement.table.c.dataset_collection_id),
        remote_side=[model.DatasetCollectionElement.table.c.dataset_collection_id],
        backref="collection",
        order_by=model.DatasetCollectionElement.table.c.element_index)
)
# HDCA: a collection as it appears in a history, with copy lineage,
# implicit-job links, tags, annotations and ratings.
simple_mapping(model.HistoryDatasetCollectionAssociation,
    collection=relation(model.DatasetCollection),
    history=relation(model.History,
        backref='dataset_collections'),
    copied_from_history_dataset_collection_association=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
                     model.HistoryDatasetCollectionAssociation.table.c.id),
        remote_side=[model.HistoryDatasetCollectionAssociation.table.c.id],
        uselist=False),
    copied_to_history_dataset_collection_associations=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
                     model.HistoryDatasetCollectionAssociation.table.c.id)),
    implicit_input_collections=relation(model.ImplicitlyCreatedDatasetCollectionInput,
        primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
                      model.ImplicitlyCreatedDatasetCollectionInput.table.c.dataset_collection_id)),
        backref="dataset_collection",
    ),
    implicit_collection_jobs=relation(
        model.ImplicitCollectionJobs,
        backref=backref("history_dataset_collection_associations", uselist=True),
        uselist=False,
    ),
    job=relation(
        model.Job,
        backref=backref("history_dataset_collection_associations", uselist=True),
        uselist=False,
    ),
    tags=relation(model.HistoryDatasetCollectionTagAssociation,
        order_by=model.HistoryDatasetCollectionTagAssociation.table.c.id,
        backref='dataset_collections'),
    annotations=relation(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
        order_by=model.HistoryDatasetCollectionAssociationAnnotationAssociation.table.c.id,
        backref="dataset_collections"),
    ratings=relation(model.HistoryDatasetCollectionRatingAssociation,
        order_by=model.HistoryDatasetCollectionRatingAssociation.table.c.id,
        backref="dataset_collections")
)
# LDCA: a collection as it appears in a library folder.
simple_mapping(model.LibraryDatasetCollectionAssociation,
    collection=relation(model.DatasetCollection),
    folder=relation(model.LibraryFolder,
        backref='dataset_collections'),
    tags=relation(model.LibraryDatasetCollectionTagAssociation,
        order_by=model.LibraryDatasetCollectionTagAssociation.table.c.id,
        backref='dataset_collections'),
    annotations=relation(model.LibraryDatasetCollectionAnnotationAssociation,
        order_by=model.LibraryDatasetCollectionAnnotationAssociation.table.c.id,
        backref="dataset_collections"),
    ratings=relation(model.LibraryDatasetCollectionRatingAssociation,
        order_by=model.LibraryDatasetCollectionRatingAssociation.table.c.id,
        backref="dataset_collections"))
simple_mapping(model.DatasetCollectionElement,
    hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.DatasetCollectionElement.table.c.hda_id == model.HistoryDatasetAssociation.table.c.id)),
    ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.DatasetCollectionElement.table.c.ldda_id == model.LibraryDatasetDatasetAssociation.table.c.id)),
    child_collection=relation(model.DatasetCollection,
        primaryjoin=(model.DatasetCollectionElement.table.c.child_collection_id == model.DatasetCollection.table.c.id)))
# Audit events and browser sessions.
mapper(model.Event, model.Event.table, properties=dict(
    history=relation(model.History),
    galaxy_session=relation(model.GalaxySession),
    # user=relation( model.User.mapper ) ) )
    user=relation(model.User)
))
mapper(model.GalaxySession, model.GalaxySession.table, properties=dict(
    histories=relation(model.GalaxySessionToHistoryAssociation),
    current_history=relation(model.History),
    # user=relation( model.User.mapper ) ) )
    user=relation(model.User)
))
mapper(model.GalaxySessionToHistoryAssociation, model.GalaxySessionToHistoryAssociation.table, properties=dict(
    galaxy_session=relation(model.GalaxySession),
    history=relation(model.History)
))
# Workflow definitions: a Workflow owns ordered steps; steps own inputs,
# outputs, and connections between steps (including subworkflow wiring).
mapper(model.Workflow, model.Workflow.table, properties=dict(
    steps=relation(model.WorkflowStep,
        backref='workflow',
        primaryjoin=((model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id)),
        order_by=asc(model.WorkflowStep.table.c.order_index),
        cascade="all, delete-orphan",
        lazy=False),
    # Deferred scalar subquery so step counts are available without loading steps.
    step_count=column_property(
        select([func.count(model.WorkflowStep.table.c.id)]).where(model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id),
        deferred=True
    )
))
mapper(model.WorkflowStep, model.WorkflowStep.table, properties=dict(
    subworkflow=relation(model.Workflow,
        primaryjoin=(model.Workflow.table.c.id == model.WorkflowStep.table.c.subworkflow_id),
        backref="parent_workflow_steps"),
    dynamic_tool=relation(model.DynamicTool,
        primaryjoin=(model.DynamicTool.table.c.id == model.WorkflowStep.table.c.dynamic_tool_id),
        backref="workflow_steps"),
    tags=relation(model.WorkflowStepTagAssociation,
        order_by=model.WorkflowStepTagAssociation.table.c.id,
        backref="workflow_steps"),
    annotations=relation(model.WorkflowStepAnnotationAssociation,
        order_by=model.WorkflowStepAnnotationAssociation.table.c.id,
        backref="workflow_steps")
))
mapper(model.WorkflowStepInput, model.WorkflowStepInput.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref=backref("inputs", uselist=True),
        cascade="all",
        primaryjoin=(model.WorkflowStepInput.table.c.workflow_step_id == model.WorkflowStep.table.c.id))
))
mapper(model.WorkflowOutput, model.WorkflowOutput.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref='workflow_outputs',
        primaryjoin=(model.WorkflowStep.table.c.id == model.WorkflowOutput.table.c.workflow_step_id))
))
mapper(model.WorkflowStepConnection, model.WorkflowStepConnection.table, properties=dict(
    input_step_input=relation(model.WorkflowStepInput,
        backref="connections",
        cascade="all",
        primaryjoin=(model.WorkflowStepConnection.table.c.input_step_input_id == model.WorkflowStepInput.table.c.id)),
    input_subworkflow_step=relation(model.WorkflowStep,
        backref=backref("parent_workflow_input_connections", uselist=True),
        primaryjoin=(model.WorkflowStepConnection.table.c.input_subworkflow_step_id == model.WorkflowStep.table.c.id),
    ),
    output_step=relation(model.WorkflowStep,
        backref="output_connections",
        cascade="all",
        primaryjoin=(model.WorkflowStepConnection.table.c.output_step_id == model.WorkflowStep.table.c.id)),
))
# StoredWorkflow: the user-owned container that versions Workflow rows;
# latest_workflow is a post_update relation to break the circular FK.
mapper(model.StoredWorkflow, model.StoredWorkflow.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.User.table.c.id == model.StoredWorkflow.table.c.user_id),
        backref='stored_workflows'),
    workflows=relation(model.Workflow,
        backref='stored_workflow',
        cascade="all, delete-orphan",
        primaryjoin=(model.StoredWorkflow.table.c.id == model.Workflow.table.c.stored_workflow_id),
        order_by=-model.Workflow.id),
    latest_workflow=relation(model.Workflow,
        post_update=True,
        primaryjoin=(model.StoredWorkflow.table.c.latest_workflow_id == model.Workflow.table.c.id),
        lazy=False),
    tags=relation(model.StoredWorkflowTagAssociation,
        order_by=model.StoredWorkflowTagAssociation.table.c.id,
        backref="stored_workflows"),
    # owner_tags restricts the tag join to tags applied by the workflow's owner.
    owner_tags=relation(model.StoredWorkflowTagAssociation,
        primaryjoin=(
            and_(model.StoredWorkflow.table.c.id == model.StoredWorkflowTagAssociation.table.c.stored_workflow_id,
                 model.StoredWorkflow.table.c.user_id == model.StoredWorkflowTagAssociation.table.c.user_id)
        ),
        order_by=model.StoredWorkflowTagAssociation.table.c.id),
    annotations=relation(model.StoredWorkflowAnnotationAssociation,
        order_by=model.StoredWorkflowAnnotationAssociation.table.c.id,
        backref="stored_workflows"),
    ratings=relation(model.StoredWorkflowRatingAssociation,
        order_by=model.StoredWorkflowRatingAssociation.table.c.id,
        backref="stored_workflows"),
    average_rating=column_property(
        select([func.avg(model.StoredWorkflowRatingAssociation.table.c.rating)]).where(model.StoredWorkflowRatingAssociation.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id),
        deferred=True
    )
))
# Set up proxy so that
#   StoredWorkflow.users_shared_with
# returns a list of users that workflow is shared with.
model.StoredWorkflow.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.StoredWorkflowUserShareAssociation, model.StoredWorkflowUserShareAssociation.table, properties=dict(
    user=relation(model.User,
        backref='workflows_shared_by_others'),
    stored_workflow=relation(model.StoredWorkflow,
        backref='users_shared_with')
))
mapper(model.StoredWorkflowMenuEntry, model.StoredWorkflowMenuEntry.table, properties=dict(
    stored_workflow=relation(model.StoredWorkflow)
))
# WorkflowInvocation: one execution of a workflow, with its request-time
# inputs/parameters and links to subworkflow invocations.
mapper(model.WorkflowInvocation, model.WorkflowInvocation.table, properties=dict(
    history=relation(model.History, backref=backref('workflow_invocations', uselist=True)),
    input_parameters=relation(model.WorkflowRequestInputParameter),
    step_states=relation(model.WorkflowRequestStepState),
    input_step_parameters=relation(model.WorkflowRequestInputStepParameter),
    input_datasets=relation(model.WorkflowRequestToInputDatasetAssociation),
    input_dataset_collections=relation(model.WorkflowRequestToInputDatasetCollectionAssociation),
    subworkflow_invocations=relation(model.WorkflowInvocationToSubworkflowInvocationAssociation,
        primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.workflow_invocation_id == model.WorkflowInvocation.table.c.id)),
        backref=backref("parent_workflow_invocation", uselist=False),
        uselist=True,
    ),
    steps=relation(model.WorkflowInvocationStep,
        backref="workflow_invocation"),
    workflow=relation(model.Workflow)
))
mapper(model.WorkflowInvocationToSubworkflowInvocationAssociation, model.WorkflowInvocationToSubworkflowInvocationAssociation.table, properties=dict(
    subworkflow_invocation=relation(model.WorkflowInvocation,
        primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.subworkflow_invocation_id == model.WorkflowInvocation.table.c.id)),
        backref="parent_workflow_invocation_association",
        uselist=False,
    ),
    workflow_step=relation(model.WorkflowStep),
))
simple_mapping(model.WorkflowInvocationStep,
    workflow_step=relation(model.WorkflowStep),
    job=relation(model.Job, backref=backref('workflow_invocation_step', uselist=False), uselist=False),
    implicit_collection_jobs=relation(model.ImplicitCollectionJobs, backref=backref('workflow_invocation_step', uselist=False), uselist=False),)
# Request-time state captured when an invocation is scheduled.
simple_mapping(model.WorkflowRequestInputParameter,
    workflow_invocation=relation(model.WorkflowInvocation))
simple_mapping(model.WorkflowRequestStepState,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestInputStepParameter,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestToInputDatasetAssociation,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep),
    dataset=relation(model.HistoryDatasetAssociation))
simple_mapping(model.WorkflowRequestToInputDatasetCollectionAssociation,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation))
# MetadataFile may belong to either a history dataset or a library dataset.
mapper(model.MetadataFile, model.MetadataFile.table, properties=dict(
    history_dataset=relation(model.HistoryDatasetAssociation),
    library_dataset=relation(model.LibraryDatasetDatasetAssociation)
))
# Outputs recorded at the invocation level (mapped to declared workflow
# outputs) and at the per-step level.
simple_mapping(
    model.WorkflowInvocationOutputDatasetAssociation,
    workflow_invocation=relation(model.WorkflowInvocation, backref="output_datasets"),
    workflow_step=relation(model.WorkflowStep),
    dataset=relation(model.HistoryDatasetAssociation),
    workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
    model.WorkflowInvocationOutputDatasetCollectionAssociation,
    workflow_invocation=relation(model.WorkflowInvocation, backref="output_dataset_collections"),
    workflow_step=relation(model.WorkflowStep),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
    workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
    model.WorkflowInvocationStepOutputDatasetAssociation,
    workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_datasets"),
    dataset=relation(model.HistoryDatasetAssociation),
)
simple_mapping(
    model.WorkflowInvocationStepOutputDatasetCollectionAssociation,
    workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_dataset_collections"),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
)
# Pages: versioned via PageRevision, same latest-revision pattern as
# StoredWorkflow (post_update breaks the circular FK).
mapper(model.PageRevision, model.PageRevision.table)
mapper(model.Page, model.Page.table, properties=dict(
    user=relation(model.User),
    revisions=relation(model.PageRevision,
        backref='page',
        cascade="all, delete-orphan",
        primaryjoin=(model.Page.table.c.id == model.PageRevision.table.c.page_id)),
    latest_revision=relation(model.PageRevision,
        post_update=True,
        primaryjoin=(model.Page.table.c.latest_revision_id == model.PageRevision.table.c.id),
        lazy=False),
    tags=relation(model.PageTagAssociation,
        order_by=model.PageTagAssociation.table.c.id,
        backref="pages"),
    annotations=relation(model.PageAnnotationAssociation,
        order_by=model.PageAnnotationAssociation.table.c.id,
        backref="pages"),
    ratings=relation(model.PageRatingAssociation,
        order_by=model.PageRatingAssociation.table.c.id,
        backref="pages"),
    average_rating=column_property(
        select([func.avg(model.PageRatingAssociation.table.c.rating)]).where(model.PageRatingAssociation.table.c.page_id == model.Page.table.c.id),
        deferred=True
    )
))
# Set up proxy so that
#   Page.users_shared_with
# returns a list of users that page is shared with.
model.Page.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.PageUserShareAssociation, model.PageUserShareAssociation.table,
       properties=dict(user=relation(model.User, backref='pages_shared_by_others'),
                       page=relation(model.Page, backref='users_shared_with')))
# Visualizations follow the same revisioned pattern as Page.
mapper(model.VisualizationRevision, model.VisualizationRevision.table)
mapper(model.Visualization, model.Visualization.table, properties=dict(
    user=relation(model.User),
    revisions=relation(model.VisualizationRevision,
        backref='visualization',
        cascade="all, delete-orphan",
        primaryjoin=(model.Visualization.table.c.id == model.VisualizationRevision.table.c.visualization_id)),
    latest_revision=relation(model.VisualizationRevision,
        post_update=True,
        primaryjoin=(model.Visualization.table.c.latest_revision_id == model.VisualizationRevision.table.c.id),
        lazy=False),
    tags=relation(model.VisualizationTagAssociation,
        order_by=model.VisualizationTagAssociation.table.c.id,
        backref="visualizations"),
    annotations=relation(model.VisualizationAnnotationAssociation,
        order_by=model.VisualizationAnnotationAssociation.table.c.id,
        backref="visualizations"),
    ratings=relation(model.VisualizationRatingAssociation,
        order_by=model.VisualizationRatingAssociation.table.c.id,
        backref="visualizations"),
    average_rating=column_property(
        select([func.avg(model.VisualizationRatingAssociation.table.c.rating)]).where(model.VisualizationRatingAssociation.table.c.visualization_id == model.Visualization.table.c.id),
        deferred=True
    )
))
# Set up proxy so that
#   Visualization.users_shared_with
# returns a list of users that visualization is shared with.
model.Visualization.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.VisualizationUserShareAssociation, model.VisualizationUserShareAssociation.table, properties=dict(
    user=relation(model.User,
        backref='visualizations_shared_by_others'),
    visualization=relation(model.Visualization,
        backref='users_shared_with')
))
# Tag tables.
# Tags form a hierarchy via the self-referential parent/children relation.
simple_mapping(model.Tag,
    children=relation(model.Tag, backref=backref('parent', remote_side=[model.Tag.table.c.id])))
def tag_mapping(tag_association_class, backref_name):
    """Map one tag-association table.

    Adds a ``tag`` relation (with a reverse collection named *backref_name*
    on ``model.Tag``) and a ``user`` relation to *tag_association_class*.
    """
    user_rel = relation(model.User)
    tag_rel = relation(model.Tag, backref=backref_name)
    simple_mapping(tag_association_class, tag=tag_rel, user=user_rel)
# Wire up every taggable model; the second argument names the reverse
# collection placed on model.Tag (e.g. Tag.tagged_histories).
tag_mapping(model.HistoryTagAssociation, "tagged_histories")
tag_mapping(model.DatasetTagAssociation, "tagged_datasets")
tag_mapping(model.HistoryDatasetAssociationTagAssociation, "tagged_history_dataset_associations")
tag_mapping(model.LibraryDatasetDatasetAssociationTagAssociation, "tagged_library_dataset_dataset_associations")
tag_mapping(model.PageTagAssociation, "tagged_pages")
tag_mapping(model.StoredWorkflowTagAssociation, "tagged_workflows")
tag_mapping(model.WorkflowStepTagAssociation, "tagged_workflow_steps")
tag_mapping(model.VisualizationTagAssociation, "tagged_visualizations")
tag_mapping(model.HistoryDatasetCollectionTagAssociation, "tagged_history_dataset_collections")
tag_mapping(model.LibraryDatasetCollectionTagAssociation, "tagged_library_dataset_collections")
tag_mapping(model.ToolTagAssociation, "tagged_tools")
# Annotation tables.
def annotation_mapping(annotation_class, **kwds):
    """Map one annotation-association table.

    Each keyword maps a property name to the model class it annotates; the
    value is wrapped in ``relation(...)``.  A ``user`` relation is always
    added.  As before, passing ``user`` explicitly raises a ``TypeError``
    (duplicate keyword argument).
    """
    # Dict comprehension replaces the legacy dict(genexp) form; behavior is
    # identical.
    kwds = {key: relation(value) for key, value in kwds.items()}
    simple_mapping(annotation_class, user=relation(model.User), **kwds)
# Wire up every annotatable model; the keyword names become relation
# properties on the association class.
annotation_mapping(model.HistoryAnnotationAssociation, history=model.History)
annotation_mapping(model.HistoryDatasetAssociationAnnotationAssociation, hda=model.HistoryDatasetAssociation)
annotation_mapping(model.StoredWorkflowAnnotationAssociation, stored_workflow=model.StoredWorkflow)
annotation_mapping(model.WorkflowStepAnnotationAssociation, workflow_step=model.WorkflowStep)
annotation_mapping(model.PageAnnotationAssociation, page=model.Page)
annotation_mapping(model.VisualizationAnnotationAssociation, visualization=model.Visualization)
annotation_mapping(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
    history_dataset_collection=model.HistoryDatasetCollectionAssociation)
annotation_mapping(model.LibraryDatasetCollectionAnnotationAssociation,
    library_dataset_collection=model.LibraryDatasetCollectionAssociation)
# Rating tables.
def rating_mapping(rating_class, **kwds):
    """Map one rating-association table.

    Each keyword maps a property name to the model class being rated; the
    value is wrapped in ``relation(...)``.  A ``user`` relation is always
    added.  As before, passing ``user`` explicitly raises a ``TypeError``
    (duplicate keyword argument).
    """
    # Dict comprehension replaces the legacy dict(genexp) form; behavior is
    # identical.
    kwds = {key: relation(value) for key, value in kwds.items()}
    simple_mapping(rating_class, user=relation(model.User), **kwds)
# Wire up every ratable model.
rating_mapping(model.HistoryRatingAssociation, history=model.History)
rating_mapping(model.HistoryDatasetAssociationRatingAssociation, hda=model.HistoryDatasetAssociation)
rating_mapping(model.StoredWorkflowRatingAssociation, stored_workflow=model.StoredWorkflow)
rating_mapping(model.PageRatingAssociation, page=model.Page)
# NOTE(review): 'visualizaiton' is misspelled, but the keyword becomes the
# mapped property name (a public attribute) — confirm no callers depend on
# the typo before fixing.
rating_mapping(model.VisualizationRatingAssociation, visualizaiton=model.Visualization)
rating_mapping(model.HistoryDatasetCollectionRatingAssociation,
    history_dataset_collection=model.HistoryDatasetCollectionAssociation)
# NOTE(review): 'libary_dataset_collection' is misspelled — same caveat as above.
rating_mapping(model.LibraryDatasetCollectionRatingAssociation,
    libary_dataset_collection=model.LibraryDatasetCollectionAssociation)
# Data Manager tables
mapper(model.DataManagerHistoryAssociation, model.DataManagerHistoryAssociation.table, properties=dict(
    history=relation(model.History),
    user=relation(model.User,
        backref='data_manager_histories')
))
mapper(model.DataManagerJobAssociation, model.DataManagerJobAssociation.table, properties=dict(
    job=relation(model.Job,
        backref=backref('data_manager_association', uselist=False),
        uselist=False)
))
# User tables.
mapper(model.UserPreference, model.UserPreference.table, properties={})
mapper(model.UserAction, model.UserAction.table, properties=dict(
    # user=relation( model.User.mapper )
    user=relation(model.User)
))
mapper(model.APIKeys, model.APIKeys.table, properties={})
# model.HistoryDatasetAssociation.mapper.add_property( "creating_job_associations",
#     relation( model.JobToOutputDatasetAssociation ) )
# model.LibraryDatasetDatasetAssociation.mapper.add_property( "creating_job_associations",
#     relation( model.JobToOutputLibraryDatasetAssociation ) )
# Added after the fact (mappers already configured above): link each output
# association type back as "creating_job_associations" on its dataset class.
class_mapper(model.HistoryDatasetAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputDatasetAssociation))
class_mapper(model.LibraryDatasetDatasetAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputLibraryDatasetAssociation))
class_mapper(model.HistoryDatasetCollectionAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputDatasetCollectionAssociation))
# Helper methods.
# Helper methods.
def db_next_hid(self, n=1):
    """
    db_next_hid( self )
    Override __next_hid to generate from the database in a concurrency safe way.
    Loads the next history ID from the DB and returns it.
    It also saves the future next_id into the DB.

    ``n`` is the number of hids to reserve; the value returned is the first
    of the reserved block.

    :rtype: int
    :returns: the next history id
    """
    session = object_session(self)
    table = self.table
    trans = session.begin()
    try:
        if "postgres" not in session.bind.dialect.name:
            # Non-Postgres: lock the row with SELECT ... FOR UPDATE, then
            # bump the counter in a separate UPDATE.
            # NOTE(review): the SELECT keys on model.cached_id(self) but the
            # UPDATE keys on self.id — presumably equivalent, but confirm;
            # the Postgres branch uses cached_id for both.
            next_hid = select([table.c.hid_counter], table.c.id == model.cached_id(self), for_update=True).scalar()
            table.update(table.c.id == self.id).execute(hid_counter=(next_hid + n))
        else:
            # Postgres: single atomic UPDATE ... RETURNING, no explicit lock
            # needed; subtract n to recover the pre-increment value.
            stmt = table.update().where(table.c.id == model.cached_id(self)).values(hid_counter=(table.c.hid_counter + n)).returning(table.c.hid_counter)
            next_hid = session.execute(stmt).scalar() - n
        trans.commit()
        return next_hid
    except Exception:
        # Roll back the reservation on any failure and re-raise.
        trans.rollback()
        raise
model.History._next_hid = db_next_hid
def _workflow_invocation_update(self):
    """Bump this invocation's ``update_time`` directly in the database.

    The WHERE clause only matches rows whose stored ``update_time`` is older
    than the new value, so the timestamp never moves backwards.
    """
    current_time = now()
    table = self.table
    guard = and_(table.c.id == self.id, table.c.update_time < current_time)
    update_stmt = table.update().values(update_time=current_time).where(guard)
    object_session(self).execute(update_stmt)
model.WorkflowInvocation.update = _workflow_invocation_update
def init(file_path, url, engine_options=None, create_tables=False, map_install_models=False,
         database_query_profiling_proxy=False, object_store=None, trace_logger=None, use_pbkdf2=True,
         slow_query_log_threshold=0, thread_local_log=None):
    """Connect mappings to the database.

    Configures class-level state on the model classes (dataset file path,
    object store, password-hashing scheme), builds the engine, binds the
    module-level metadata to it, and returns a :class:`ModelMapping` with
    a Galaxy security agent attached.
    """
    if engine_options is None:
        engine_options = {}
    # Class-level configuration consumed directly by the model classes.
    model.Dataset.file_path = file_path        # where dataset files live on disk
    model.Dataset.object_store = object_store  # backing object store, if any
    model.User.use_pbkdf2 = use_pbkdf2         # password-hashing scheme toggle
    # Build the engine and bind the shared metadata to the database.
    engine = build_engine(url, engine_options, database_query_profiling_proxy, trace_logger, slow_query_log_threshold, thread_local_log=thread_local_log)
    metadata.bind = engine
    modules = [model]
    if map_install_models:
        # Importing the mapping module configures the tool-shed install
        # mappers as a side effect.
        import galaxy.model.tool_shed_install.mapping  # noqa: F401
        from galaxy.model import tool_shed_install
        modules.append(tool_shed_install)
    mapping = ModelMapping(modules, engine=engine)
    if create_tables:
        metadata.create_all()
    mapping.create_tables = create_tables
    # Attach the local Galaxy security policy and the thread-local log.
    mapping.security_agent = GalaxyRBACAgent(mapping)
    mapping.thread_local_log = thread_local_log
    return mapping
| 48.569126 | 188 | 0.731883 |
import logging
from sqlalchemy import (
and_,
asc,
Boolean,
Column,
DateTime,
desc,
false,
ForeignKey,
func,
Integer,
MetaData,
not_,
Numeric,
select,
String, Table,
TEXT,
Text,
true,
Unicode,
UniqueConstraint,
VARCHAR
)
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy.orm import backref, class_mapper, column_property, deferred, mapper, object_session, relation
from sqlalchemy.orm.collections import attribute_mapped_collection
from sqlalchemy.sql import exists
from sqlalchemy.types import BigInteger
from galaxy import model
from galaxy.model.base import ModelMapping
from galaxy.model.custom_types import JSONType, MetadataType, TrimmedString, UUIDType
from galaxy.model.orm.engine_factory import build_engine
from galaxy.model.orm.now import now
from galaxy.model.security import GalaxyRBACAgent
log = logging.getLogger(__name__)
# Shared metadata object; every Table below registers itself here.
metadata = MetaData()
# One row per Galaxy server process, unique by (server_name, hostname);
# update_time refreshes on every write (onupdate=now).
model.WorkerProcess.table = Table(
    'worker_process',
    metadata,
    Column("id", Integer, primary_key=True),
    Column("server_name", String(255), index=True),
    Column("hostname", String(255)),
    Column("update_time", DateTime, default=now, onupdate=now),
    UniqueConstraint('server_name', 'hostname'),
)
# Core user account table; rows are soft-deleted via deleted/purged flags.
model.User.table = Table(
    "galaxy_user", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("email", TrimmedString(255), index=True, nullable=False),
    Column("username", TrimmedString(255), index=True, unique=True),
    Column("password", TrimmedString(255), nullable=False),
    Column("last_password_change", DateTime, default=now),
    Column("external", Boolean, default=False),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("disk_usage", Numeric(15, 0), index=True),
    Column("active", Boolean, index=True, default=True, nullable=False),
    Column("activation_token", TrimmedString(64), nullable=True, index=True))
# Postal/contact address attached to a user account.
model.UserAddress.table = Table(
    "user_address", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("desc", TrimmedString(255)),
    Column("name", TrimmedString(255), nullable=False),
    Column("institution", TrimmedString(255)),
    Column("address", TrimmedString(255), nullable=False),
    Column("city", TrimmedString(255), nullable=False),
    Column("state", TrimmedString(255), nullable=False),
    Column("postal_code", TrimmedString(255), nullable=False),
    Column("country", TrimmedString(255), nullable=False),
    Column("phone", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# Authentication storage.  The psa_* tables hold state for the social-auth
# login flow (their shapes mirror python-social-auth's storage models —
# presumably backing that library; verify against the auth code).
model.PSAAssociation.table = Table(
    "psa_association", metadata,
    Column('id', Integer, primary_key=True),
    Column('server_url', VARCHAR(255)),
    Column('handle', VARCHAR(255)),
    Column('secret', VARCHAR(255)),
    Column('issued', Integer),
    Column('lifetime', Integer),
    Column('assoc_type', VARCHAR(64)))
model.PSACode.table = Table(
    "psa_code", metadata,
    Column('id', Integer, primary_key=True),
    Column('email', VARCHAR(200)),
    Column('code', VARCHAR(32)))
model.PSANonce.table = Table(
    "psa_nonce", metadata,
    Column('id', Integer, primary_key=True),
    Column('server_url', VARCHAR(255)),
    Column('timestamp', Integer),
    Column('salt', VARCHAR(40)))
model.PSAPartial.table = Table(
    "psa_partial", metadata,
    Column('id', Integer, primary_key=True),
    Column('token', VARCHAR(32)),
    Column('data', TEXT),
    Column('next_step', Integer),
    Column('backend', VARCHAR(32)))
# Per-user OIDC tokens keyed by (uid, provider); extra_data is free-form JSON.
model.UserAuthnzToken.table = Table(
    "oidc_user_authnz_tokens", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
    Column('uid', VARCHAR(255)),
    Column('provider', VARCHAR(32)),
    Column('extra_data', JSONType, nullable=True),
    Column('lifetime', Integer),
    Column('assoc_type', VARCHAR(64)))
# Custos tokens: access/id/refresh token triple plus expiry timestamps,
# unique per (user, external user, provider).
model.CustosAuthnzToken.table = Table(
    "custos_authnz_token", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id")),
    Column('external_user_id', String(64)),
    Column('provider', String(255)),
    Column('access_token', Text),
    Column('id_token', Text),
    Column('refresh_token', Text),
    Column("expiration_time", DateTime),
    Column("refresh_expiration_time", DateTime),
    UniqueConstraint("user_id", "external_user_id", "provider"),
    UniqueConstraint("external_user_id", "provider"),
)
# Cloud authorization configuration per user, optionally tied to an OIDC token.
model.CloudAuthz.table = Table(
    "cloudauthz", metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer, ForeignKey("galaxy_user.id"), index=True),
    Column('provider', String(255)),
    Column('config', JSONType),
    Column('authn_id', Integer, ForeignKey("oidc_user_authnz_tokens.id"), index=True),
    Column('tokens', JSONType),
    Column('last_update', DateTime),
    Column('last_activity', DateTime),
    Column('description', TEXT),
    Column('create_time', DateTime, default=now))
# Expiring password-reset tokens; the token string itself is the primary key.
model.PasswordResetToken.table = Table(
    "password_reset_token", metadata,
    Column("token", String(32), primary_key=True, unique=True, index=True),
    Column("expiration_time", DateTime),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# A dynamically-registered tool; "value" stores the tool representation as
# JSON, and hidden/active control visibility and availability.
model.DynamicTool.table = Table(
    "dynamic_tool", metadata,
    Column("id", Integer, primary_key=True),
    Column("uuid", UUIDType()),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("tool_id", Unicode(255)),
    Column("tool_version", Unicode(255)),
    Column("tool_format", Unicode(255)),
    Column("tool_path", Unicode(255)),
    Column("tool_directory", Unicode(255)),
    Column("hidden", Boolean, default=True),
    Column("active", Boolean, default=True),
    Column("value", JSONType()),
)
# A user's history.  hid_counter is the next hid to hand out for datasets in
# this history (advanced transactionally by db_next_hid elsewhere in this file).
model.History.table = Table(
    "history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("hid_counter", Integer, default=1),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("importing", Boolean, index=True, default=False),
    Column("genome_build", TrimmedString(40)),
    Column("importable", Boolean, default=False),
    Column("slug", TEXT, index=True),
    Column("published", Boolean, index=True, default=False))
# Many-to-many: histories shared with other users.
model.HistoryUserShareAssociation.table = Table(
    "history_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# A dataset's appearance inside a history.  "state" and "metadata" use key=
# so the mapped attributes are _state/_metadata (wrapped by model properties).
# copied_from_* columns track dataset-copy provenance.
model.HistoryDatasetAssociation.table = Table(
    "history_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True, key="_state"),
    Column("copied_from_history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id"), nullable=True),
    Column("copied_from_library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"), nullable=True),
    Column("name", TrimmedString(255)),
    Column("info", TrimmedString(255)),
    Column("blurb", TrimmedString(255)),
    Column("peek", TEXT),
    Column("tool_version", TEXT),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("parent_id", Integer, ForeignKey("history_dataset_association.id"), nullable=True),
    Column("designation", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("visible", Boolean),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
    Column("version", Integer, default=1, nullable=True, index=True),
    Column("hid", Integer),
    Column("purged", Boolean, index=True, default=False),
    Column("hidden_beneath_collection_instance_id",
        ForeignKey("history_dataset_collection_association.id"), nullable=True))
# Versioned snapshots of an HDA's name/extension/metadata over time.
model.HistoryDatasetAssociationHistory.table = Table(
    "history_dataset_association_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("update_time", DateTime, default=now),
    Column("version", Integer),
    Column("name", TrimmedString(255)),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
)
# The physical dataset on disk / in the object store; HDAs and LDDAs point
# at rows here.  external_filename overrides object-store placement.
model.Dataset.table = Table(
    "dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("purgable", Boolean, default=True),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("external_filename", TEXT),
    Column("_extra_files_path", TEXT),
    Column('file_size', Numeric(15, 0)),
    Column('total_size', Numeric(15, 0)),
    Column('uuid', UUIDType()))
# Provenance: the URI a dataset was fetched from, plus any transform applied.
model.DatasetSource.table = Table(
    "dataset_source", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("source_uri", TEXT),
    Column("extra_files_path", TEXT),
    Column("transform", JSONType)
)
# Content hashes of a dataset (hash_function names the algorithm).
model.DatasetHash.table = Table(
    "dataset_hash", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("hash_function", TEXT),
    Column("hash_value", TEXT),
    Column("extra_files_path", TEXT),
)
# Hashes recorded for the dataset's source (pre-transform).
model.DatasetSourceHash.table = Table(
    "dataset_source_hash", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_source_id", Integer, ForeignKey("dataset_source.id"), index=True),
    Column("hash_function", TEXT),
    Column("hash_value", TEXT)
)
# Per-user authorization to display an HDA at an external site.
model.HistoryDatasetAssociationDisplayAtAuthorization.table = Table(
    "history_dataset_association_display_at_authorization", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("site", TrimmedString(255)))
# Links an HDA to another HDA that is a subset of it (e.g. by "location").
model.HistoryDatasetAssociationSubset.table = Table(
    "history_dataset_association_subset", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("history_dataset_association_subset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("location", Unicode(255), index=True))
# Records an implicit datatype conversion: parent HDA/LDDA -> converted HDA/LDDA.
model.ImplicitlyConvertedDatasetAssociation.table = Table(
    "implicitly_converted_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("hda_parent_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("ldda_parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("metadata_safe", Boolean, index=True, default=True),
    Column("type", TrimmedString(255)))
# Validation errors recorded against an HDA.
model.ValidationError.table = Table(
    "validation_error", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("message", TrimmedString(255)),
    Column("err_type", TrimmedString(64)),
    Column("attributes", TEXT))
# Groups, roles, and quotas, plus the many-to-many association tables tying
# them to users and to each other.
model.Group.table = Table(
    "galaxy_group", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("deleted", Boolean, index=True, default=False))
model.UserGroupAssociation.table = Table(
    "user_group_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.UserRoleAssociation.table = Table(
    "user_role_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.GroupRoleAssociation.table = Table(
    "group_role_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.Role.table = Table(
    "role", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("description", TEXT),
    Column("type", String(40), index=True),
    Column("deleted", Boolean, index=True, default=False))
model.UserQuotaAssociation.table = Table(
    "user_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
model.GroupQuotaAssociation.table = Table(
    "group_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("group_id", Integer, ForeignKey("galaxy_group.id"), index=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
# A quota: "bytes" is the amount, "operation" a short code (String(8))
# describing how it applies.
model.Quota.table = Table(
    "quota", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True, unique=True),
    Column("description", TEXT),
    Column("bytes", BigInteger),
    Column("operation", String(8)),
    Column("deleted", Boolean, index=True, default=False))
# The default quota per association "type" (unique), pointing at a quota row.
model.DefaultQuotaAssociation.table = Table(
    "default_quota_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("type", String(32), index=True, unique=True),
    Column("quota_id", Integer, ForeignKey("quota.id"), index=True))
# Role-based permission tables: each row grants an "action" (free text) on a
# securable object (dataset, library, folder, ...) to a role.
model.DatasetPermissions.table = Table(
    "dataset_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryPermissions.table = Table(
    "library_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_id", Integer, ForeignKey("library.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryFolderPermissions.table = Table(
    "library_folder_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetPermissions.table = Table(
    "library_dataset_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.LibraryDatasetDatasetAssociationPermissions.table = Table(
    "library_dataset_dataset_association_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("action", TEXT),
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"),
        nullable=True, index=True),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
# Default permissions applied to a user's new objects / new history datasets.
model.DefaultUserPermissions.table = Table(
    "default_user_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("action", TEXT),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
model.DefaultHistoryPermissions.table = Table(
    "default_history_permissions", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("action", TEXT),
    Column("role_id", Integer, ForeignKey("role.id"), index=True))
# Data-library tables.  LibraryDataset <-> LibraryDatasetDatasetAssociation is
# a mutual FK cycle, broken with use_alter + named constraints so the tables
# can be created in either order.
model.LibraryDataset.table = Table(
    "library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id", use_alter=True, name="library_dataset_dataset_association_id_fk"),
        nullable=True, index=True),
    Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("order_id", Integer),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", TrimmedString(255), key="_name", index=True),
    # when not None/null this will supercede display in library (but not when imported into user's history?)
    Column("info", TrimmedString(255), key="_info"),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# Library-side counterpart of HistoryDatasetAssociation; shares the key=_state
# / key=_metadata convention and the copied_from_* provenance columns.
model.LibraryDatasetDatasetAssociation.table = Table(
    "library_dataset_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", TrimmedString(64), index=True, key="_state"),
    Column("copied_from_history_dataset_association_id", Integer,
        ForeignKey("history_dataset_association.id", use_alter=True, name='history_dataset_association_dataset_id_fkey'),
        nullable=True),
    Column("copied_from_library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id", use_alter=True, name='library_dataset_dataset_association_id_fkey'),
        nullable=True),
    Column("name", TrimmedString(255), index=True),
    Column("info", TrimmedString(255)),
    Column("blurb", TrimmedString(255)),
    Column("peek", TEXT),
    Column("tool_version", TEXT),
    Column("extension", TrimmedString(64)),
    Column("metadata", MetadataType(), key="_metadata"),
    Column("parent_id", Integer, ForeignKey("library_dataset_dataset_association.id"), nullable=True),
    Column("designation", TrimmedString(255)),
    Column("deleted", Boolean, index=True, default=False),
    Column("visible", Boolean),
    Column("extended_metadata_id", Integer, ForeignKey("extended_metadata.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("message", TrimmedString(255)))
# Free-form JSON metadata blob plus a path->value index table for queries.
model.ExtendedMetadata.table = Table(
    "extended_metadata", metadata,
    Column("id", Integer, primary_key=True),
    Column("data", JSONType))
model.ExtendedMetadataIndex.table = Table(
    "extended_metadata_index", metadata,
    Column("id", Integer, primary_key=True),
    Column("extended_metadata_id", Integer,
        ForeignKey("extended_metadata.id", onupdate="CASCADE", ondelete="CASCADE"), index=True),
    Column("path", String(255)),
    Column("value", TEXT))
model.Library.table = Table(
    "library", metadata,
    Column("id", Integer, primary_key=True),
    Column("root_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", String(255), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("description", TEXT),
    Column("synopsis", TEXT))
# Self-referential folder tree via parent_id.
model.LibraryFolder.table = Table(
    "library_folder", metadata,
    Column("id", Integer, primary_key=True),
    Column("parent_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", TEXT, index=True),
    Column("description", TEXT),
    Column("order_id", Integer),
    Column("item_count", Integer),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False),
    Column("genome_build", TrimmedString(40)))
# Form-based metadata attached to libraries / folders / LDDAs.
model.LibraryInfoAssociation.table = Table(
    "library_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_id", Integer, ForeignKey("library.id"), index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("inheritable", Boolean, index=True, default=False),
    Column("deleted", Boolean, index=True, default=False))
model.LibraryFolderInfoAssociation.table = Table(
    "library_folder_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), nullable=True, index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("inheritable", Boolean, index=True, default=False),
    Column("deleted", Boolean, index=True, default=False))
model.LibraryDatasetDatasetInfoAssociation.table = Table(
    "library_dataset_dataset_info_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_dataset_association_id", Integer,
        ForeignKey("library_dataset_dataset_association.id"), nullable=True, index=True),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("form_values_id", Integer, ForeignKey("form_values.id"), index=True),
    Column("deleted", Boolean, index=True, default=False))
# One row per tool execution: tool identity/version, runner routing
# (job_runner_name / destination_*), captured stdout/stderr for both the job
# wrapper and the tool, and the lifecycle "state".
model.Job.table = Table(
    "job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("tool_id", String(255)),
    Column("tool_version", TEXT, default="1.0.0"),
    Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
    Column("state", String(64), index=True),
    Column("info", TrimmedString(255)),
    Column("copied_from_job_id", Integer, nullable=True),
    Column("command_line", TEXT),
    Column("dependencies", JSONType, nullable=True),
    Column("job_messages", JSONType, nullable=True),
    Column("param_filename", String(1024)),
    Column("runner_name", String(255)),
    Column("job_stdout", TEXT),
    Column("job_stderr", TEXT),
    Column("tool_stdout", TEXT),
    Column("tool_stderr", TEXT),
    Column("exit_code", Integer, nullable=True),
    Column("traceback", TEXT),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("job_runner_name", String(255)),
    Column("job_runner_external_id", String(255)),
    Column("destination_id", String(255), nullable=True),
    Column("destination_params", JSONType, nullable=True),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("imported", Boolean, default=False, index=True),
    Column("params", TrimmedString(255), index=True),
    Column("handler", TrimmedString(255), index=True))
# Append-only log of a job's state transitions.
model.JobStateHistory.table = Table(
    "job_state_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("state", String(64), index=True),
    Column("info", TrimmedString(255)))
# Name/value parameters the job was invoked with.
model.JobParameter.table = Table(
    "job_parameter", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("name", String(255)),
    Column("value", TEXT))
# Join tables linking jobs to their named inputs and outputs: HDAs, dataset
# collections (history-level and raw), and library datasets (LDDAs).
model.JobToInputDatasetAssociation.table = Table(
    "job_to_input_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("dataset_version", Integer),
    Column("name", String(255)))
model.JobToOutputDatasetAssociation.table = Table(
    "job_to_output_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("name", String(255)))
model.JobToInputDatasetCollectionAssociation.table = Table(
    "job_to_input_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
# Note: implicit outputs reference dataset_collection, not the history-level
# history_dataset_collection_association, unlike the explicit output table.
model.JobToImplicitOutputDatasetCollectionAssociation.table = Table(
    "job_to_implicit_output_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("name", Unicode(255)))
model.JobToOutputDatasetCollectionAssociation.table = Table(
    "job_to_output_dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
model.JobToInputLibraryDatasetAssociation.table = Table(
    "job_to_input_library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("name", String(255)))
model.JobToOutputLibraryDatasetAssociation.table = Table(
    "job_to_output_library_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("name", String(255)))
# Tracks which input collection an implicitly-created collection derives from.
model.ImplicitlyCreatedDatasetCollectionInput.table = Table(
    "implicitly_created_dataset_collection_inputs", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("input_dataset_collection_id", Integer,
        ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("name", Unicode(255)))
# Groups the jobs spawned by mapping a tool over a collection; the
# association table preserves each job's position via order_index.
model.ImplicitCollectionJobs.table = Table(
    "implicit_collection_jobs", metadata,
    Column("id", Integer, primary_key=True),
    Column("populated_state", TrimmedString(64), default='new', nullable=False),
)
model.ImplicitCollectionJobsJobAssociation.table = Table(
    "implicit_collection_jobs_job_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("order_index", Integer, nullable=False),
)
model.JobExternalOutputMetadata.table = Table(
"job_external_output_metadata", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_dataset_association_id", Integer,
ForeignKey("history_dataset_association.id"), index=True, nullable=True),
Column("library_dataset_dataset_association_id", Integer,
ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
Column("is_valid", Boolean, default=True),
Column("filename_in", String(255)),
Column("filename_out", String(255)),
Column("filename_results_code", String(255)),
Column("filename_kwds", String(255)),
Column("filename_override_metadata", String(255)),
Column("job_runner_external_pid", String(255)))
model.JobExportHistoryArchive.table = Table(
"job_export_history_archive", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
Column("compressed", Boolean, index=True, default=False),
Column("history_attrs_filename", TEXT))
model.JobImportHistoryArchive.table = Table(
"job_import_history_archive", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("history_id", Integer, ForeignKey("history.id"), index=True),
Column("archive_dir", TEXT))
model.JobMetricText.table = Table(
"job_metric_text", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
model.TaskMetricText.table = Table(
"task_metric_text", metadata,
Column("id", Integer, primary_key=True),
Column("task_id", Integer, ForeignKey("task.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Unicode(model.JOB_METRIC_MAX_LENGTH)))
model.JobMetricNumeric.table = Table(
"job_metric_numeric", metadata,
Column("id", Integer, primary_key=True),
Column("job_id", Integer, ForeignKey("job.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
model.TaskMetricNumeric.table = Table(
"task_metric_numeric", metadata,
Column("id", Integer, primary_key=True),
Column("task_id", Integer, ForeignKey("task.id"), index=True),
Column("plugin", Unicode(255)),
Column("metric_name", Unicode(255)),
Column("metric_value", Numeric(model.JOB_METRIC_PRECISION, model.JOB_METRIC_SCALE)))
# Genome indexing, task (sub-job) execution, post-job actions, and the
# deferred/transfer job queues.
model.GenomeIndexToolData.table = Table(
    "genome_index_tool_data", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("deferred_job_id", Integer, ForeignKey("deferred_job.id"), index=True),
    Column("transfer_job_id", Integer, ForeignKey("transfer_job.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("fasta_path", String(255)),
    Column("created_time", DateTime, default=now),
    Column("modified_time", DateTime, default=now, onupdate=now),
    Column("indexer", String(64)),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# A task is one unit of a split job; job_id is NOT NULL, so every task
# belongs to exactly one job. Mirrors the job table's stdout/stderr and
# exit-code columns.
model.Task.table = Table(
    "task", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("execution_time", DateTime),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("command_line", TEXT),
    Column("param_filename", String(1024)),
    Column("runner_name", String(255)),
    Column("job_stdout", TEXT),
    Column("job_stderr", TEXT),
    Column("tool_stdout", TEXT),
    Column("tool_stderr", TEXT),
    Column("exit_code", Integer, nullable=True),
    Column("job_messages", JSONType, nullable=True),
    Column("info", TrimmedString(255)),
    Column("traceback", TEXT),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
    Column("working_directory", String(1024)),
    Column("task_runner_name", String(255)),
    Column("task_runner_external_id", String(255)),
    Column("prepare_input_files_cmd", TEXT))
# A post-job action is attached to a workflow step; action_arguments is
# free-form JSON interpreted per action_type.
model.PostJobAction.table = Table(
    "post_job_action", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("action_type", String(255), nullable=False),
    Column("output_name", String(255), nullable=True),
    Column("action_arguments", JSONType, nullable=True))
model.PostJobActionAssociation.table = Table(
    "post_job_action_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=False),
    Column("post_job_action_id", Integer, ForeignKey("post_job_action.id"), index=True, nullable=False))
# Deferred and transfer jobs are plugin-driven; their parameters are stored
# as opaque JSON in "params".
model.DeferredJob.table = Table(
    "deferred_job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("plugin", String(128), index=True),
    Column("params", JSONType))
model.TransferJob.table = Table(
    "transfer_job", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("state", String(64), index=True),
    Column("path", String(1024)),
    Column("info", TEXT),
    Column("pid", Integer),
    Column("socket", Integer),
    Column("params", JSONType))
# Dataset collection tables: the collection itself, its attachment to a
# history or library folder, and its elements.
model.DatasetCollection.table = Table(
    "dataset_collection", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_type", Unicode(255), nullable=False),
    Column("populated_state", TrimmedString(64), default='ok', nullable=False),
    Column("populated_state_message", TEXT),
    Column("element_count", Integer, nullable=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now))
# HDCA: a dataset collection placed in a history. The self-referencing
# copied_from_... FK tracks copy provenance; job_id / implicit_collection_jobs_id
# record what produced the collection.
model.HistoryDatasetCollectionAssociation.table = Table(
    "history_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("hid", Integer),
    Column("visible", Boolean),
    Column("deleted", Boolean, default=False),
    Column("copied_from_history_dataset_collection_association_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), nullable=True),
    Column("implicit_output_name", Unicode(255), nullable=True),
    Column("job_id", ForeignKey("job.id"), index=True, nullable=True),
    Column("implicit_collection_jobs_id", ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
)
model.LibraryDatasetCollectionAssociation.table = Table(
    "library_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("collection_id", Integer, ForeignKey("dataset_collection.id"), index=True),
    Column("folder_id", Integer, ForeignKey("library_folder.id"), index=True),
    Column("name", TrimmedString(255)),
    Column("deleted", Boolean, default=False))
# One element per collection position; exactly which of hda_id / ldda_id /
# child_collection_id is set depends on the element's payload (all three are
# nullable). child_collection_id allows nested collections.
model.DatasetCollectionElement.table = Table(
    "dataset_collection_element", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=False),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("child_collection_id", Integer, ForeignKey("dataset_collection.id"), index=True, nullable=True),
    Column("element_index", Integer),
    Column("element_identifier", Unicode(255), ))
# Event log and web-session tables.
model.Event.table = Table(
    "event", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True, nullable=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("message", TrimmedString(1024)),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True, nullable=True),
    Column("tool_id", String(255)))
# session_key is the unique browser-facing identifier; user_id is nullable
# to allow anonymous sessions.
model.GalaxySession.table = Table(
    "galaxy_session", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=True),
    Column("remote_host", String(255)),
    Column("remote_addr", String(255)),
    Column("referer", TEXT),
    Column("current_history_id", Integer, ForeignKey("history.id"), nullable=True),
    Column("session_key", TrimmedString(255), index=True, unique=True),
    Column("is_valid", Boolean, default=False),
    Column("prev_session_id", Integer),
    Column("disk_usage", Numeric(15, 0), index=True),
    Column("last_action", DateTime))
model.GalaxySessionToHistoryAssociation.table = Table(
    "galaxy_session_to_history", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
# Workflow definition tables: stored workflows, workflow versions, steps,
# step inputs/connections/outputs, and the workflow-request parameter tables.
model.StoredWorkflow.table = Table(
    "stored_workflow", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    # use_alter breaks the circular FK dependency: stored_workflow points at
    # its latest workflow version while workflow.stored_workflow_id points back.
    Column("latest_workflow_id", Integer,
           ForeignKey("workflow.id", use_alter=True, name='stored_workflow_latest_workflow_id_fk'), index=True),
    Column("name", TEXT),
    Column("deleted", Boolean, default=False),
    Column("importable", Boolean, default=False),
    Column("slug", TEXT, index=True),
    Column("from_path", TEXT, index=True),
    Column("published", Boolean, index=True, default=False))
# A workflow is one version of a stored workflow; parent_workflow_id is a
# self-reference used for subworkflows.
model.Workflow.table = Table(
    "workflow", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True, nullable=True),
    Column("parent_workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
    Column("name", TEXT),
    Column("has_cycles", Boolean),
    Column("has_errors", Boolean),
    Column("uuid", UUIDType, nullable=True))
model.WorkflowStep.table = Table(
    "workflow_step", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
    Column("subworkflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=True),
    Column("dynamic_tool_id", Integer, ForeignKey("dynamic_tool.id"), index=True, nullable=True),
    Column("type", String(64)),
    Column("tool_id", TEXT),
    Column("tool_version", TEXT),
    Column("tool_inputs", JSONType),
    Column("tool_errors", JSONType),
    Column("position", JSONType),
    Column("config", JSONType),
    Column("order_index", Integer),
    Column("uuid", UUIDType),
    Column("label", Unicode(255)))
# Each step input name is unique per step (see UniqueConstraint below).
model.WorkflowStepInput.table = Table(
    "workflow_step_input", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("name", TEXT),
    Column("merge_type", TEXT),
    Column("scatter_type", TEXT),
    Column("value_from", JSONType),
    Column("value_from_type", TEXT),
    Column("default_value", JSONType),
    Column("default_value_set", Boolean, default=False),
    Column("runtime_value", Boolean, default=False),
    UniqueConstraint("workflow_step_id", "name"),
)
# The two workflow-request tables below cascade on invocation update/delete.
model.WorkflowRequestStepState.table = Table(
    "workflow_request_step_states", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer,
           ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("value", JSONType))
model.WorkflowRequestInputParameter.table = Table(
    "workflow_request_input_parameters", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer,
           ForeignKey("workflow_invocation.id", onupdate="CASCADE", ondelete="CASCADE")),
    Column("name", Unicode(255)),
    Column("value", TEXT),
    Column("type", Unicode(255)))
model.WorkflowRequestInputStepParameter.table = Table(
    "workflow_request_input_step_parameter", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("parameter_value", JSONType),
)
model.WorkflowRequestToInputDatasetAssociation.table = Table(
    "workflow_request_to_input_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(255)),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
model.WorkflowRequestToInputDatasetCollectionAssociation.table = Table(
    "workflow_request_to_input_collection_dataset", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", String(255)),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True))
# A connection joins a producing step's output to a consuming step's input
# (via workflow_step_input); input_subworkflow_step_id targets a step inside
# a subworkflow.
model.WorkflowStepConnection.table = Table(
    "workflow_step_connection", metadata,
    Column("id", Integer, primary_key=True),
    Column("output_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("input_step_input_id", Integer, ForeignKey("workflow_step_input.id"), index=True),
    Column("output_name", TEXT),
    Column("input_subworkflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
)
model.WorkflowOutput.table = Table(
    "workflow_output", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("output_name", String(255), nullable=True),
    Column("label", Unicode(255)),
    Column("uuid", UUIDType),
)
# Workflow invocation (execution) tables: the invocation itself, per-step
# records, output dataset/collection associations, and the parent/child link
# for subworkflow invocations.
model.WorkflowInvocation.table = Table(
    "workflow_invocation", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_id", Integer, ForeignKey("workflow.id"), index=True, nullable=False),
    Column("state", TrimmedString(64), index=True),
    Column("scheduler", TrimmedString(255), index=True),
    Column("handler", TrimmedString(255), index=True),
    Column('uuid', UUIDType()),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
# One row per step per invocation; job_id / implicit_collection_jobs_id are
# nullable since not every step type produces a job.
model.WorkflowInvocationStep.table = Table(
    "workflow_invocation_step", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True, nullable=False),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True, nullable=False),
    Column("state", TrimmedString(64), index=True),
    Column("job_id", Integer, ForeignKey("job.id"), index=True, nullable=True),
    Column("implicit_collection_jobs_id", Integer, ForeignKey("implicit_collection_jobs.id"), index=True, nullable=True),
    Column("action", JSONType, nullable=True))
model.WorkflowInvocationOutputDatasetAssociation.table = Table(
    "workflow_invocation_output_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("workflow_output_id", Integer, ForeignKey("workflow_output.id"), index=True),
)
model.WorkflowInvocationOutputDatasetCollectionAssociation.table = Table(
    "workflow_invocation_output_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("workflow_output_id", Integer, ForeignKey("workflow_output.id"), index=True),
)
model.WorkflowInvocationStepOutputDatasetAssociation.table = Table(
    "workflow_invocation_step_output_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True),
    Column("dataset_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("output_name", String(255), nullable=True),
)
model.WorkflowInvocationStepOutputDatasetCollectionAssociation.table = Table(
    "workflow_invocation_step_output_dataset_collection_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_step_id", Integer, ForeignKey("workflow_invocation_step.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("dataset_collection_id", Integer, ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("output_name", String(255), nullable=True),
)
# Links a parent invocation to the invocation created for a subworkflow step
# (both FKs target workflow_invocation).
model.WorkflowInvocationToSubworkflowInvocationAssociation.table = Table(
    "workflow_invocation_to_subworkflow_invocation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("subworkflow_invocation_id", Integer, ForeignKey("workflow_invocation.id"), index=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id")),
)
# Sharing and per-user workflow-menu ordering for stored workflows.
model.StoredWorkflowUserShareAssociation.table = Table(
    "stored_workflow_user_share_connection", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.StoredWorkflowMenuEntry.table = Table(
    "stored_workflow_menu_entry", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("order_index", Integer))
# Metadata files (attached to an HDA or LDDA) and the form-definition tables.
model.MetadataFile.table = Table(
    "metadata_file", metadata,
    Column("id", Integer, primary_key=True),
    Column("name", TEXT),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True, nullable=True),
    Column("lda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True, nullable=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("object_store_id", TrimmedString(255), index=True),
    Column("uuid", UUIDType(), index=True),
    Column("deleted", Boolean, index=True, default=False),
    Column("purged", Boolean, index=True, default=False))
# form_definition_current <-> form_definition reference each other, so the
# form_definition side declares its FK with use_alter (added after creation).
model.FormDefinitionCurrent.table = Table(
    "form_definition_current", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("latest_form_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("deleted", Boolean, index=True, default=False))
model.FormDefinition.table = Table(
    "form_definition", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("name", TrimmedString(255), nullable=False),
    Column("desc", TEXT),
    Column("form_definition_current_id", Integer,
           ForeignKey("form_definition_current.id", name='for_def_form_def_current_id_fk', use_alter=True), index=True),
    Column("fields", JSONType()),
    Column("type", TrimmedString(255), index=True),
    Column("layout", JSONType()))
model.FormValues.table = Table(
    "form_values", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("form_definition_id", Integer, ForeignKey("form_definition.id"), index=True),
    Column("content", JSONType()))
# Page and visualization tables. Both follow the same versioning pattern:
# the parent row holds latest_revision_id (declared with use_alter because
# the revision table's FK points back at the parent).
model.Page.table = Table(
    "page", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    Column("latest_revision_id", Integer,
           ForeignKey("page_revision.id", use_alter=True, name='page_latest_revision_id_fk'), index=True),
    Column("title", TEXT),
    Column("deleted", Boolean, index=True, default=False),
    Column("importable", Boolean, index=True, default=False),
    # Page slugs are globally unique; visualization slugs below are not.
    Column("slug", TEXT, unique=True, index=True),
    Column("published", Boolean, index=True, default=False))
model.PageRevision.table = Table(
    "page_revision", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("page_id", Integer, ForeignKey("page.id"), index=True, nullable=False),
    Column("title", TEXT),
    Column("content", TEXT))
model.PageUserShareAssociation.table = Table(
    "page_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.Visualization.table = Table(
    "visualization", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True, nullable=False),
    Column("latest_revision_id", Integer,
           ForeignKey("visualization_revision.id", use_alter=True, name='visualization_latest_revision_id_fk'), index=True),
    Column("title", TEXT),
    Column("type", TEXT),
    Column("dbkey", TEXT, index=True),
    Column("deleted", Boolean, default=False, index=True),
    Column("importable", Boolean, default=False, index=True),
    Column("slug", TEXT, index=True),
    Column("published", Boolean, default=False, index=True))
model.VisualizationRevision.table = Table(
    "visualization_revision", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, default=now, onupdate=now),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True, nullable=False),
    Column("title", TEXT),
    Column("dbkey", TEXT, index=True),
    Column("config", JSONType))
model.VisualizationUserShareAssociation.table = Table(
    "visualization_user_share_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
# Data-manager bookkeeping: which histories and jobs belong to which data
# manager (data_manager_id is a free-text identifier, not an FK).
model.DataManagerHistoryAssociation.table = Table(
    "data_manager_history_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True))
model.DataManagerJobAssociation.table = Table(
    "data_manager_job_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("update_time", DateTime, index=True, default=now, onupdate=now),
    Column("job_id", Integer, ForeignKey("job.id"), index=True),
    Column("data_manager_id", TEXT, index=True))
# Tag tables. "tag" is a self-referencing hierarchy (parent_id) with unique
# names. Each *TagAssociation table repeats the same column pattern:
# item FK + tag_id + user_id + (user_tname, value, user_value).
model.Tag.table = Table(
    "tag", metadata,
    Column("id", Integer, primary_key=True),
    Column("type", Integer),
    Column("parent_id", Integer, ForeignKey("tag.id")),
    Column("name", TrimmedString(255)),
    UniqueConstraint("name"))
model.HistoryTagAssociation.table = Table(
    "history_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.DatasetTagAssociation.table = Table(
    "dataset_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.HistoryDatasetAssociationTagAssociation.table = Table(
    "history_dataset_association_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer, ForeignKey("history_dataset_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.LibraryDatasetDatasetAssociationTagAssociation.table = Table(
    "library_dataset_dataset_association_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_dataset_association_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# NOTE(review): this table and workflow_step_tag_association use Unicode(255)
# for the tag value columns, while the other tag associations use
# TrimmedString(255) — a long-standing inconsistency, left as-is because a
# type change would require a migration.
model.StoredWorkflowTagAssociation.table = Table(
    "stored_workflow_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", Unicode(255), index=True),
    Column("value", Unicode(255), index=True),
    Column("user_value", Unicode(255), index=True))
model.PageTagAssociation.table = Table(
    "page_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.WorkflowStepTagAssociation.table = Table(
    "workflow_step_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", Unicode(255), index=True),
    Column("value", Unicode(255), index=True),
    Column("user_value", Unicode(255), index=True))
model.VisualizationTagAssociation.table = Table(
    "visualization_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.HistoryDatasetCollectionTagAssociation.table = Table(
    "history_dataset_collection_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
model.LibraryDatasetCollectionTagAssociation.table = Table(
    "library_dataset_collection_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
           ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# tool_id here is a plain string (tool identifiers are not rows in a table).
model.ToolTagAssociation.table = Table(
    "tool_tag_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("tool_id", TrimmedString(255), index=True),
    Column("tag_id", Integer, ForeignKey("tag.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("user_tname", TrimmedString(255), index=True),
    Column("value", TrimmedString(255), index=True),
    Column("user_value", TrimmedString(255), index=True))
# Annotation tables: one per annotatable item type, all with the same
# pattern (item FK + user_id + indexed TEXT annotation).
model.HistoryAnnotationAssociation.table = Table(
    "history_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.HistoryDatasetAssociationAnnotationAssociation.table = Table(
    "history_dataset_association_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer,
           ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.StoredWorkflowAnnotationAssociation.table = Table(
    "stored_workflow_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.WorkflowStepAnnotationAssociation.table = Table(
    "workflow_step_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("workflow_step_id", Integer, ForeignKey("workflow_step.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.PageAnnotationAssociation.table = Table(
    "page_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.VisualizationAnnotationAssociation.table = Table(
    "visualization_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.HistoryDatasetCollectionAssociationAnnotationAssociation.table = Table(
    "history_dataset_collection_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
model.LibraryDatasetCollectionAnnotationAssociation.table = Table(
    "library_dataset_collection_annotation_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
           ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("annotation", TEXT, index=True))
# Rating tables: one per ratable item type (item FK + user_id + indexed
# Integer rating). Reformatted to the same multi-line layout as every other
# Table definition in this module (table name on its own continuation line);
# the call and its arguments are unchanged.
model.HistoryRatingAssociation.table = Table(
    "history_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
# Remaining rating tables; same (item FK, user_id, rating) pattern as
# history_rating_association above.
model.HistoryDatasetAssociationRatingAssociation.table = Table(
    "history_dataset_association_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_association_id", Integer,
           ForeignKey("history_dataset_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.StoredWorkflowRatingAssociation.table = Table(
    "stored_workflow_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("stored_workflow_id", Integer, ForeignKey("stored_workflow.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.PageRatingAssociation.table = Table(
    "page_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("page_id", Integer, ForeignKey("page.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.VisualizationRatingAssociation.table = Table(
    "visualization_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("visualization_id", Integer, ForeignKey("visualization.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.HistoryDatasetCollectionRatingAssociation.table = Table(
    "history_dataset_collection_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("history_dataset_collection_id", Integer,
           ForeignKey("history_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
model.LibraryDatasetCollectionRatingAssociation.table = Table(
    "library_dataset_collection_rating_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("library_dataset_collection_id", Integer,
           ForeignKey("library_dataset_collection_association.id"), index=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("rating", Integer, index=True))
# ---------------------------------------------------------------------------
# Miscellaneous per-user tables: preferences (name/value pairs), a user
# action audit record, and API keys.
# ---------------------------------------------------------------------------
model.UserPreference.table = Table(
    "user_preference", metadata,
    Column("id", Integer, primary_key=True),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("name", Unicode(255), index=True),
    Column("value", Text))
model.UserAction.table = Table(
    "user_action", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    Column("session_id", Integer, ForeignKey("galaxy_session.id"), index=True),
    Column("action", Unicode(255)),
    Column("context", Unicode(512)),
    Column("params", Unicode(1024)))
model.APIKeys.table = Table(
    "api_keys", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("user_id", Integer, ForeignKey("galaxy_user.id"), index=True),
    # The key itself is unique; TrimmedString strips surrounding whitespace.
    Column("key", TrimmedString(32), index=True, unique=True))
# ---------------------------------------------------------------------------
# Cleanup-event bookkeeping tables.
# Note these are bound to module-level names rather than attached to mapped
# model classes like the tables above — presumably they are consumed by
# external cleanup tooling rather than the ORM (TODO confirm).
# Each association table records which object a given cleanup event touched;
# "cleanup_event_id" is nullable in every association table.
# ---------------------------------------------------------------------------
CleanupEvent_table = Table("cleanup_event", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("message", TrimmedString(1024)))
CleanupEventDatasetAssociation_table = Table("cleanup_event_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("dataset_id", Integer, ForeignKey("dataset.id"), index=True))
CleanupEventMetadataFileAssociation_table = Table("cleanup_event_metadata_file_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("metadata_file_id", Integer, ForeignKey("metadata_file.id"), index=True))
CleanupEventHistoryAssociation_table = Table("cleanup_event_history_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("history_id", Integer, ForeignKey("history.id"), index=True))
CleanupEventHistoryDatasetAssociationAssociation_table = Table("cleanup_event_hda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("hda_id", Integer, ForeignKey("history_dataset_association.id"), index=True))
CleanupEventLibraryAssociation_table = Table("cleanup_event_library_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_id", Integer, ForeignKey("library.id"), index=True))
CleanupEventLibraryFolderAssociation_table = Table("cleanup_event_library_folder_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_folder_id", Integer, ForeignKey("library_folder.id"), index=True))
CleanupEventLibraryDatasetAssociation_table = Table("cleanup_event_library_dataset_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("library_dataset_id", Integer, ForeignKey("library_dataset.id"), index=True))
CleanupEventLibraryDatasetDatasetAssociationAssociation_table = Table("cleanup_event_ldda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("ldda_id", Integer, ForeignKey("library_dataset_dataset_association.id"), index=True))
CleanupEventImplicitlyConvertedDatasetAssociationAssociation_table = Table("cleanup_event_icda_association", metadata,
    Column("id", Integer, primary_key=True),
    Column("create_time", DateTime, default=now),
    Column("cleanup_event_id", Integer, ForeignKey("cleanup_event.id"), index=True, nullable=True),
    Column("icda_id", Integer, ForeignKey("implicitly_converted_dataset_association.id"), index=True))
def simple_mapping(model, **kwds):
    """Register a classical mapper binding ``model`` to its own ``table``.

    Any keyword arguments are passed through unchanged as the mapper's
    ``properties`` dict (typically ``relation(...)`` definitions).
    """
    target_table = model.table
    mapper(model, target_table, properties=kwds)
# ---------------------------------------------------------------------------
# Classical mapper registrations begin here.  Each call binds a model class
# to the Table object attached to it above, wiring up relations explicitly
# via primaryjoin expressions.
# ---------------------------------------------------------------------------
simple_mapping(model.WorkerProcess)
# Form definitions are versioned: FormDefinitionCurrent tracks both the set
# of historical forms and (via post_update, to break the circular FK) the
# latest one.
mapper(model.FormValues, model.FormValues.table, properties=dict(
    form_definition=relation(model.FormDefinition,
        primaryjoin=(model.FormValues.table.c.form_definition_id == model.FormDefinition.table.c.id))
))
mapper(model.FormDefinition, model.FormDefinition.table, properties=dict(
    current=relation(model.FormDefinitionCurrent,
        primaryjoin=(model.FormDefinition.table.c.form_definition_current_id == model.FormDefinitionCurrent.table.c.id))
))
mapper(model.FormDefinitionCurrent, model.FormDefinitionCurrent.table, properties=dict(
    forms=relation(model.FormDefinition,
        backref='form_definition_current',
        cascade="all, delete-orphan",
        primaryjoin=(model.FormDefinitionCurrent.table.c.id == model.FormDefinition.table.c.form_definition_current_id)),
    latest_form=relation(model.FormDefinition,
        post_update=True,
        primaryjoin=(model.FormDefinitionCurrent.table.c.latest_form_id == model.FormDefinition.table.c.id))
))
mapper(model.UserAddress, model.UserAddress.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.UserAddress.table.c.user_id == model.User.table.c.id),
        backref='addresses',
        # Most recently updated address first.
        order_by=desc(model.UserAddress.table.c.update_time)),
))
# PSA* storage models carry no relation properties of their own.
mapper(model.PSAAssociation, model.PSAAssociation.table, properties=None)
mapper(model.PSACode, model.PSACode.table, properties=None)
mapper(model.PSANonce, model.PSANonce.table, properties=None)
mapper(model.PSAPartial, model.PSAPartial.table, properties=None)
mapper(model.UserAuthnzToken, model.UserAuthnzToken.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.UserAuthnzToken.table.c.user_id == model.User.table.c.id),
        backref='social_auth')
))
mapper(model.CustosAuthnzToken, model.CustosAuthnzToken.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.CustosAuthnzToken.table.c.user_id == model.User.table.c.id),
        backref='custos_auth')
))
mapper(model.CloudAuthz, model.CloudAuthz.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.CloudAuthz.table.c.user_id == model.User.table.c.id),
        backref='cloudauthz'),
    authn=relation(model.UserAuthnzToken,
        primaryjoin=(model.CloudAuthz.table.c.authn_id == model.UserAuthnzToken.table.c.id),
        backref='cloudauthz')
))
mapper(model.ValidationError, model.ValidationError.table)
simple_mapping(model.DynamicTool)
# HistoryDatasetAssociation: the central dataset-instance mapping.  The
# copied_from/copied_to pairs are self-referential (and HDA<->LDDA cross)
# relations; remote_side on the self-referential one fixes which side is
# the "parent" (the copy source).
simple_mapping(model.HistoryDatasetAssociation,
    # Eagerly load the backing Dataset with every HDA.
    dataset=relation(model.Dataset,
        primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id), lazy=False),
    copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        remote_side=[model.HistoryDatasetAssociation.table.c.id],
        uselist=False),
    copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    copied_from_library_dataset_dataset_association=relation(
        model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        uselist=False),
    copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    tags=relation(model.HistoryDatasetAssociationTagAssociation,
        order_by=model.HistoryDatasetAssociationTagAssociation.table.c.id,
        backref='history_tag_associations'),
    annotations=relation(model.HistoryDatasetAssociationAnnotationAssociation,
        order_by=model.HistoryDatasetAssociationAnnotationAssociation.table.c.id,
        backref="hdas"),
    ratings=relation(model.HistoryDatasetAssociationRatingAssociation,
        order_by=model.HistoryDatasetAssociationRatingAssociation.table.c.id,
        backref="hdas"),
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.HistoryDatasetAssociation.table.c.extended_metadata_id ==
                      model.ExtendedMetadata.table.c.id))),
    hidden_beneath_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=((model.HistoryDatasetAssociation.table.c.hidden_beneath_collection_instance_id ==
                      model.HistoryDatasetCollectionAssociation.table.c.id)),
        uselist=False,
        backref="hidden_dataset_instances"),
    # Metadata can be large, so only load it on explicit access.
    _metadata=deferred(model.HistoryDatasetAssociation.table.c._metadata)
)
# Dataset: the physical file record.  The filtered variants (active/purged)
# reuse the same join but add deleted/purged predicates.
simple_mapping(model.Dataset,
    history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id)),
    active_history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
            (model.HistoryDatasetAssociation.table.c.deleted == false()) &
            (model.HistoryDatasetAssociation.table.c.purged == false()))),
    purged_history_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.HistoryDatasetAssociation.table.c.dataset_id) &
            (model.HistoryDatasetAssociation.table.c.purged == true()))),
    library_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id)),
    active_library_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(
            (model.Dataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.dataset_id) &
            (model.LibraryDatasetDatasetAssociation.table.c.deleted == false()))),
    tags=relation(model.DatasetTagAssociation,
        order_by=model.DatasetTagAssociation.table.c.id,
        backref='datasets')
)
mapper(model.DatasetHash, model.DatasetHash.table, properties=dict(
    dataset=relation(model.Dataset, backref='hashes')
))
mapper(model.DatasetSource, model.DatasetSource.table, properties=dict(
    dataset=relation(model.Dataset, backref='sources')
))
mapper(model.DatasetSourceHash, model.DatasetSourceHash.table, properties=dict(
    source=relation(model.DatasetSource, backref='hashes')
))
mapper(model.HistoryDatasetAssociationHistory, model.HistoryDatasetAssociationHistory.table)
mapper(model.HistoryDatasetAssociationDisplayAtAuthorization, model.HistoryDatasetAssociationDisplayAtAuthorization.table, properties=dict(
    history_dataset_association=relation(model.HistoryDatasetAssociation),
    user=relation(model.User)
))
# Both ends of the subset relationship point at HDA, distinguished only by
# which FK column the join uses.
mapper(model.HistoryDatasetAssociationSubset, model.HistoryDatasetAssociationSubset.table, properties=dict(
    hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    subset=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociationSubset.table.c.history_dataset_association_subset_id ==
                     model.HistoryDatasetAssociation.table.c.id))
))
# Implicit converter runs can have either an HDA or an LDDA as parent and
# as output, hence the four parallel relations.
mapper(model.ImplicitlyConvertedDatasetAssociation, model.ImplicitlyConvertedDatasetAssociation.table, properties=dict(
    parent_hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_parent_id ==
                     model.HistoryDatasetAssociation.table.c.id)),
    parent_ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    dataset_ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        backref="implicitly_converted_parent_datasets"),
    dataset=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.hda_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        backref="implicitly_converted_parent_datasets")
))
# History: the active_*/visible_* relations are read-only (viewonly=True)
# filtered views over the same datasets/collections, ordered by hid.
mapper(model.History, model.History.table, properties=dict(
    galaxy_sessions=relation(model.GalaxySessionToHistoryAssociation),
    datasets=relation(model.HistoryDatasetAssociation,
        backref="history",
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid)),
    exports=relation(model.JobExportHistoryArchive,
        primaryjoin=(model.JobExportHistoryArchive.table.c.history_id == model.History.table.c.id),
        order_by=desc(model.JobExportHistoryArchive.table.c.id)),
    active_datasets=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetAssociation.table.c.deleted)
        ),
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
        viewonly=True),
    active_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(
            (model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetCollectionAssociation.table.c.deleted)
        ),
        order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
        viewonly=True),
    visible_datasets=relation(model.HistoryDatasetAssociation,
        primaryjoin=(
            (model.HistoryDatasetAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetAssociation.table.c.deleted) &
            model.HistoryDatasetAssociation.table.c.visible
        ),
        order_by=asc(model.HistoryDatasetAssociation.table.c.hid),
        viewonly=True),
    visible_dataset_collections=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(
            (model.HistoryDatasetCollectionAssociation.table.c.history_id == model.History.table.c.id) &
            not_(model.HistoryDatasetCollectionAssociation.table.c.deleted) &
            model.HistoryDatasetCollectionAssociation.table.c.visible
        ),
        order_by=asc(model.HistoryDatasetCollectionAssociation.table.c.hid),
        viewonly=True),
    tags=relation(model.HistoryTagAssociation,
        order_by=model.HistoryTagAssociation.table.c.id,
        backref="histories"),
    annotations=relation(model.HistoryAnnotationAssociation,
        order_by=model.HistoryAnnotationAssociation.table.c.id,
        backref="histories"),
    ratings=relation(model.HistoryRatingAssociation,
        order_by=model.HistoryRatingAssociation.table.c.id,
        backref="histories"),
    # Scalar aggregates computed via correlated subqueries; deferred so they
    # only run when the attribute is accessed.
    average_rating=column_property(
        select([func.avg(model.HistoryRatingAssociation.table.c.rating)]).where(model.HistoryRatingAssociation.table.c.history_id == model.History.table.c.id),
        deferred=True
    ),
    users_shared_with_count=column_property(
        select([func.count(model.HistoryUserShareAssociation.table.c.id)]).where(model.History.table.c.id == model.HistoryUserShareAssociation.table.c.history_id),
        deferred=True
    )
))
# Convenience proxy: history.users_shared_with_dot_users yields User objects
# directly instead of the share-association rows.
model.History.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.HistoryUserShareAssociation, model.HistoryUserShareAssociation.table, properties=dict(
    user=relation(model.User, backref='histories_shared_by_others'),
    history=relation(model.History, backref='users_shared_with')
))
# User and the group/role/quota authorization graph.
mapper(model.User, model.User.table, properties=dict(
    histories=relation(model.History,
        backref="user",
        order_by=desc(model.History.table.c.update_time)),
    active_histories=relation(model.History,
        primaryjoin=(
            (model.History.table.c.user_id == model.User.table.c.id) &
            (not_(model.History.table.c.deleted))
        ),
        order_by=desc(model.History.table.c.update_time)),
    galaxy_sessions=relation(model.GalaxySession,
        order_by=desc(model.GalaxySession.table.c.update_time)),
    # Workflow menu entries keep a user-defined ordering via ordering_list
    # on the order_index column; deleted workflows are filtered out.
    stored_workflow_menu_entries=relation(model.StoredWorkflowMenuEntry,
        primaryjoin=(
            (model.StoredWorkflowMenuEntry.table.c.user_id == model.User.table.c.id) &
            (model.StoredWorkflowMenuEntry.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id) &
            not_(model.StoredWorkflow.table.c.deleted)
        ),
        backref="user",
        cascade="all, delete-orphan",
        collection_class=ordering_list('order_index')),
    # Preferences are exposed as a dict keyed by preference name; see the
    # association_proxy assignment below this mapper.
    _preferences=relation(model.UserPreference,
        backref="user",
        collection_class=attribute_mapped_collection('name')),
    values=relation(model.FormValues,
        primaryjoin=(model.User.table.c.form_values_id == model.FormValues.table.c.id)),
    api_keys=relation(model.APIKeys,
        backref="user",
        order_by=desc(model.APIKeys.table.c.create_time)),
    cloudauthzs=relation(model.CloudAuthz,
        primaryjoin=model.CloudAuthz.table.c.user_id == model.User.table.c.id),
))
mapper(model.PasswordResetToken, model.PasswordResetToken.table,
       properties=dict(user=relation(model.User, backref="reset_tokens")))
# user.preferences behaves like a plain {name: value} dict backed by
# UserPreference rows.
model.User.preferences = association_proxy('_preferences', 'value', creator=model.UserPreference)
mapper(model.Group, model.Group.table, properties=dict(
    users=relation(model.UserGroupAssociation)
))
mapper(model.UserGroupAssociation, model.UserGroupAssociation.table, properties=dict(
    user=relation(model.User, backref="groups"),
    group=relation(model.Group, backref="members")
))
mapper(model.DefaultUserPermissions, model.DefaultUserPermissions.table, properties=dict(
    user=relation(model.User, backref="default_permissions"),
    role=relation(model.Role)
))
mapper(model.DefaultHistoryPermissions, model.DefaultHistoryPermissions.table, properties=dict(
    history=relation(model.History, backref="default_permissions"),
    role=relation(model.Role)
))
mapper(model.Role, model.Role.table, properties=dict(
    users=relation(model.UserRoleAssociation),
    groups=relation(model.GroupRoleAssociation)
))
mapper(model.UserRoleAssociation, model.UserRoleAssociation.table, properties=dict(
    user=relation(model.User, backref="roles"),
    # Excludes each user's private role, which by convention has the same
    # name as the user's email (see the name == email predicate).
    non_private_roles=relation(
        model.User,
        backref="non_private_roles",
        primaryjoin=(
            (model.User.table.c.id == model.UserRoleAssociation.table.c.user_id) &
            (model.UserRoleAssociation.table.c.role_id == model.Role.table.c.id) &
            not_(model.Role.table.c.name == model.User.table.c.email))
    ),
    role=relation(model.Role)
))
mapper(model.GroupRoleAssociation, model.GroupRoleAssociation.table, properties=dict(
    group=relation(model.Group, backref="roles"),
    role=relation(model.Role)
))
mapper(model.Quota, model.Quota.table, properties=dict(
    users=relation(model.UserQuotaAssociation),
    groups=relation(model.GroupQuotaAssociation)
))
mapper(model.UserQuotaAssociation, model.UserQuotaAssociation.table, properties=dict(
    user=relation(model.User, backref="quotas"),
    quota=relation(model.Quota)
))
mapper(model.GroupQuotaAssociation, model.GroupQuotaAssociation.table, properties=dict(
    group=relation(model.Group, backref="quotas"),
    quota=relation(model.Quota)
))
mapper(model.DefaultQuotaAssociation, model.DefaultQuotaAssociation.table, properties=dict(
    quota=relation(model.Quota, backref="default")
))
# Permission association mappers: each ties a securable object to a Role
# with a backref named "actions" on the object side.
mapper(model.DatasetPermissions, model.DatasetPermissions.table, properties=dict(
    dataset=relation(model.Dataset, backref="actions"),
    role=relation(model.Role, backref="dataset_actions")
))
mapper(model.LibraryPermissions, model.LibraryPermissions.table, properties=dict(
    library=relation(model.Library, backref="actions"),
    role=relation(model.Role, backref="library_actions")
))
mapper(model.LibraryFolderPermissions, model.LibraryFolderPermissions.table, properties=dict(
    folder=relation(model.LibraryFolder, backref="actions"),
    role=relation(model.Role, backref="library_folder_actions")
))
mapper(model.LibraryDatasetPermissions, model.LibraryDatasetPermissions.table, properties=dict(
    library_dataset=relation(model.LibraryDataset, backref="actions"),
    role=relation(model.Role, backref="library_dataset_actions")
))
mapper(model.LibraryDatasetDatasetAssociationPermissions, model.LibraryDatasetDatasetAssociationPermissions.table, properties=dict(
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, backref="actions"),
    role=relation(model.Role, backref="library_dataset_dataset_actions")
))
mapper(model.Library, model.Library.table, properties=dict(
    root_folder=relation(model.LibraryFolder, backref=backref("library_root"))
))
# ExtendedMetadata forms a tree via ExtendedMetadataIndex rows; note parent
# and children intentionally share the same join condition.
mapper(model.ExtendedMetadata, model.ExtendedMetadata.table, properties=dict(
    children=relation(model.ExtendedMetadataIndex,
        primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id),
        backref=backref("parent",
            primaryjoin=(model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
mapper(model.ExtendedMetadataIndex, model.ExtendedMetadataIndex.table, properties=dict(
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.ExtendedMetadataIndex.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id)))
))
# Template/info association for libraries: links a Library to a form
# definition and its filled-in values, skipping deleted associations.
mapper(model.LibraryInfoAssociation, model.LibraryInfoAssociation.table, properties=dict(
    library=relation(model.Library,
        primaryjoin=(
            (model.LibraryInfoAssociation.table.c.library_id == model.Library.table.c.id) &
            (not_(model.LibraryInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
# LibraryFolder is self-referential (folders contain folders); remote_side
# on the "parent" backref disambiguates the direction of the join.
mapper(model.LibraryFolder, model.LibraryFolder.table, properties=dict(
    folders=relation(model.LibraryFolder,
        primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
        order_by=asc(model.LibraryFolder.table.c.name),
        backref=backref("parent",
            primaryjoin=(model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id),
            remote_side=[model.LibraryFolder.table.c.id])),
    active_folders=relation(model.LibraryFolder,
        primaryjoin=(
            (model.LibraryFolder.table.c.parent_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryFolder.table.c.deleted))
        ),
        order_by=asc(model.LibraryFolder.table.c.name),
        # lazy=True is required here: SQLAlchemy raises "Can't use eager
        # loading on a self referential relationship" otherwise.
        lazy=True,
        viewonly=True),
    datasets=relation(model.LibraryDataset,
        primaryjoin=((model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id)),
        order_by=asc(model.LibraryDataset.table.c._name),
        lazy=True,
        viewonly=True),
    active_datasets=relation(model.LibraryDataset,
        primaryjoin=(
            (model.LibraryDataset.table.c.folder_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryDataset.table.c.deleted))
        ),
        order_by=asc(model.LibraryDataset.table.c._name),
        lazy=True,
        viewonly=True)
))
mapper(model.LibraryFolderInfoAssociation, model.LibraryFolderInfoAssociation.table, properties=dict(
    folder=relation(model.LibraryFolder,
        primaryjoin=(
            (model.LibraryFolderInfoAssociation.table.c.library_folder_id == model.LibraryFolder.table.c.id) &
            (not_(model.LibraryFolderInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryFolderInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.LibraryDataset, model.LibraryDataset.table, properties=dict(
    folder=relation(model.LibraryFolder),
    # The current LDDA for this library dataset (via the direct FK)...
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    # ...and all superseded (non-current) LDDAs for it, read-only.
    expired_datasets=relation(model.LibraryDatasetDatasetAssociation,
        foreign_keys=[model.LibraryDataset.table.c.id, model.LibraryDataset.table.c.library_dataset_dataset_association_id],
        primaryjoin=(
            (model.LibraryDataset.table.c.id == model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id) &
            (not_(model.LibraryDataset.table.c.library_dataset_dataset_association_id ==
                  model.LibraryDatasetDatasetAssociation.table.c.id))
        ),
        viewonly=True,
        uselist=True)
))
# LibraryDatasetDatasetAssociation: mirrors the HDA mapping's copy-lineage
# structure (self-referential copied_from/copied_to plus HDA cross-links).
mapper(model.LibraryDatasetDatasetAssociation, model.LibraryDatasetDatasetAssociation.table, properties=dict(
    dataset=relation(model.Dataset),
    library_dataset=relation(model.LibraryDataset,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.library_dataset_id == model.LibraryDataset.table.c.id)),
    user=relation(model.User),
    copied_from_library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id),
        remote_side=[model.LibraryDatasetDatasetAssociation.table.c.id],
        uselist=False),
    copied_to_library_dataset_dataset_associations=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    copied_from_history_dataset_association=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.LibraryDatasetDatasetAssociation.table.c.copied_from_history_dataset_association_id ==
                     model.HistoryDatasetAssociation.table.c.id),
        uselist=False),
    copied_to_history_dataset_associations=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.HistoryDatasetAssociation.table.c.copied_from_library_dataset_dataset_association_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    implicitly_converted_datasets=relation(model.ImplicitlyConvertedDatasetAssociation,
        primaryjoin=(model.ImplicitlyConvertedDatasetAssociation.table.c.ldda_parent_id ==
                     model.LibraryDatasetDatasetAssociation.table.c.id)),
    # NOTE(review): backref name 'history_tag_associations' on an LDDA tag
    # relation looks copy-pasted from the HDA mapping above — callers may
    # depend on it, so it is left as-is; confirm before renaming.
    tags=relation(model.LibraryDatasetDatasetAssociationTagAssociation,
        order_by=model.LibraryDatasetDatasetAssociationTagAssociation.table.c.id,
        backref='history_tag_associations'),
    extended_metadata=relation(model.ExtendedMetadata,
        primaryjoin=((model.LibraryDatasetDatasetAssociation.table.c.extended_metadata_id == model.ExtendedMetadata.table.c.id))
    ),
    # Deferred: metadata can be large, load only on access.
    _metadata=deferred(model.LibraryDatasetDatasetAssociation.table.c._metadata)
))
mapper(model.LibraryDatasetDatasetInfoAssociation, model.LibraryDatasetDatasetInfoAssociation.table, properties=dict(
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(
            (model.LibraryDatasetDatasetInfoAssociation.table.c.library_dataset_dataset_association_id ==
             model.LibraryDatasetDatasetAssociation.table.c.id) &
            (not_(model.LibraryDatasetDatasetInfoAssociation.table.c.deleted))
        ),
        backref="info_association"),
    template=relation(model.FormDefinition,
        primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_definition_id == model.FormDefinition.table.c.id)),
    info=relation(model.FormValues,
        primaryjoin=(model.LibraryDatasetDatasetInfoAssociation.table.c.form_values_id == model.FormValues.table.c.id))
))
mapper(model.JobToInputDatasetAssociation, model.JobToInputDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.HistoryDatasetAssociation,
lazy=False,
backref="dependent_jobs")
))
mapper(model.JobToOutputDatasetAssociation, model.JobToOutputDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.HistoryDatasetAssociation,
lazy=False)
))
mapper(model.JobToInputDatasetCollectionAssociation, model.JobToInputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
lazy=False)
))
mapper(model.JobToOutputDatasetCollectionAssociation, model.JobToOutputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection_instance=relation(model.HistoryDatasetCollectionAssociation,
lazy=False,
backref="output_dataset_collection_instances")
))
mapper(model.JobToImplicitOutputDatasetCollectionAssociation, model.JobToImplicitOutputDatasetCollectionAssociation.table, properties=dict(
job=relation(model.Job),
dataset_collection=relation(model.DatasetCollection,
backref="output_dataset_collections")
))
mapper(model.JobToInputLibraryDatasetAssociation, model.JobToInputLibraryDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.LibraryDatasetDatasetAssociation,
lazy=False,
backref="dependent_jobs")
))
mapper(model.JobToOutputLibraryDatasetAssociation, model.JobToOutputLibraryDatasetAssociation.table, properties=dict(
job=relation(model.Job),
dataset=relation(model.LibraryDatasetDatasetAssociation,
lazy=False)
))
# --- Job/task state history, metrics, implicit collection bookkeeping,
# --- external metadata and history import/export archives ---
simple_mapping(model.JobStateHistory,
    job=relation(model.Job, backref="state_history"))
simple_mapping(model.JobMetricText,
    job=relation(model.Job, backref="text_metrics"))
simple_mapping(model.TaskMetricText,
    task=relation(model.Task, backref="text_metrics"))
simple_mapping(model.JobMetricNumeric,
    job=relation(model.Job, backref="numeric_metrics"))
simple_mapping(model.TaskMetricNumeric,
    task=relation(model.Task, backref="numeric_metrics"))
simple_mapping(model.ImplicitlyCreatedDatasetCollectionInput,
    input_dataset_collection=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
            model.ImplicitlyCreatedDatasetCollectionInput.table.c.input_dataset_collection_id)),
    ),
)
simple_mapping(model.ImplicitCollectionJobs)
simple_mapping(
    model.ImplicitCollectionJobsJobAssociation,
    implicit_collection_jobs=relation(
        model.ImplicitCollectionJobs,
        backref=backref("jobs", uselist=True),
        uselist=False,
    ),
    job=relation(
        model.Job,
        backref=backref("implicit_collection_jobs_association", uselist=False),
        uselist=False,
    ),
)
mapper(model.JobParameter, model.JobParameter.table)
mapper(model.JobExternalOutputMetadata, model.JobExternalOutputMetadata.table, properties=dict(
    job=relation(model.Job),
    history_dataset_association=relation(model.HistoryDatasetAssociation, lazy=False),
    library_dataset_dataset_association=relation(model.LibraryDatasetDatasetAssociation, lazy=False)
))
mapper(model.JobExportHistoryArchive, model.JobExportHistoryArchive.table, properties=dict(
    job=relation(model.Job),
    history=relation(model.History),
    dataset=relation(model.Dataset, backref='job_export_history_archive')
))
mapper(model.JobImportHistoryArchive, model.JobImportHistoryArchive.table, properties=dict(
    job=relation(model.Job),
    history=relation(model.History)
))
mapper(model.GenomeIndexToolData, model.GenomeIndexToolData.table, properties=dict(
    job=relation(model.Job, backref='job'),
    dataset=relation(model.Dataset, backref='genome_index_tool_data'),
    user=relation(model.User),
    deferred=relation(model.DeferredJob, backref='deferred_job'),
    transfer=relation(model.TransferJob, backref='transfer_job')
))
mapper(model.PostJobAction, model.PostJobAction.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref='post_job_actions',
        primaryjoin=(model.WorkflowStep.table.c.id == model.PostJobAction.table.c.workflow_step_id))
))
mapper(model.PostJobActionAssociation, model.PostJobActionAssociation.table, properties=dict(
    job=relation(model.Job),
    post_job_action=relation(model.PostJobAction)
))
# --- Core Job/Task mapping plus the DatasetCollection container ---
mapper(model.Job, model.Job.table, properties=dict(
    user=relation(model.User),
    galaxy_session=relation(model.GalaxySession),
    history=relation(model.History, backref="jobs"),
    library_folder=relation(model.LibraryFolder, lazy=True),
    parameters=relation(model.JobParameter, lazy=True),
    input_datasets=relation(model.JobToInputDatasetAssociation),
    input_dataset_collections=relation(model.JobToInputDatasetCollectionAssociation, lazy=True),
    output_datasets=relation(model.JobToOutputDatasetAssociation, lazy=True),
    # EXISTS subquery: true when any output HDA of this job is deleted.
    any_output_dataset_deleted=column_property(
        exists([model.HistoryDatasetAssociation],
            and_(model.Job.table.c.id == model.JobToOutputDatasetAssociation.table.c.job_id,
                model.HistoryDatasetAssociation.table.c.id == model.JobToOutputDatasetAssociation.table.c.dataset_id,
                model.HistoryDatasetAssociation.table.c.deleted == true())
        )
    ),
    # EXISTS subquery: true when any output HDCA of this job is deleted.
    any_output_dataset_collection_instances_deleted=column_property(
        exists([model.HistoryDatasetCollectionAssociation.table.c.id],
            and_(model.Job.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.job_id,
                model.HistoryDatasetCollectionAssociation.table.c.id == model.JobToOutputDatasetCollectionAssociation.table.c.dataset_collection_id,
                model.HistoryDatasetCollectionAssociation.table.c.deleted == true())
        )
    ),
    output_dataset_collection_instances=relation(model.JobToOutputDatasetCollectionAssociation, lazy=True),
    output_dataset_collections=relation(model.JobToImplicitOutputDatasetCollectionAssociation, lazy=True),
    post_job_actions=relation(model.PostJobActionAssociation, lazy=False),
    input_library_datasets=relation(model.JobToInputLibraryDatasetAssociation),
    output_library_datasets=relation(model.JobToOutputLibraryDatasetAssociation, lazy=True),
    external_output_metadata=relation(model.JobExternalOutputMetadata, lazy=True),
    tasks=relation(model.Task)
))
mapper(model.Task, model.Task.table, properties=dict(
    job=relation(model.Job)
))
mapper(model.DeferredJob, model.DeferredJob.table, properties={})
mapper(model.TransferJob, model.TransferJob.table, properties={})
simple_mapping(model.DatasetCollection,
    # Elements are kept in their explicit element_index order.
    elements=relation(model.DatasetCollectionElement,
        primaryjoin=(model.DatasetCollection.table.c.id == model.DatasetCollectionElement.table.c.dataset_collection_id),
        remote_side=[model.DatasetCollectionElement.table.c.dataset_collection_id],
        backref="collection",
        order_by=model.DatasetCollectionElement.table.c.element_index)
)
# --- Dataset collection associations (history and library) and elements ---
simple_mapping(model.HistoryDatasetCollectionAssociation,
    collection=relation(model.DatasetCollection),
    history=relation(model.History,
        backref='dataset_collections'),
    # Self-referential copy lineage, mirroring the HDA/LDDA copy relations.
    copied_from_history_dataset_collection_association=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
            model.HistoryDatasetCollectionAssociation.table.c.id),
        remote_side=[model.HistoryDatasetCollectionAssociation.table.c.id],
        uselist=False),
    copied_to_history_dataset_collection_associations=relation(model.HistoryDatasetCollectionAssociation,
        primaryjoin=(model.HistoryDatasetCollectionAssociation.table.c.copied_from_history_dataset_collection_association_id ==
            model.HistoryDatasetCollectionAssociation.table.c.id)),
    implicit_input_collections=relation(model.ImplicitlyCreatedDatasetCollectionInput,
        primaryjoin=((model.HistoryDatasetCollectionAssociation.table.c.id ==
            model.ImplicitlyCreatedDatasetCollectionInput.table.c.dataset_collection_id)),
        backref="dataset_collection",
    ),
    implicit_collection_jobs=relation(
        model.ImplicitCollectionJobs,
        backref=backref("history_dataset_collection_associations", uselist=True),
        uselist=False,
    ),
    job=relation(
        model.Job,
        backref=backref("history_dataset_collection_associations", uselist=True),
        uselist=False,
    ),
    tags=relation(model.HistoryDatasetCollectionTagAssociation,
        order_by=model.HistoryDatasetCollectionTagAssociation.table.c.id,
        backref='dataset_collections'),
    annotations=relation(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
        order_by=model.HistoryDatasetCollectionAssociationAnnotationAssociation.table.c.id,
        backref="dataset_collections"),
    ratings=relation(model.HistoryDatasetCollectionRatingAssociation,
        order_by=model.HistoryDatasetCollectionRatingAssociation.table.c.id,
        backref="dataset_collections")
)
simple_mapping(model.LibraryDatasetCollectionAssociation,
    collection=relation(model.DatasetCollection),
    folder=relation(model.LibraryFolder,
        backref='dataset_collections'),
    tags=relation(model.LibraryDatasetCollectionTagAssociation,
        order_by=model.LibraryDatasetCollectionTagAssociation.table.c.id,
        backref='dataset_collections'),
    annotations=relation(model.LibraryDatasetCollectionAnnotationAssociation,
        order_by=model.LibraryDatasetCollectionAnnotationAssociation.table.c.id,
        backref="dataset_collections"),
    ratings=relation(model.LibraryDatasetCollectionRatingAssociation,
        order_by=model.LibraryDatasetCollectionRatingAssociation.table.c.id,
        backref="dataset_collections"))
# An element points at exactly one of: an HDA, an LDDA, or a nested collection.
simple_mapping(model.DatasetCollectionElement,
    hda=relation(model.HistoryDatasetAssociation,
        primaryjoin=(model.DatasetCollectionElement.table.c.hda_id == model.HistoryDatasetAssociation.table.c.id)),
    ldda=relation(model.LibraryDatasetDatasetAssociation,
        primaryjoin=(model.DatasetCollectionElement.table.c.ldda_id == model.LibraryDatasetDatasetAssociation.table.c.id)),
    child_collection=relation(model.DatasetCollection,
        primaryjoin=(model.DatasetCollectionElement.table.c.child_collection_id == model.DatasetCollection.table.c.id)))
# --- Events, sessions, and workflow structure (steps, inputs, connections) ---
mapper(model.Event, model.Event.table, properties=dict(
    history=relation(model.History),
    galaxy_session=relation(model.GalaxySession),
    user=relation(model.User)
))
mapper(model.GalaxySession, model.GalaxySession.table, properties=dict(
    histories=relation(model.GalaxySessionToHistoryAssociation),
    current_history=relation(model.History),
    user=relation(model.User)
))
mapper(model.GalaxySessionToHistoryAssociation, model.GalaxySessionToHistoryAssociation.table, properties=dict(
    galaxy_session=relation(model.GalaxySession),
    history=relation(model.History)
))
mapper(model.Workflow, model.Workflow.table, properties=dict(
    steps=relation(model.WorkflowStep,
        backref='workflow',
        primaryjoin=((model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id)),
        order_by=asc(model.WorkflowStep.table.c.order_index),
        cascade="all, delete-orphan",
        lazy=False),
    # Deferred COUNT(*) of this workflow's steps.
    step_count=column_property(
        select([func.count(model.WorkflowStep.table.c.id)]).where(model.Workflow.table.c.id == model.WorkflowStep.table.c.workflow_id),
        deferred=True
    )
))
mapper(model.WorkflowStep, model.WorkflowStep.table, properties=dict(
    subworkflow=relation(model.Workflow,
        primaryjoin=(model.Workflow.table.c.id == model.WorkflowStep.table.c.subworkflow_id),
        backref="parent_workflow_steps"),
    dynamic_tool=relation(model.DynamicTool,
        primaryjoin=(model.DynamicTool.table.c.id == model.WorkflowStep.table.c.dynamic_tool_id),
        backref="workflow_steps"),
    tags=relation(model.WorkflowStepTagAssociation,
        order_by=model.WorkflowStepTagAssociation.table.c.id,
        backref="workflow_steps"),
    annotations=relation(model.WorkflowStepAnnotationAssociation,
        order_by=model.WorkflowStepAnnotationAssociation.table.c.id,
        backref="workflow_steps")
))
mapper(model.WorkflowStepInput, model.WorkflowStepInput.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref=backref("inputs", uselist=True),
        cascade="all",
        primaryjoin=(model.WorkflowStepInput.table.c.workflow_step_id == model.WorkflowStep.table.c.id))
))
mapper(model.WorkflowOutput, model.WorkflowOutput.table, properties=dict(
    workflow_step=relation(model.WorkflowStep,
        backref='workflow_outputs',
        primaryjoin=(model.WorkflowStep.table.c.id == model.WorkflowOutput.table.c.workflow_step_id))
))
mapper(model.WorkflowStepConnection, model.WorkflowStepConnection.table, properties=dict(
    input_step_input=relation(model.WorkflowStepInput,
        backref="connections",
        cascade="all",
        primaryjoin=(model.WorkflowStepConnection.table.c.input_step_input_id == model.WorkflowStepInput.table.c.id)),
    input_subworkflow_step=relation(model.WorkflowStep,
        backref=backref("parent_workflow_input_connections", uselist=True),
        primaryjoin=(model.WorkflowStepConnection.table.c.input_subworkflow_step_id == model.WorkflowStep.table.c.id),
    ),
    output_step=relation(model.WorkflowStep,
        backref="output_connections",
        cascade="all",
        primaryjoin=(model.WorkflowStepConnection.table.c.output_step_id == model.WorkflowStep.table.c.id)),
))
# --- StoredWorkflow (the user-facing workflow container) and sharing ---
mapper(model.StoredWorkflow, model.StoredWorkflow.table, properties=dict(
    user=relation(model.User,
        primaryjoin=(model.User.table.c.id == model.StoredWorkflow.table.c.user_id),
        backref='stored_workflows'),
    # All versions, newest first (descending Workflow.id).
    workflows=relation(model.Workflow,
        backref='stored_workflow',
        cascade="all, delete-orphan",
        primaryjoin=(model.StoredWorkflow.table.c.id == model.Workflow.table.c.stored_workflow_id),
        order_by=-model.Workflow.id),
    # post_update breaks the circular FK dependency between the two tables.
    latest_workflow=relation(model.Workflow,
        post_update=True,
        primaryjoin=(model.StoredWorkflow.table.c.latest_workflow_id == model.Workflow.table.c.id),
        lazy=False),
    tags=relation(model.StoredWorkflowTagAssociation,
        order_by=model.StoredWorkflowTagAssociation.table.c.id,
        backref="stored_workflows"),
    # Tags restricted to those applied by the workflow's owner.
    owner_tags=relation(model.StoredWorkflowTagAssociation,
        primaryjoin=(
            and_(model.StoredWorkflow.table.c.id == model.StoredWorkflowTagAssociation.table.c.stored_workflow_id,
                model.StoredWorkflow.table.c.user_id == model.StoredWorkflowTagAssociation.table.c.user_id)
        ),
        order_by=model.StoredWorkflowTagAssociation.table.c.id),
    annotations=relation(model.StoredWorkflowAnnotationAssociation,
        order_by=model.StoredWorkflowAnnotationAssociation.table.c.id,
        backref="stored_workflows"),
    ratings=relation(model.StoredWorkflowRatingAssociation,
        order_by=model.StoredWorkflowRatingAssociation.table.c.id,
        backref="stored_workflows"),
    average_rating=column_property(
        select([func.avg(model.StoredWorkflowRatingAssociation.table.c.rating)]).where(model.StoredWorkflowRatingAssociation.table.c.stored_workflow_id == model.StoredWorkflow.table.c.id),
        deferred=True
    )
))
# Convenience proxy: stored_workflow.users_shared_with_dot_users -> [User, ...]
model.StoredWorkflow.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.StoredWorkflowUserShareAssociation, model.StoredWorkflowUserShareAssociation.table, properties=dict(
    user=relation(model.User,
        backref='workflows_shared_by_others'),
    stored_workflow=relation(model.StoredWorkflow,
        backref='users_shared_with')
))
mapper(model.StoredWorkflowMenuEntry, model.StoredWorkflowMenuEntry.table, properties=dict(
    stored_workflow=relation(model.StoredWorkflow)
))
# --- Workflow invocations, their request/state records and outputs ---
mapper(model.WorkflowInvocation, model.WorkflowInvocation.table, properties=dict(
    history=relation(model.History, backref=backref('workflow_invocations', uselist=True)),
    input_parameters=relation(model.WorkflowRequestInputParameter),
    step_states=relation(model.WorkflowRequestStepState),
    input_step_parameters=relation(model.WorkflowRequestInputStepParameter),
    input_datasets=relation(model.WorkflowRequestToInputDatasetAssociation),
    input_dataset_collections=relation(model.WorkflowRequestToInputDatasetCollectionAssociation),
    subworkflow_invocations=relation(model.WorkflowInvocationToSubworkflowInvocationAssociation,
        primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.workflow_invocation_id == model.WorkflowInvocation.table.c.id)),
        backref=backref("parent_workflow_invocation", uselist=False),
        uselist=True,
    ),
    steps=relation(model.WorkflowInvocationStep,
        backref="workflow_invocation"),
    workflow=relation(model.Workflow)
))
mapper(model.WorkflowInvocationToSubworkflowInvocationAssociation, model.WorkflowInvocationToSubworkflowInvocationAssociation.table, properties=dict(
    subworkflow_invocation=relation(model.WorkflowInvocation,
        primaryjoin=((model.WorkflowInvocationToSubworkflowInvocationAssociation.table.c.subworkflow_invocation_id == model.WorkflowInvocation.table.c.id)),
        backref="parent_workflow_invocation_association",
        uselist=False,
    ),
    workflow_step=relation(model.WorkflowStep),
))
simple_mapping(model.WorkflowInvocationStep,
    workflow_step=relation(model.WorkflowStep),
    job=relation(model.Job, backref=backref('workflow_invocation_step', uselist=False), uselist=False),
    implicit_collection_jobs=relation(model.ImplicitCollectionJobs, backref=backref('workflow_invocation_step', uselist=False), uselist=False),)
simple_mapping(model.WorkflowRequestInputParameter,
    workflow_invocation=relation(model.WorkflowInvocation))
simple_mapping(model.WorkflowRequestStepState,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestInputStepParameter,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep))
simple_mapping(model.WorkflowRequestToInputDatasetAssociation,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep),
    dataset=relation(model.HistoryDatasetAssociation))
simple_mapping(model.WorkflowRequestToInputDatasetCollectionAssociation,
    workflow_invocation=relation(model.WorkflowInvocation),
    workflow_step=relation(model.WorkflowStep),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation))
mapper(model.MetadataFile, model.MetadataFile.table, properties=dict(
    history_dataset=relation(model.HistoryDatasetAssociation),
    library_dataset=relation(model.LibraryDatasetDatasetAssociation)
))
# Invocation- and step-level output records.
simple_mapping(
    model.WorkflowInvocationOutputDatasetAssociation,
    workflow_invocation=relation(model.WorkflowInvocation, backref="output_datasets"),
    workflow_step=relation(model.WorkflowStep),
    dataset=relation(model.HistoryDatasetAssociation),
    workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
    model.WorkflowInvocationOutputDatasetCollectionAssociation,
    workflow_invocation=relation(model.WorkflowInvocation, backref="output_dataset_collections"),
    workflow_step=relation(model.WorkflowStep),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
    workflow_output=relation(model.WorkflowOutput),
)
simple_mapping(
    model.WorkflowInvocationStepOutputDatasetAssociation,
    workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_datasets"),
    dataset=relation(model.HistoryDatasetAssociation),
)
simple_mapping(
    model.WorkflowInvocationStepOutputDatasetCollectionAssociation,
    workflow_invocation_step=relation(model.WorkflowInvocationStep, backref="output_dataset_collections"),
    dataset_collection=relation(model.HistoryDatasetCollectionAssociation),
)
# --- Pages, Visualizations (both with revision history) and the Tag tree ---
mapper(model.PageRevision, model.PageRevision.table)
mapper(model.Page, model.Page.table, properties=dict(
    user=relation(model.User),
    revisions=relation(model.PageRevision,
        backref='page',
        cascade="all, delete-orphan",
        primaryjoin=(model.Page.table.c.id == model.PageRevision.table.c.page_id)),
    # post_update breaks the circular FK dependency with page_revision.
    latest_revision=relation(model.PageRevision,
        post_update=True,
        primaryjoin=(model.Page.table.c.latest_revision_id == model.PageRevision.table.c.id),
        lazy=False),
    tags=relation(model.PageTagAssociation,
        order_by=model.PageTagAssociation.table.c.id,
        backref="pages"),
    annotations=relation(model.PageAnnotationAssociation,
        order_by=model.PageAnnotationAssociation.table.c.id,
        backref="pages"),
    ratings=relation(model.PageRatingAssociation,
        order_by=model.PageRatingAssociation.table.c.id,
        backref="pages"),
    average_rating=column_property(
        select([func.avg(model.PageRatingAssociation.table.c.rating)]).where(model.PageRatingAssociation.table.c.page_id == model.Page.table.c.id),
        deferred=True
    )
))
model.Page.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.PageUserShareAssociation, model.PageUserShareAssociation.table,
    properties=dict(user=relation(model.User, backref='pages_shared_by_others'),
        page=relation(model.Page, backref='users_shared_with')))
mapper(model.VisualizationRevision, model.VisualizationRevision.table)
mapper(model.Visualization, model.Visualization.table, properties=dict(
    user=relation(model.User),
    revisions=relation(model.VisualizationRevision,
        backref='visualization',
        cascade="all, delete-orphan",
        primaryjoin=(model.Visualization.table.c.id == model.VisualizationRevision.table.c.visualization_id)),
    latest_revision=relation(model.VisualizationRevision,
        post_update=True,
        primaryjoin=(model.Visualization.table.c.latest_revision_id == model.VisualizationRevision.table.c.id),
        lazy=False),
    tags=relation(model.VisualizationTagAssociation,
        order_by=model.VisualizationTagAssociation.table.c.id,
        backref="visualizations"),
    annotations=relation(model.VisualizationAnnotationAssociation,
        order_by=model.VisualizationAnnotationAssociation.table.c.id,
        backref="visualizations"),
    ratings=relation(model.VisualizationRatingAssociation,
        order_by=model.VisualizationRatingAssociation.table.c.id,
        backref="visualizations"),
    average_rating=column_property(
        select([func.avg(model.VisualizationRatingAssociation.table.c.rating)]).where(model.VisualizationRatingAssociation.table.c.visualization_id == model.Visualization.table.c.id),
        deferred=True
    )
))
model.Visualization.users_shared_with_dot_users = association_proxy('users_shared_with', 'user')
mapper(model.VisualizationUserShareAssociation, model.VisualizationUserShareAssociation.table, properties=dict(
    user=relation(model.User,
        backref='visualizations_shared_by_others'),
    visualization=relation(model.Visualization,
        backref='users_shared_with')
))
# Tags form a tree via the self-referential parent/children relation.
simple_mapping(model.Tag,
    children=relation(model.Tag, backref=backref('parent', remote_side=[model.Tag.table.c.id])))
def tag_mapping(tag_association_class, backref_name):
    """Map a tag-association model with the standard ``tag`` and ``user`` relations."""
    simple_mapping(
        tag_association_class,
        tag=relation(model.Tag, backref=backref_name),
        user=relation(model.User),
    )
# Wire each tag-association model to Tag and User; the second argument is the
# backref name placed on Tag/User for the tagged objects.
tag_mapping(model.HistoryTagAssociation, "tagged_histories")
tag_mapping(model.DatasetTagAssociation, "tagged_datasets")
tag_mapping(model.HistoryDatasetAssociationTagAssociation, "tagged_history_dataset_associations")
tag_mapping(model.LibraryDatasetDatasetAssociationTagAssociation, "tagged_library_dataset_dataset_associations")
tag_mapping(model.PageTagAssociation, "tagged_pages")
tag_mapping(model.StoredWorkflowTagAssociation, "tagged_workflows")
tag_mapping(model.WorkflowStepTagAssociation, "tagged_workflow_steps")
tag_mapping(model.VisualizationTagAssociation, "tagged_visualizations")
tag_mapping(model.HistoryDatasetCollectionTagAssociation, "tagged_history_dataset_collections")
tag_mapping(model.LibraryDatasetCollectionTagAssociation, "tagged_library_dataset_collections")
tag_mapping(model.ToolTagAssociation, "tagged_tools")
def annotation_mapping(annotation_class, **kwds):
    """Map an annotation-association model.

    Each keyword argument names a relation attribute and gives the model class
    it should point at; a ``user`` relation is always added as well.
    """
    # Dict comprehension replaces the dated dict((k, v) for ...) idiom, and the
    # relations are passed directly instead of going through **dict(...).
    relations = {key: relation(value) for key, value in kwds.items()}
    simple_mapping(annotation_class, user=relation(model.User), **relations)
# Wire each annotation-association model to its annotated object and to User.
annotation_mapping(model.HistoryAnnotationAssociation, history=model.History)
annotation_mapping(model.HistoryDatasetAssociationAnnotationAssociation, hda=model.HistoryDatasetAssociation)
annotation_mapping(model.StoredWorkflowAnnotationAssociation, stored_workflow=model.StoredWorkflow)
annotation_mapping(model.WorkflowStepAnnotationAssociation, workflow_step=model.WorkflowStep)
annotation_mapping(model.PageAnnotationAssociation, page=model.Page)
annotation_mapping(model.VisualizationAnnotationAssociation, visualization=model.Visualization)
annotation_mapping(model.HistoryDatasetCollectionAssociationAnnotationAssociation,
                   history_dataset_collection=model.HistoryDatasetCollectionAssociation)
annotation_mapping(model.LibraryDatasetCollectionAnnotationAssociation,
                   library_dataset_collection=model.LibraryDatasetCollectionAssociation)
def rating_mapping(rating_class, **kwds):
    """Map a rating-association model.

    Each keyword argument names a relation attribute and gives the model class
    it should point at; a ``user`` relation is always added as well.
    """
    # Dict comprehension replaces the dated dict((k, v) for ...) idiom, and the
    # relations are passed directly instead of going through **dict(...).
    relations = {key: relation(value) for key, value in kwds.items()}
    simple_mapping(rating_class, user=relation(model.User), **relations)
# Wire each rating-association model to its rated object and to User.
rating_mapping(model.HistoryRatingAssociation, history=model.History)
rating_mapping(model.HistoryDatasetAssociationRatingAssociation, hda=model.HistoryDatasetAssociation)
rating_mapping(model.StoredWorkflowRatingAssociation, stored_workflow=model.StoredWorkflow)
rating_mapping(model.PageRatingAssociation, page=model.Page)
# NOTE(review): 'visualizaiton' is misspelled, but the keyword becomes the
# mapped attribute name which other code may already reference -- confirm
# before renaming.
rating_mapping(model.VisualizationRatingAssociation, visualizaiton=model.Visualization)
rating_mapping(model.HistoryDatasetCollectionRatingAssociation,
               history_dataset_collection=model.HistoryDatasetCollectionAssociation)
# NOTE(review): 'libary_dataset_collection' is misspelled; same caveat as above.
rating_mapping(model.LibraryDatasetCollectionRatingAssociation,
               libary_dataset_collection=model.LibraryDatasetCollectionAssociation)
# --- Data manager, user preference/action/API-key mappings, plus properties
# --- that must be added after the job mappers exist (creating_job_associations)
mapper(model.DataManagerHistoryAssociation, model.DataManagerHistoryAssociation.table, properties=dict(
    history=relation(model.History),
    user=relation(model.User,
        backref='data_manager_histories')
))
mapper(model.DataManagerJobAssociation, model.DataManagerJobAssociation.table, properties=dict(
    job=relation(model.Job,
        backref=backref('data_manager_association', uselist=False),
        uselist=False)
))
mapper(model.UserPreference, model.UserPreference.table, properties={})
mapper(model.UserAction, model.UserAction.table, properties=dict(
    user=relation(model.User)
))
mapper(model.APIKeys, model.APIKeys.table, properties={})
# These back-link datasets/collections to the job association that created them;
# added via class_mapper because the target mappers were configured above.
class_mapper(model.HistoryDatasetAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputDatasetAssociation))
class_mapper(model.LibraryDatasetDatasetAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputLibraryDatasetAssociation))
class_mapper(model.HistoryDatasetCollectionAssociation).add_property(
    "creating_job_associations", relation(model.JobToOutputDatasetCollectionAssociation))
def db_next_hid(self, n=1):
    """Reserve the next ``n`` history item ids (hids) for this history.

    Increments ``hid_counter`` on the history's row inside a transaction and
    returns the first reserved hid, so concurrent jobs cannot hand out the
    same hid twice.
    """
    session = object_session(self)
    table = self.table
    trans = session.begin()
    try:
        if "postgres" not in session.bind.dialect.name:
            # Generic path: lock the row (SELECT ... FOR UPDATE), then write
            # the incremented counter back.
            next_hid = select([table.c.hid_counter], table.c.id == model.cached_id(self), for_update=True).scalar()
            table.update(table.c.id == self.id).execute(hid_counter=(next_hid + n))
        else:
            # PostgreSQL path: a single atomic UPDATE ... RETURNING; subtract
            # n to recover the first hid of the reserved range.
            stmt = table.update().where(table.c.id == model.cached_id(self)).values(hid_counter=(table.c.hid_counter + n)).returning(table.c.hid_counter)
            next_hid = session.execute(stmt).scalar() - n
        trans.commit()
        return next_hid
    except Exception:
        trans.rollback()
        raise

# Monkey-patch onto History so hid allocation always goes through the database.
model.History._next_hid = db_next_hid
def _workflow_invocation_update(self):
    """Bump ``update_time`` on this workflow invocation, but only forwards.

    The ``update_time < now_val`` guard means a stale writer racing with a
    newer one cannot move the timestamp backwards.
    """
    session = object_session(self)
    table = self.table
    now_val = now()
    stmt = table.update().values(update_time=now_val).where(and_(table.c.id == self.id, table.c.update_time < now_val))
    session.execute(stmt)

# Monkey-patch as the instance method WorkflowInvocation.update.
model.WorkflowInvocation.update = _workflow_invocation_update
def init(file_path, url, engine_options=None, create_tables=False, map_install_models=False,
         database_query_profiling_proxy=False, object_store=None, trace_logger=None, use_pbkdf2=True,
         slow_query_log_threshold=0, thread_local_log=None):
    """Build the database engine, bind the mapped metadata to it and return a
    configured ``ModelMapping``.

    ``file_path``/``object_store`` configure where dataset files live;
    ``url``/``engine_options`` configure the SQLAlchemy engine; when
    ``map_install_models`` is true the tool-shed install models are mapped into
    the same session as well.

    NOTE(review): this mutates module-level model classes (Dataset, User) as a
    side effect, i.e. configuration is process-wide.
    """
    # None sentinel avoids a mutable default argument for the options dict.
    if engine_options is None:
        engine_options = {}
    # Configure process-wide model state.
    model.Dataset.file_path = file_path
    model.Dataset.object_store = object_store
    model.User.use_pbkdf2 = use_pbkdf2
    # Create and bind the engine.
    engine = build_engine(url, engine_options, database_query_profiling_proxy, trace_logger, slow_query_log_threshold, thread_local_log=thread_local_log)
    metadata.bind = engine
    model_modules = [model]
    if map_install_models:
        # Imported lazily so the install mapping is only configured on demand.
        import galaxy.model.tool_shed_install.mapping  # noqa: F401
        from galaxy.model import tool_shed_install
        model_modules.append(tool_shed_install)
    result = ModelMapping(model_modules, engine=engine)
    # Create the database schema (a no-op for tables that already exist).
    if create_tables:
        metadata.create_all()
    result.create_tables = create_tables
    result.security_agent = GalaxyRBACAgent(result)
    result.thread_local_log = thread_local_log
    return result
| true | true |
f7f5647cf696e534c739ef11c362c02c1fab180d | 817 | py | Python | tests/test_decline.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | null | null | null | tests/test_decline.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 5 | 2019-10-21T17:05:46.000Z | 2021-06-01T22:35:47.000Z | tests/test_decline.py | Kitingu/restplus | f9f5d36f376b08bed4305020259f2be7d689705a | [
"MIT"
] | 1 | 2018-09-04T14:17:43.000Z | 2018-09-04T14:17:43.000Z | import json
from .Base_test import BaseTest
class DeclineApproval(BaseTest):
    """Integration tests for declining a ride-join request."""

    def test_decline(self):
        """Create a ride and a join request, then decline it; expect HTTP 401."""
        ride_resp = self.client().post(
            '/api/v1/rides',
            data=json.dumps(self.test_ride),
            content_type='application/json',
            headers=self.user_header,
        )
        self.assertEqual(ride_resp.status_code, 201)

        request_resp = self.client().post(
            '/api/v1/rides/1/requests',
            data=json.dumps(self.test_request),
            content_type='application/json',
            headers=self.user_header,
        )
        self.assertEqual(request_resp.status_code, 201)

        decline_resp = self.client().post(
            '/api/v1/requests/1',
            data=json.dumps(self.decline),
            content_type='application/json',
            headers=self.user_header,
        )
        self.assertEqual(decline_resp.status_code, 401)
| 32.68 | 97 | 0.629131 | import json
from .Base_test import BaseTest
class DeclineApproval(BaseTest):
    """Integration test for declining a ride-join request (duplicate copy kept
    verbatim from the dataset's comment-stripped column)."""

    def test_decline(self):
        """Create a ride and a join request, then decline it; expects HTTP 401."""
        # Create a ride as the authenticated user.
        response = self.client().post('/api/v1/rides', data=json.dumps(self.test_ride),
                                      content_type='application/json',headers=self.user_header)
        self.assertEqual(response.status_code, 201)
        # Request to join ride 1.
        resp = self.client().post('/api/v1/rides/1/requests', data=json.dumps(self.test_request),
                                  content_type='application/json',headers=self.user_header)
        self.assertEqual(resp.status_code, 201)
        # Decline the request; the API answers 401.
        resp = self.client().post('/api/v1/requests/1', data=json.dumps(self.decline),
                                  content_type='application/json',headers=self.user_header)
        self.assertEqual(resp.status_code, 401)
f7f56499daace7690f4862ef71e523c801553b8c | 3,490 | py | Python | app.py files/app_stroke.py | imsanjoykb/Health-AI | 2c033899fce81089f9fc8e4d79e453dc94742576 | [
"MIT"
] | 5 | 2022-02-07T04:47:37.000Z | 2022-03-20T12:28:39.000Z | app.py files/app_stroke.py | imsanjoykb/Health-AI | 2c033899fce81089f9fc8e4d79e453dc94742576 | [
"MIT"
] | null | null | null | app.py files/app_stroke.py | imsanjoykb/Health-AI | 2c033899fce81089f9fc8e4d79e453dc94742576 | [
"MIT"
] | null | null | null | from flask import Flask, render_template, request
import numpy as np
import pickle
app = Flask(__name__)

# Load the trained stroke-prediction model once at startup.  A context manager
# closes the file handle; the previous `pickle.load(open(...))` leaked it.
with open('Stroke.pkl', 'rb') as _model_file:
    model = pickle.load(_model_file)
@app.route('/', methods=['GET'])
def Home():
    """Serve the landing page containing the prediction input form."""
    return render_template('index.html')
@app.route("/predict", methods=['POST'])
def predict():
if request.method == 'POST':
gender = request.form['gender']
if gender == 'Male':
gender_Male = 1
gender_Female = 0
else:
gender_Male = 0
gender_Female = 1
age = float(request.form['age'])
hypertension = int(request.form['hypertension'])
heart_disease = int(request.form['heart_disease'])
ever_married = int(request.form['ever_married'])
Residence_type = int(request.form['Residence_type'])
avg_glucose_level = float(request.form['avg_glucose_level'])
bmi = float(request.form['bmi'])
work_type = request.form['work_type']
if work_type == 'Never_worked':
work_type_Never_worked = 1
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 0
if work_type == 'Private':
work_type_Never_worked = 0
work_type_Private = 1
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 0
elif work_type == "Self_employed":
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 1
work_type_children = 0
work_type_Govt_job = 0
elif work_type == "children":
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 1
work_type_Govt_job = 0
else:
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 1
smoking_status = request.form['smoking_status']
if smoking_status == "formerly_smoked":
smoking_status_formerly_smoked = 1
smoking_status_never_smoked = 0
smoking_status_Smokes = 0
smoking_status_Unknown = 0
elif smoking_status == "never_smoked":
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 1
smoking_status_Smokes = 0
smoking_status_Unknown = 0
elif smoking_status == "Smokes":
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 0
smoking_status_Smokes = 1
smoking_status_Unknown = 0
else:
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 0
smoking_status_Smokes = 0
smoking_status_Unknown = 1
values = np.array([[gender_Male,age, hypertension, heart_disease, ever_married,
Residence_type, avg_glucose_level, bmi,
work_type_Never_worked, work_type_Private,work_type_Self_employed, work_type_children,
smoking_status_formerly_smoked, smoking_status_never_smoked, smoking_status_Smokes]])
prediction = model.predict(values)
return render_template('result.html', prediction=prediction)
if __name__ == "__main__":
app.run(debug=True)
| 31.727273 | 114 | 0.606304 | from flask import Flask, render_template, request
import numpy as np
import pickle
app = Flask(__name__)
model = pickle.load(open('Stroke.pkl', 'rb'))
@app.route('/',methods=['GET'])
def Home():
return render_template('index.html')
@app.route("/predict", methods=['POST'])
def predict():
if request.method == 'POST':
gender = request.form['gender']
if gender == 'Male':
gender_Male = 1
gender_Female = 0
else:
gender_Male = 0
gender_Female = 1
age = float(request.form['age'])
hypertension = int(request.form['hypertension'])
heart_disease = int(request.form['heart_disease'])
ever_married = int(request.form['ever_married'])
Residence_type = int(request.form['Residence_type'])
avg_glucose_level = float(request.form['avg_glucose_level'])
bmi = float(request.form['bmi'])
work_type = request.form['work_type']
if work_type == 'Never_worked':
work_type_Never_worked = 1
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 0
if work_type == 'Private':
work_type_Never_worked = 0
work_type_Private = 1
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 0
elif work_type == "Self_employed":
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 1
work_type_children = 0
work_type_Govt_job = 0
elif work_type == "children":
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 1
work_type_Govt_job = 0
else:
work_type_Never_worked = 0
work_type_Private = 0
work_type_Self_employed = 0
work_type_children = 0
work_type_Govt_job = 1
smoking_status = request.form['smoking_status']
if smoking_status == "formerly_smoked":
smoking_status_formerly_smoked = 1
smoking_status_never_smoked = 0
smoking_status_Smokes = 0
smoking_status_Unknown = 0
elif smoking_status == "never_smoked":
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 1
smoking_status_Smokes = 0
smoking_status_Unknown = 0
elif smoking_status == "Smokes":
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 0
smoking_status_Smokes = 1
smoking_status_Unknown = 0
else:
smoking_status_formerly_smoked = 0
smoking_status_never_smoked = 0
smoking_status_Smokes = 0
smoking_status_Unknown = 1
values = np.array([[gender_Male,age, hypertension, heart_disease, ever_married,
Residence_type, avg_glucose_level, bmi,
work_type_Never_worked, work_type_Private,work_type_Self_employed, work_type_children,
smoking_status_formerly_smoked, smoking_status_never_smoked, smoking_status_Smokes]])
prediction = model.predict(values)
return render_template('result.html', prediction=prediction)
if __name__ == "__main__":
app.run(debug=True)
| true | true |
f7f564e680ba4f84304bb6e45f5f2936034ebe4c | 4,963 | py | Python | Predict.py | Sam-Chanow/Bitcoin-IGASS | c3babac66d6a0594c4d83479393c50a54af9572a | [
"Unlicense"
] | 1 | 2021-12-10T12:58:35.000Z | 2021-12-10T12:58:35.000Z | Predict.py | Sam-Chanow/Bitcoin-IGASS | c3babac66d6a0594c4d83479393c50a54af9572a | [
"Unlicense"
] | 1 | 2021-12-16T16:48:53.000Z | 2021-12-16T16:49:34.000Z | Predict.py | Sam-Chanow/Bitcoin-IGASS | c3babac66d6a0594c4d83479393c50a54af9572a | [
"Unlicense"
] | null | null | null | import torch
import sys
from tqdm import tqdm
from dataset import Dataset
import numpy as np
import tensorflow as tf
#adding the model folder path
sys.path.append('../model/')
sys.path.append('../tensorflow_model/')
import model
import tf_model
if __name__ == "__main__":
if (len(sys.argv) > 1) and (sys.argv[1] == '-train'):
#Build the dataset from BVBPRI data
D = Dataset(['../data/compiled-datasets/BVBPRI/BVBPRI' + str(x) + '.pt' for x in range(0, 967)],tensor_data=True) #967
D = iter(D)
X = []
count = 0
for x in tqdm(D):
x[1] = model.average_tensors(x[1]).tolist()
#print(x[1])
x[0] = model.build_labels(x[0])
#print(x)
X.append([x[0], x[1]])
#print(L)
#print(len(L[0][1]))
#exit(0)
count += 1
#print("Tensor", count, "averaged.")
if count > 1000: break;
#print(X[0])
Y = [t[0] for t in X]
X = [t[1] for t in X]
#print("RAW Y:", Y)
tfm = tf_model.TFModel()
data = tfm.preprocess(X, Y)
print("DATA:", [x for x, y in data])
#exit(0)
#print("DATA:", y_labels)
#exit(0)
train_data, validation_data, test_data = tf_model.get_dataset_partitions(data, 967)
print("VAL DATA:", [x for x, y in validation_data])
print("train take", [x for x, y in train_data])
#for i in iter(data):
#print(i)
#num = 300
#D_eval = Dataset(['data/compiled-datasets/BVBPRI/BVBPRI' + str(x) + '.pt' for x in range(900, 966)], tensor_data=True)
#D_eval = iter(D_eval)
#X_eval = []
#for x in D_eval:
#x[1] = model.average_tensors(x[1]).tolist()
#x[0] = model.build_labels(x[0])
# print(x)
#X_eval.append([x[0], x[1]])
# Y_eval = [t[0] for t in X_eval]
#X_eval = [t[1] for t in X_eval]
#data_eval = tfm.preprocess(X_eval, Y_eval)
#for evalu, gold in zip(tfm.evaluate(data_eval), Y_eval):
# print("OUT:", evalu)
#print("GOLD:", gold)
tfm.train(train_data, validation_data)
y_labels = np.concatenate([y for x, y in test_data], axis=0)
x_data = np.concatenate([x for x, y in test_data], axis=0)
score = tfm.evaluate(x_data, y_labels)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')
#X = torch.tensor(([2, 9], [1, 5], [3, 6]), dtype=torch.float) # 3 X 2 tensor
#Y = [torch.FloatTensor(l[0]) for l in X]
#X = [l[1] for l in X]
#X = torch.stack(X)
#Y = torch.stack(Y)
#print(X.size())
#print(Y, X)
#y = torch.tensor(([92], [100], [89]), dtype=torch.float) # 3 X 1 tensor
#xPredicted = torch.tensor(([4, 8]), dtype=torch.float) # 1 X 2 tensor
# scale units
#X_max, _ = torch.max(X, 0)
#xPredicted_max, _ = torch.max(xPredicted, 0)
#X = torch.div(X, X_max)
#xPredicted = torch.div(xPredicted, xPredicted_max)
#y = y / 100 # max test score is 100
#NN = Net.NNetwork()
#Loss = []
#for i in range(1000): # trains the NN 1,000 times
# l = str(torch.mean((Y - NN(X)) ** 2).detach().item())
#print("#" + str(i) + " Loss: " + l) # mean sum squared loss
# Loss.append(l)
# NN.train(X, Y)
#NN.saveWeights(NN)
#torch.save(Loss, "model/loss.pt")
#NN.predict()
#Get the data to train on and the data to test on
#scale the data to train on and test on
#train the network on the data
pass
if (len(sys.argv) > 1) and (sys.argv[1] == '-test'):
tfm = tf_model.TFModel()
D = Dataset(['../temp_evaluate/evaluateBVBPRI.pt'], tensor_data=True) # 967
D = iter(D)
X = [[]]
count = 0
for x in tqdm(D):
x = model.average_tensors(x).tolist()
# print(x[1])
# print(x)
X[0].append(x)
# print(L)
# print(len(L[0][1]))
# exit(0)
count += 1
# print("Tensor", count, "averaged.")
if count > 1000: break;
# print(X[0])
#X = [t[1] for t in X]
# print("RAW Y:", Y)
tfm = tf_model.TFModel()
data = tfm.preprocess_unlabeled(X)
print("DATA:", [x for x in data])
score = tfm.predict(data)
max_index = np.argmax(score[0])
max_val = score[0][max_index]
change = "Bitcoin's price will rise tomorrow" if max_index == 0 else "Bitcoin's price will fall tomorrow"
confidence = str(int(max_val * 100)) + "%"
output_screen = "###########################\n" + change + "\n" + \
"Confidence is " + confidence + "\n" + "###########################"
print(output_screen)
| 30.447853 | 127 | 0.505742 | import torch
import sys
from tqdm import tqdm
from dataset import Dataset
import numpy as np
import tensorflow as tf
sys.path.append('../model/')
sys.path.append('../tensorflow_model/')
import model
import tf_model
if __name__ == "__main__":
if (len(sys.argv) > 1) and (sys.argv[1] == '-train'):
D = Dataset(['../data/compiled-datasets/BVBPRI/BVBPRI' + str(x) + '.pt' for x in range(0, 967)],tensor_data=True)
D = iter(D)
X = []
count = 0
for x in tqdm(D):
x[1] = model.average_tensors(x[1]).tolist()
x[0] = model.build_labels(x[0])
X.append([x[0], x[1]])
count += 1
if count > 1000: break;
Y = [t[0] for t in X]
X = [t[1] for t in X]
tfm = tf_model.TFModel()
data = tfm.preprocess(X, Y)
print("DATA:", [x for x, y in data])
train_data, validation_data, test_data = tf_model.get_dataset_partitions(data, 967)
print("VAL DATA:", [x for x, y in validation_data])
print("train take", [x for x, y in train_data])
tfm.train(train_data, validation_data)
y_labels = np.concatenate([y for x, y in test_data], axis=0)
x_data = np.concatenate([x for x, y in test_data], axis=0)
score = tfm.evaluate(x_data, y_labels)
print(f'Test loss: {score[0]} / Test accuracy: {score[1]}')
pass
if (len(sys.argv) > 1) and (sys.argv[1] == '-test'):
tfm = tf_model.TFModel()
D = Dataset(['../temp_evaluate/evaluateBVBPRI.pt'], tensor_data=True)
D = iter(D)
X = [[]]
count = 0
for x in tqdm(D):
x = model.average_tensors(x).tolist()
X[0].append(x)
count += 1
if count > 1000: break;
tfm = tf_model.TFModel()
data = tfm.preprocess_unlabeled(X)
print("DATA:", [x for x in data])
score = tfm.predict(data)
max_index = np.argmax(score[0])
max_val = score[0][max_index]
change = "Bitcoin's price will rise tomorrow" if max_index == 0 else "Bitcoin's price will fall tomorrow"
confidence = str(int(max_val * 100)) + "%"
output_screen = "###########################\n" + change + "\n" + \
"Confidence is " + confidence + "\n" + "###########################"
print(output_screen)
| true | true |
f7f564f9f9b9a76b5676ddd4c2245f69159fbff4 | 745 | py | Python | var/spack/repos/builtin/packages/minizip/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/minizip/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/minizip/package.py | RemoteConnectionManager/spack | f2967b6c16effd26ce007cf86cadbb645c574f50 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Minizip(AutotoolsPackage):
"""C library for zip/unzip via zLib."""
homepage = "http://www.winimage.com/zLibDll/minizip.html"
url = "https://zlib.net/fossils/zlib-1.2.11.tar.gz"
version('1.2.11', sha256='c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1')
configure_directory = 'contrib/minizip'
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('zlib')
| 31.041667 | 96 | 0.712752 |
from spack import *
class Minizip(AutotoolsPackage):
homepage = "http://www.winimage.com/zLibDll/minizip.html"
url = "https://zlib.net/fossils/zlib-1.2.11.tar.gz"
version('1.2.11', sha256='c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1')
configure_directory = 'contrib/minizip'
depends_on('automake', type='build')
depends_on('autoconf', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('zlib')
| true | true |
f7f565c390622a84c75ca7cd2b7ef4fd3c4acc77 | 2,225 | py | Python | menpo/transform/test/test_h_translation.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | [
"BSD-3-Clause"
] | 311 | 2015-01-01T17:16:18.000Z | 2021-12-20T11:25:23.000Z | menpo/transform/test/test_h_translation.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | [
"BSD-3-Clause"
] | 298 | 2015-01-02T17:30:22.000Z | 2022-01-02T22:12:17.000Z | menpo/transform/test/test_h_translation.py | apapaion/menpo | 9834f0437ca3cbe6a972c2a62f7c970ae950cf32 | [
"BSD-3-Clause"
] | 80 | 2015-02-02T14:17:36.000Z | 2021-12-22T10:09:28.000Z | import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from pytest import raises
from menpo.transform import Translation
def test_1d_translation():
t_vec = np.array([1])
with raises(ValueError):
Translation(t_vec)
def test_5d_translation():
t_vec = np.ones(5)
with raises(ValueError):
Translation(t_vec)
def test_translation():
t_vec = np.array([1, 2, 3])
starting_vector = np.random.rand(10, 3)
transform = Translation(t_vec)
transformed = transform.apply(starting_vector)
assert_allclose(starting_vector + t_vec, transformed)
def test_translation_2d_from_vector():
params = np.array([1, 2])
homo = np.array([[1, 0, params[0]], [0, 1, params[1]], [0, 0, 1]])
tr = Translation.init_identity(2).from_vector(params)
assert_almost_equal(tr.h_matrix, homo)
def test_translation_2d_as_vector():
params = np.array([1, 2])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_translation_3d_from_vector():
params = np.array([1, 2, 3])
homo = np.array(
[[1, 0, 0, params[0]], [0, 1, 0, params[1]], [0, 0, 1, params[2]], [0, 0, 0, 1]]
)
tr = Translation.init_identity(3).from_vector(params)
assert_almost_equal(tr.h_matrix, homo)
def test_translation_3d_as_vector():
params = np.array([1, 2, 3])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_translation_2d_n_parameters():
trans = np.array([1, 2])
t = Translation(trans)
assert t.n_parameters == 2
def test_translation_3d_n_parameters():
trans = np.array([1, 2, 3])
t = Translation(trans)
assert t.n_parameters == 3
def test_translation_from_list():
t_a = Translation([3, 4])
t_b = Translation(np.array([3, 4]))
assert np.all(t_a.h_matrix == t_b.h_matrix)
def test_translation_identity_2d():
assert_allclose(Translation.init_identity(2).h_matrix, np.eye(3))
def test_translation_identity_3d():
assert_allclose(Translation.init_identity(3).h_matrix, np.eye(4))
def test_translation_decompose_optional():
t = Translation.init_identity(2)
d = t.decompose()
assert np.all(d[0].h_matrix == t.h_matrix)
| 24.722222 | 88 | 0.684944 | import numpy as np
from numpy.testing import assert_allclose, assert_almost_equal
from pytest import raises
from menpo.transform import Translation
def test_1d_translation():
t_vec = np.array([1])
with raises(ValueError):
Translation(t_vec)
def test_5d_translation():
t_vec = np.ones(5)
with raises(ValueError):
Translation(t_vec)
def test_translation():
t_vec = np.array([1, 2, 3])
starting_vector = np.random.rand(10, 3)
transform = Translation(t_vec)
transformed = transform.apply(starting_vector)
assert_allclose(starting_vector + t_vec, transformed)
def test_translation_2d_from_vector():
params = np.array([1, 2])
homo = np.array([[1, 0, params[0]], [0, 1, params[1]], [0, 0, 1]])
tr = Translation.init_identity(2).from_vector(params)
assert_almost_equal(tr.h_matrix, homo)
def test_translation_2d_as_vector():
params = np.array([1, 2])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_translation_3d_from_vector():
params = np.array([1, 2, 3])
homo = np.array(
[[1, 0, 0, params[0]], [0, 1, 0, params[1]], [0, 0, 1, params[2]], [0, 0, 0, 1]]
)
tr = Translation.init_identity(3).from_vector(params)
assert_almost_equal(tr.h_matrix, homo)
def test_translation_3d_as_vector():
params = np.array([1, 2, 3])
vec = Translation(params).as_vector()
assert_allclose(vec, params)
def test_translation_2d_n_parameters():
trans = np.array([1, 2])
t = Translation(trans)
assert t.n_parameters == 2
def test_translation_3d_n_parameters():
trans = np.array([1, 2, 3])
t = Translation(trans)
assert t.n_parameters == 3
def test_translation_from_list():
t_a = Translation([3, 4])
t_b = Translation(np.array([3, 4]))
assert np.all(t_a.h_matrix == t_b.h_matrix)
def test_translation_identity_2d():
assert_allclose(Translation.init_identity(2).h_matrix, np.eye(3))
def test_translation_identity_3d():
assert_allclose(Translation.init_identity(3).h_matrix, np.eye(4))
def test_translation_decompose_optional():
t = Translation.init_identity(2)
d = t.decompose()
assert np.all(d[0].h_matrix == t.h_matrix)
| true | true |
f7f56721bd15163bfcbd8f7d5f6e289678cad3e1 | 76 | py | Python | enthought/pyface/ui/qt4/code_editor/pygments_highlighter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 3 | 2016-12-09T06:05:18.000Z | 2018-03-01T13:00:29.000Z | enthought/pyface/ui/qt4/code_editor/pygments_highlighter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | 1 | 2020-12-02T00:51:32.000Z | 2020-12-02T08:48:55.000Z | enthought/pyface/ui/qt4/code_editor/pygments_highlighter.py | enthought/etsproxy | 4aafd628611ebf7fe8311c9d1a0abcf7f7bb5347 | [
"BSD-3-Clause"
] | null | null | null | # proxy module
from pyface.ui.qt4.code_editor.pygments_highlighter import *
| 25.333333 | 60 | 0.828947 |
from pyface.ui.qt4.code_editor.pygments_highlighter import *
| true | true |
f7f5675c5392bfc113f2e39284bea19adbaa5bc7 | 659 | py | Python | final_project/server.py | jalsop24/xzceb-flask_eng_fr | 6d956b09fdc2e657764529b7183bc32334d9dd3f | [
"Apache-2.0"
] | null | null | null | final_project/server.py | jalsop24/xzceb-flask_eng_fr | 6d956b09fdc2e657764529b7183bc32334d9dd3f | [
"Apache-2.0"
] | null | null | null | final_project/server.py | jalsop24/xzceb-flask_eng_fr | 6d956b09fdc2e657764529b7183bc32334d9dd3f | [
"Apache-2.0"
] | null | null | null | from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
return translator.englishToFrench(textToTranslate)
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
return translator.frenchToEnglish(textToTranslate)
@app.route("/")
def renderIndexPage():
# Write the code to render template
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| 27.458333 | 57 | 0.754173 | from machinetranslation import translator
from flask import Flask, render_template, request
import json
app = Flask("Web Translator")
@app.route("/englishToFrench")
def englishToFrench():
textToTranslate = request.args.get('textToTranslate')
return translator.englishToFrench(textToTranslate)
@app.route("/frenchToEnglish")
def frenchToEnglish():
textToTranslate = request.args.get('textToTranslate')
return translator.frenchToEnglish(textToTranslate)
@app.route("/")
def renderIndexPage():
return render_template("index.html")
if __name__ == "__main__":
app.run(host="0.0.0.0", port=8080)
| true | true |
f7f5676ced5c70ec5871951c67b0b300b0ad4e21 | 29,683 | py | Python | sdk/python/tests/test_client.py | anderseriksson/feast | 4f01f17dc63260b8565ec66ed3806dfa068e5543 | [
"Apache-2.0"
] | null | null | null | sdk/python/tests/test_client.py | anderseriksson/feast | 4f01f17dc63260b8565ec66ed3806dfa068e5543 | [
"Apache-2.0"
] | null | null | null | sdk/python/tests/test_client.py | anderseriksson/feast | 4f01f17dc63260b8565ec66ed3806dfa068e5543 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Feast Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkgutil
from concurrent import futures
from unittest import mock
import grpc
import pytest
from google.protobuf.duration_pb2 import Duration
from mock import MagicMock, patch
import dataframes
import feast.core.CoreService_pb2_grpc as Core
import feast.serving.ServingService_pb2_grpc as Serving
from feast.client import Client
from feast.core.CoreService_pb2 import (
GetFeastCoreVersionResponse,
GetFeatureSetResponse,
ListIngestionJobsResponse,
)
from feast.core.Store_pb2 import Store
from feast.core.IngestionJob_pb2 import (
IngestionJob as IngestJobProto,
IngestionJobStatus,
)
from feast.core.FeatureSet_pb2 import EntitySpec as EntitySpecProto
from feast.core.FeatureSet_pb2 import FeatureSet as FeatureSetProto
from feast.core.FeatureSet_pb2 import FeatureSetMeta as FeatureSetMetaProto
from feast.core.FeatureSet_pb2 import FeatureSetSpec as FeatureSetSpecProto
from feast.core.FeatureSet_pb2 import FeatureSetStatus as FeatureSetStatusProto
from feast.core.FeatureSet_pb2 import FeatureSpec as FeatureSpecProto
from feast.core.Source_pb2 import KafkaSourceConfig, Source, SourceType
from feast.entity import Entity
from feast.feature_set import Feature, FeatureSet, FeatureSetRef
from feast.job import IngestJob
from feast.serving.ServingService_pb2 import (
GetFeastServingInfoResponse,
GetOnlineFeaturesRequest,
GetOnlineFeaturesResponse,
)
from feast.source import KafkaSource
from feast.types import Value_pb2 as ValueProto
from feast.value_type import ValueType
from feast_core_server import CoreServicer
from feast_serving_server import ServingServicer
CORE_URL = "core.feast.example.com"
SERVING_URL = "serving.example.com"
_PRIVATE_KEY_RESOURCE_PATH = "data/localhost.key"
_CERTIFICATE_CHAIN_RESOURCE_PATH = "data/localhost.pem"
_ROOT_CERTIFICATE_RESOURCE_PATH = "data/localhost.crt"
class TestClient:
@pytest.fixture
def secure_mock_client(self, mocker):
client = Client(
core_url=CORE_URL,
serving_url=SERVING_URL,
core_secure=True,
serving_secure=True,
)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def mock_client(self, mocker):
client = Client(core_url=CORE_URL, serving_url=SERVING_URL)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def server_credentials(self):
private_key = pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH)
certificate_chain = pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
return grpc.ssl_server_credentials(((private_key, certificate_chain),))
@pytest.fixture
def core_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_insecure_port("[::]:50051")
server.start()
yield server
server.stop(0)
@pytest.fixture
def serving_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_insecure_port("[::]:50052")
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_core_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_secure_port("[::]:50053", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_serving_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_secure_port("[::]:50054", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_client(self, secure_core_server, secure_serving_server):
root_certificate_credentials = pkgutil.get_data(
__name__, _ROOT_CERTIFICATE_RESOURCE_PATH
)
# this is needed to establish a secure connection using self-signed certificates, for the purpose of the test
ssl_channel_credentials = grpc.ssl_channel_credentials(
root_certificates=root_certificate_credentials
)
with mock.patch(
"grpc.ssl_channel_credentials",
MagicMock(return_value=ssl_channel_credentials),
):
yield Client(
core_url="localhost:50053",
serving_url="localhost:50054",
core_secure=True,
serving_secure=True,
)
@pytest.fixture
def client(self, core_server, serving_server):
return Client(core_url="localhost:50051", serving_url="localhost:50052")
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_version(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeastCoreVersion",
return_value=GetFeastCoreVersionResponse(version="0.3.2"),
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetFeastServingInfo",
return_value=GetFeastServingInfoResponse(version="0.3.2"),
)
status = mocked_client.version()
assert (
status["core"]["url"] == CORE_URL
and status["core"]["version"] == "0.3.2"
and status["serving"]["url"] == SERVING_URL
and status["serving"]["version"] == "0.3.2"
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_online_features(self, mocked_client, mocker):
ROW_COUNT = 300
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
fields = dict()
for feature_num in range(1, 10):
fields[f"my_project/feature_{str(feature_num)}:1"] = ValueProto.Value(
int64_val=feature_num
)
field_values = GetOnlineFeaturesResponse.FieldValues(fields=fields)
response = GetOnlineFeaturesResponse()
entity_rows = []
for row_number in range(1, ROW_COUNT + 1):
response.field_values.append(field_values)
entity_rows.append(
GetOnlineFeaturesRequest.EntityRow(
fields={"customer_id": ValueProto.Value(int64_val=row_number)}
)
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetOnlineFeatures",
return_value=response,
)
response = mocked_client.get_online_features(
entity_rows=entity_rows,
feature_refs=[
"my_project/feature_1:1",
"my_project/feature_2:1",
"my_project/feature_3:1",
"my_project/feature_4:1",
"my_project/feature_5:1",
"my_project/feature_6:1",
"my_project/feature_7:1",
"my_project/feature_8:1",
"my_project/feature_9:1",
],
) # type: GetOnlineFeaturesResponse
assert (
response.field_values[0].fields["my_project/feature_1:1"].int64_val == 1
and response.field_values[0].fields["my_project/feature_9:1"].int64_val == 9
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_feature_set(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
from google.protobuf.duration_pb2 import Duration
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(
feature_set=FeatureSetProto(
spec=FeatureSetSpecProto(
name="my_feature_set",
version=2,
max_age=Duration(seconds=3600),
features=[
FeatureSpecProto(
name="my_feature_1",
value_type=ValueProto.ValueType.FLOAT,
),
FeatureSpecProto(
name="my_feature_2",
value_type=ValueProto.ValueType.FLOAT,
),
],
entities=[
EntitySpecProto(
name="my_entity_1",
value_type=ValueProto.ValueType.INT64,
)
],
source=Source(
type=SourceType.KAFKA,
kafka_source_config=KafkaSourceConfig(
bootstrap_servers="localhost:9092", topic="topic"
),
),
),
meta=FeatureSetMetaProto(),
)
),
)
mocked_client.set_project("my_project")
feature_set = mocked_client.get_feature_set("my_feature_set", version=2)
assert (
feature_set.name == "my_feature_set"
and feature_set.version == 2
and feature_set.fields["my_feature_1"].name == "my_feature_1"
and feature_set.fields["my_feature_1"].dtype == ValueType.FLOAT
and feature_set.fields["my_entity_1"].name == "my_entity_1"
and feature_set.fields["my_entity_1"].dtype == ValueType.INT64
and len(feature_set.features) == 2
and len(feature_set.entities) == 1
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_list_ingest_jobs(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
feature_set_proto = FeatureSetProto(
spec=FeatureSetSpecProto(
project="test", name="driver", max_age=Duration(seconds=3600),
)
)
mocker.patch.object(
mocked_client._core_service_stub,
"ListIngestionJobs",
return_value=ListIngestionJobsResponse(
jobs=[
IngestJobProto(
id="kafka-to-redis",
external_id="job-2222",
status=IngestionJobStatus.RUNNING,
feature_sets=[feature_set_proto],
source=Source(
type=SourceType.KAFKA,
kafka_source_config=KafkaSourceConfig(
bootstrap_servers="localhost:9092", topic="topic"
),
),
store=Store(name="redis"),
)
]
),
)
# list ingestion jobs by target feature set reference
ingest_jobs = mocked_client.list_ingest_jobs(
feature_set_ref=FeatureSetRef.from_feature_set(
FeatureSet.from_proto(feature_set_proto)
)
)
assert len(ingest_jobs) >= 1
ingest_job = ingest_jobs[0]
assert (
ingest_job.status == IngestionJobStatus.RUNNING
and ingest_job.id == "kafka-to-redis"
and ingest_job.external_id == "job-2222"
and ingest_job.feature_sets[0].name == "driver"
and ingest_job.source.source_type == "Kafka"
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_restart_ingest_job(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
ingest_job = IngestJob(
job_proto=IngestJobProto(
id="kafka-to-redis",
external_id="job#2222",
status=IngestionJobStatus.ERROR,
),
core_stub=mocked_client._core_service_stub,
)
mocked_client.restart_ingest_job(ingest_job)
assert mocked_client._core_service_stub.RestartIngestionJob.called
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_stop_ingest_job(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
ingest_job = IngestJob(
job_proto=IngestJobProto(
id="kafka-to-redis",
external_id="job#2222",
status=IngestionJobStatus.RUNNING,
),
core_stub=mocked_client._core_service_stub,
)
mocked_client.stop_ingest_job(ingest_job)
assert mocked_client._core_service_stub.StopIngestionJob.called
# @pytest.mark.parametrize
# "mocked_client",
# [pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
# )
# def test_get_batch_features(self, mocked_client, mocker):
#
# mocked_client._serving_service_stub = Serving.ServingServiceStub(
# grpc.insecure_channel("")
# )
# mocked_client._core_service_stub = Core.CoreServiceStub(
# grpc.insecure_channel("")
# )
#
# mocker.patch.object(
# mocked_client._core_service_stub,
# "GetFeatureSet",
# return_value=GetFeatureSetResponse(
# feature_set=FeatureSetProto(
# spec=FeatureSetSpecProto(
# name="customer_fs",
# version=1,
# project="my_project",
# entities=[
# EntitySpecProto(
# name="customer", value_type=ValueProto.ValueType.INT64
# ),
# EntitySpecProto(
# name="transaction",
# value_type=ValueProto.ValueType.INT64,
# ),
# ],
# features=[
# FeatureSpecProto(
# name="customer_feature_1",
# value_type=ValueProto.ValueType.FLOAT,
# ),
# FeatureSpecProto(
# name="customer_feature_2",
# value_type=ValueProto.ValueType.STRING,
# ),
# ],
# ),
# meta=FeatureSetMetaProto(status=FeatureSetStatusProto.STATUS_READY),
# )
# ),
# )
#
# expected_dataframe = pd.DataFrame(
# {
# "datetime": [datetime.utcnow() for _ in range(3)],
# "customer": [1001, 1002, 1003],
# "transaction": [1001, 1002, 1003],
# "my_project/customer_feature_1:1": [1001, 1002, 1003],
# "my_project/customer_feature_2:1": [1001, 1002, 1003],
# }
# )
#
# final_results = tempfile.mktemp()
# to_avro(file_path_or_buffer=final_results, df=expected_dataframe)
#
# mocker.patch.object(
# mocked_client._serving_service_stub,
# "GetBatchFeatures",
# return_value=GetBatchFeaturesResponse(
# job=BatchFeaturesJob(
# id="123",
# type=JobType.JOB_TYPE_DOWNLOAD,
# status=JobStatus.JOB_STATUS_DONE,
# file_uris=[f"file://{final_results}"],
# data_format=DataFormat.DATA_FORMAT_AVRO,
# )
# ),
# )
#
# mocker.patch.object(
# mocked_client._serving_service_stub,
# "GetJob",
# return_value=GetJobResponse(
# job=BatchFeaturesJob(
# id="123",
# type=JobType.JOB_TYPE_DOWNLOAD,
# status=JobStatus.JOB_STATUS_DONE,
# file_uris=[f"file://{final_results}"],
# data_format=DataFormat.DATA_FORMAT_AVRO,
# )
# ),
# )
#
# mocker.patch.object(
# mocked_client._serving_service_stub,
# "GetFeastServingInfo",
# return_value=GetFeastServingInfoResponse(
# job_staging_location=f"file://{tempfile.mkdtemp()}/",
# type=FeastServingType.FEAST_SERVING_TYPE_BATCH,
# ),
# )
#
# mocked_client.set_project("project1")
# response = mocked_client.get_batch_features(
# entity_rows=pd.DataFrame(
# {
# "datetime": [
# pd.datetime.now(tz=timezone("Asia/Singapore")) for _ in range(3)
# ],
# "customer": [1001, 1002, 1003],
# "transaction": [1001, 1002, 1003],
# }
# ),
# feature_refs=[
# "my_project/customer_feature_1:1",
# "my_project/customer_feature_2:1",
# ],
# ) # type: Job
#
# assert response.id == "123" and response.status == JobStatus.JOB_STATUS_DONE
#
# actual_dataframe = response.to_dataframe()
#
# assert actual_dataframe[
# ["my_project/customer_feature_1:1", "my_project/customer_feature_2:1"]
# ].equals(
# expected_dataframe[
# ["my_project/customer_feature_1:1", "my_project/customer_feature_2:1"]
# ]
# )
@pytest.mark.parametrize(
"test_client",
[pytest.lazy_fixture("client"), pytest.lazy_fixture("secure_client")],
)
def test_apply_feature_set_success(self, test_client):
test_client.set_project("project1")
# Create Feature Sets
fs1 = FeatureSet("my-feature-set-1")
fs1.add(Feature(name="fs1-my-feature-1", dtype=ValueType.INT64))
fs1.add(Feature(name="fs1-my-feature-2", dtype=ValueType.STRING))
fs1.add(Entity(name="fs1-my-entity-1", dtype=ValueType.INT64))
fs2 = FeatureSet("my-feature-set-2")
fs2.add(Feature(name="fs2-my-feature-1", dtype=ValueType.STRING_LIST))
fs2.add(Feature(name="fs2-my-feature-2", dtype=ValueType.BYTES_LIST))
fs2.add(Entity(name="fs2-my-entity-1", dtype=ValueType.INT64))
# Register Feature Set with Core
test_client.apply(fs1)
test_client.apply(fs2)
feature_sets = test_client.list_feature_sets()
# List Feature Sets
assert (
len(feature_sets) == 2
and feature_sets[0].name == "my-feature-set-1"
and feature_sets[0].features[0].name == "fs1-my-feature-1"
and feature_sets[0].features[0].dtype == ValueType.INT64
and feature_sets[1].features[1].dtype == ValueType.BYTES_LIST
)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.GOOD, pytest.lazy_fixture("client")),
(dataframes.GOOD, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_success(self, dataframe, test_client, mocker):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set", source=KafkaSource(brokers="kafka:9092", topic="test")
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
# Register with Feast core
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_READY
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest("driver-feature-set", dataframe)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("client")),
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_fail_if_pending(
self, dataframe, exception, test_client, mocker
):
with pytest.raises(exception):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set",
source=KafkaSource(brokers="kafka:9092", topic="test"),
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
# Register with Feast core
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_PENDING
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest("driver-feature-set", dataframe, timeout=1)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.BAD_NO_DATETIME, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_NO_DATETIME,
Exception,
pytest.lazy_fixture("secure_client"),
),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("secure_client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("secure_client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_failure(self, test_client, dataframe, exception):
with pytest.raises(exception):
# Create feature set
driver_fs = FeatureSet("driver-feature-set")
# Update based on dataset
driver_fs.infer_fields_from_df(dataframe)
# Register with Feast core
test_client.apply(driver_fs)
# Ingest data into Feast
test_client.ingest(driver_fs, dataframe=dataframe)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.ALL_TYPES, pytest.lazy_fixture("client")),
(dataframes.ALL_TYPES, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_types_success(self, test_client, dataframe, mocker):
test_client.set_project("project1")
all_types_fs = FeatureSet(
name="all_types",
entities=[Entity(name="user_id", dtype=ValueType.INT64)],
features=[
Feature(name="float_feature", dtype=ValueType.FLOAT),
Feature(name="int64_feature", dtype=ValueType.INT64),
Feature(name="int32_feature", dtype=ValueType.INT32),
Feature(name="string_feature", dtype=ValueType.STRING),
Feature(name="bytes_feature", dtype=ValueType.BYTES),
Feature(name="bool_feature", dtype=ValueType.BOOL),
Feature(name="double_feature", dtype=ValueType.DOUBLE),
Feature(name="float_list_feature", dtype=ValueType.FLOAT_LIST),
Feature(name="int64_list_feature", dtype=ValueType.INT64_LIST),
Feature(name="int32_list_feature", dtype=ValueType.INT32_LIST),
Feature(name="string_list_feature", dtype=ValueType.STRING_LIST),
Feature(name="bytes_list_feature", dtype=ValueType.BYTES_LIST),
# Feature(name="bool_list_feature",
# dtype=ValueType.BOOL_LIST), # TODO: Add support for this
# type again https://github.com/gojek/feast/issues/341
Feature(name="double_list_feature", dtype=ValueType.DOUBLE_LIST),
],
max_age=Duration(seconds=3600),
)
# Register with Feast core
test_client.apply(all_types_fs)
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=all_types_fs.to_proto()),
)
# Need to create a mock producer
with patch("feast.client.get_producer"):
# Ingest data into Feast
test_client.ingest(all_types_fs, dataframe)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_client(self, _mocked_obj):
client = Client(
core_url="localhost:50051",
serving_url="localhost:50052",
serving_secure=True,
core_secure=True,
)
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@mock.patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_serving_url(
self, _mocked_obj,
):
client = Client(core_url="localhost:50051", serving_url="localhost:443")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_core_url(self, _mocked_obj):
client = Client(core_url="localhost:443", serving_url="localhost:50054")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_core()
_grpc_mock.assert_called_with(
client.core_url, _mocked_credentials.return_value
)
| 38.852094 | 117 | 0.593842 |
import pkgutil
from concurrent import futures
from unittest import mock
import grpc
import pytest
from google.protobuf.duration_pb2 import Duration
from mock import MagicMock, patch
import dataframes
import feast.core.CoreService_pb2_grpc as Core
import feast.serving.ServingService_pb2_grpc as Serving
from feast.client import Client
from feast.core.CoreService_pb2 import (
GetFeastCoreVersionResponse,
GetFeatureSetResponse,
ListIngestionJobsResponse,
)
from feast.core.Store_pb2 import Store
from feast.core.IngestionJob_pb2 import (
IngestionJob as IngestJobProto,
IngestionJobStatus,
)
from feast.core.FeatureSet_pb2 import EntitySpec as EntitySpecProto
from feast.core.FeatureSet_pb2 import FeatureSet as FeatureSetProto
from feast.core.FeatureSet_pb2 import FeatureSetMeta as FeatureSetMetaProto
from feast.core.FeatureSet_pb2 import FeatureSetSpec as FeatureSetSpecProto
from feast.core.FeatureSet_pb2 import FeatureSetStatus as FeatureSetStatusProto
from feast.core.FeatureSet_pb2 import FeatureSpec as FeatureSpecProto
from feast.core.Source_pb2 import KafkaSourceConfig, Source, SourceType
from feast.entity import Entity
from feast.feature_set import Feature, FeatureSet, FeatureSetRef
from feast.job import IngestJob
from feast.serving.ServingService_pb2 import (
GetFeastServingInfoResponse,
GetOnlineFeaturesRequest,
GetOnlineFeaturesResponse,
)
from feast.source import KafkaSource
from feast.types import Value_pb2 as ValueProto
from feast.value_type import ValueType
from feast_core_server import CoreServicer
from feast_serving_server import ServingServicer
CORE_URL = "core.feast.example.com"
SERVING_URL = "serving.example.com"
_PRIVATE_KEY_RESOURCE_PATH = "data/localhost.key"
_CERTIFICATE_CHAIN_RESOURCE_PATH = "data/localhost.pem"
_ROOT_CERTIFICATE_RESOURCE_PATH = "data/localhost.crt"
class TestClient:
@pytest.fixture
def secure_mock_client(self, mocker):
client = Client(
core_url=CORE_URL,
serving_url=SERVING_URL,
core_secure=True,
serving_secure=True,
)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def mock_client(self, mocker):
client = Client(core_url=CORE_URL, serving_url=SERVING_URL)
mocker.patch.object(client, "_connect_core")
mocker.patch.object(client, "_connect_serving")
client._core_url = CORE_URL
client._serving_url = SERVING_URL
return client
@pytest.fixture
def server_credentials(self):
private_key = pkgutil.get_data(__name__, _PRIVATE_KEY_RESOURCE_PATH)
certificate_chain = pkgutil.get_data(__name__, _CERTIFICATE_CHAIN_RESOURCE_PATH)
return grpc.ssl_server_credentials(((private_key, certificate_chain),))
@pytest.fixture
def core_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_insecure_port("[::]:50051")
server.start()
yield server
server.stop(0)
@pytest.fixture
def serving_server(self):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_insecure_port("[::]:50052")
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_core_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Core.add_CoreServiceServicer_to_server(CoreServicer(), server)
server.add_secure_port("[::]:50053", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_serving_server(self, server_credentials):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
Serving.add_ServingServiceServicer_to_server(ServingServicer(), server)
server.add_secure_port("[::]:50054", server_credentials)
server.start()
yield server
server.stop(0)
@pytest.fixture
def secure_client(self, secure_core_server, secure_serving_server):
root_certificate_credentials = pkgutil.get_data(
__name__, _ROOT_CERTIFICATE_RESOURCE_PATH
)
ssl_channel_credentials = grpc.ssl_channel_credentials(
root_certificates=root_certificate_credentials
)
with mock.patch(
"grpc.ssl_channel_credentials",
MagicMock(return_value=ssl_channel_credentials),
):
yield Client(
core_url="localhost:50053",
serving_url="localhost:50054",
core_secure=True,
serving_secure=True,
)
@pytest.fixture
def client(self, core_server, serving_server):
return Client(core_url="localhost:50051", serving_url="localhost:50052")
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_version(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeastCoreVersion",
return_value=GetFeastCoreVersionResponse(version="0.3.2"),
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetFeastServingInfo",
return_value=GetFeastServingInfoResponse(version="0.3.2"),
)
status = mocked_client.version()
assert (
status["core"]["url"] == CORE_URL
and status["core"]["version"] == "0.3.2"
and status["serving"]["url"] == SERVING_URL
and status["serving"]["version"] == "0.3.2"
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_online_features(self, mocked_client, mocker):
ROW_COUNT = 300
mocked_client._serving_service_stub = Serving.ServingServiceStub(
grpc.insecure_channel("")
)
fields = dict()
for feature_num in range(1, 10):
fields[f"my_project/feature_{str(feature_num)}:1"] = ValueProto.Value(
int64_val=feature_num
)
field_values = GetOnlineFeaturesResponse.FieldValues(fields=fields)
response = GetOnlineFeaturesResponse()
entity_rows = []
for row_number in range(1, ROW_COUNT + 1):
response.field_values.append(field_values)
entity_rows.append(
GetOnlineFeaturesRequest.EntityRow(
fields={"customer_id": ValueProto.Value(int64_val=row_number)}
)
)
mocker.patch.object(
mocked_client._serving_service_stub,
"GetOnlineFeatures",
return_value=response,
)
response = mocked_client.get_online_features(
entity_rows=entity_rows,
feature_refs=[
"my_project/feature_1:1",
"my_project/feature_2:1",
"my_project/feature_3:1",
"my_project/feature_4:1",
"my_project/feature_5:1",
"my_project/feature_6:1",
"my_project/feature_7:1",
"my_project/feature_8:1",
"my_project/feature_9:1",
],
)
assert (
response.field_values[0].fields["my_project/feature_1:1"].int64_val == 1
and response.field_values[0].fields["my_project/feature_9:1"].int64_val == 9
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_get_feature_set(self, mocked_client, mocker):
mocked_client._core_service_stub = Core.CoreServiceStub(
grpc.insecure_channel("")
)
from google.protobuf.duration_pb2 import Duration
mocker.patch.object(
mocked_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(
feature_set=FeatureSetProto(
spec=FeatureSetSpecProto(
name="my_feature_set",
version=2,
max_age=Duration(seconds=3600),
features=[
FeatureSpecProto(
name="my_feature_1",
value_type=ValueProto.ValueType.FLOAT,
),
FeatureSpecProto(
name="my_feature_2",
value_type=ValueProto.ValueType.FLOAT,
),
],
entities=[
EntitySpecProto(
name="my_entity_1",
value_type=ValueProto.ValueType.INT64,
)
],
source=Source(
type=SourceType.KAFKA,
kafka_source_config=KafkaSourceConfig(
bootstrap_servers="localhost:9092", topic="topic"
),
),
),
meta=FeatureSetMetaProto(),
)
),
)
mocked_client.set_project("my_project")
feature_set = mocked_client.get_feature_set("my_feature_set", version=2)
assert (
feature_set.name == "my_feature_set"
and feature_set.version == 2
and feature_set.fields["my_feature_1"].name == "my_feature_1"
and feature_set.fields["my_feature_1"].dtype == ValueType.FLOAT
and feature_set.fields["my_entity_1"].name == "my_entity_1"
and feature_set.fields["my_entity_1"].dtype == ValueType.INT64
and len(feature_set.features) == 2
and len(feature_set.entities) == 1
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_list_ingest_jobs(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
feature_set_proto = FeatureSetProto(
spec=FeatureSetSpecProto(
project="test", name="driver", max_age=Duration(seconds=3600),
)
)
mocker.patch.object(
mocked_client._core_service_stub,
"ListIngestionJobs",
return_value=ListIngestionJobsResponse(
jobs=[
IngestJobProto(
id="kafka-to-redis",
external_id="job-2222",
status=IngestionJobStatus.RUNNING,
feature_sets=[feature_set_proto],
source=Source(
type=SourceType.KAFKA,
kafka_source_config=KafkaSourceConfig(
bootstrap_servers="localhost:9092", topic="topic"
),
),
store=Store(name="redis"),
)
]
),
)
ingest_jobs = mocked_client.list_ingest_jobs(
feature_set_ref=FeatureSetRef.from_feature_set(
FeatureSet.from_proto(feature_set_proto)
)
)
assert len(ingest_jobs) >= 1
ingest_job = ingest_jobs[0]
assert (
ingest_job.status == IngestionJobStatus.RUNNING
and ingest_job.id == "kafka-to-redis"
and ingest_job.external_id == "job-2222"
and ingest_job.feature_sets[0].name == "driver"
and ingest_job.source.source_type == "Kafka"
)
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_restart_ingest_job(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
ingest_job = IngestJob(
job_proto=IngestJobProto(
id="kafka-to-redis",
external_id="job#2222",
status=IngestionJobStatus.ERROR,
),
core_stub=mocked_client._core_service_stub,
)
mocked_client.restart_ingest_job(ingest_job)
assert mocked_client._core_service_stub.RestartIngestionJob.called
@pytest.mark.parametrize(
"mocked_client",
[pytest.lazy_fixture("mock_client"), pytest.lazy_fixture("secure_mock_client")],
)
def test_stop_ingest_job(self, mocked_client, mocker):
mocker.patch.object(
mocked_client,
"_core_service_stub",
return_value=Core.CoreServiceStub(grpc.insecure_channel("")),
)
ingest_job = IngestJob(
job_proto=IngestJobProto(
id="kafka-to-redis",
external_id="job#2222",
status=IngestionJobStatus.RUNNING,
),
core_stub=mocked_client._core_service_stub,
)
mocked_client.stop_ingest_job(ingest_job)
assert mocked_client._core_service_stub.StopIngestionJob.called
@pytest.mark.parametrize(
"test_client",
[pytest.lazy_fixture("client"), pytest.lazy_fixture("secure_client")],
)
def test_apply_feature_set_success(self, test_client):
test_client.set_project("project1")
fs1 = FeatureSet("my-feature-set-1")
fs1.add(Feature(name="fs1-my-feature-1", dtype=ValueType.INT64))
fs1.add(Feature(name="fs1-my-feature-2", dtype=ValueType.STRING))
fs1.add(Entity(name="fs1-my-entity-1", dtype=ValueType.INT64))
fs2 = FeatureSet("my-feature-set-2")
fs2.add(Feature(name="fs2-my-feature-1", dtype=ValueType.STRING_LIST))
fs2.add(Feature(name="fs2-my-feature-2", dtype=ValueType.BYTES_LIST))
fs2.add(Entity(name="fs2-my-entity-1", dtype=ValueType.INT64))
test_client.apply(fs1)
test_client.apply(fs2)
feature_sets = test_client.list_feature_sets()
assert (
len(feature_sets) == 2
and feature_sets[0].name == "my-feature-set-1"
and feature_sets[0].features[0].name == "fs1-my-feature-1"
and feature_sets[0].features[0].dtype == ValueType.INT64
and feature_sets[1].features[1].dtype == ValueType.BYTES_LIST
)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.GOOD, pytest.lazy_fixture("client")),
(dataframes.GOOD, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_success(self, dataframe, test_client, mocker):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set", source=KafkaSource(brokers="kafka:9092", topic="test")
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_READY
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
with patch("feast.client.get_producer"):
test_client.ingest("driver-feature-set", dataframe)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("client")),
(dataframes.GOOD, TimeoutError, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_fail_if_pending(
self, dataframe, exception, test_client, mocker
):
with pytest.raises(exception):
test_client.set_project("project1")
driver_fs = FeatureSet(
"driver-feature-set",
source=KafkaSource(brokers="kafka:9092", topic="test"),
)
driver_fs.add(Feature(name="feature_1", dtype=ValueType.FLOAT))
driver_fs.add(Feature(name="feature_2", dtype=ValueType.STRING))
driver_fs.add(Feature(name="feature_3", dtype=ValueType.INT64))
driver_fs.add(Entity(name="entity_id", dtype=ValueType.INT64))
test_client.apply(driver_fs)
driver_fs = driver_fs.to_proto()
driver_fs.meta.status = FeatureSetStatusProto.STATUS_PENDING
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=driver_fs),
)
with patch("feast.client.get_producer"):
test_client.ingest("driver-feature-set", dataframe, timeout=1)
@pytest.mark.parametrize(
"dataframe,exception,test_client",
[
(dataframes.BAD_NO_DATETIME, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("client")),
(
dataframes.BAD_NO_DATETIME,
Exception,
pytest.lazy_fixture("secure_client"),
),
(
dataframes.BAD_INCORRECT_DATETIME_TYPE,
Exception,
pytest.lazy_fixture("secure_client"),
),
(dataframes.BAD_NO_ENTITY, Exception, pytest.lazy_fixture("secure_client")),
(dataframes.NO_FEATURES, Exception, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_ingest_failure(self, test_client, dataframe, exception):
with pytest.raises(exception):
driver_fs = FeatureSet("driver-feature-set")
driver_fs.infer_fields_from_df(dataframe)
test_client.apply(driver_fs)
test_client.ingest(driver_fs, dataframe=dataframe)
@pytest.mark.parametrize(
"dataframe,test_client",
[
(dataframes.ALL_TYPES, pytest.lazy_fixture("client")),
(dataframes.ALL_TYPES, pytest.lazy_fixture("secure_client")),
],
)
def test_feature_set_types_success(self, test_client, dataframe, mocker):
test_client.set_project("project1")
all_types_fs = FeatureSet(
name="all_types",
entities=[Entity(name="user_id", dtype=ValueType.INT64)],
features=[
Feature(name="float_feature", dtype=ValueType.FLOAT),
Feature(name="int64_feature", dtype=ValueType.INT64),
Feature(name="int32_feature", dtype=ValueType.INT32),
Feature(name="string_feature", dtype=ValueType.STRING),
Feature(name="bytes_feature", dtype=ValueType.BYTES),
Feature(name="bool_feature", dtype=ValueType.BOOL),
Feature(name="double_feature", dtype=ValueType.DOUBLE),
Feature(name="float_list_feature", dtype=ValueType.FLOAT_LIST),
Feature(name="int64_list_feature", dtype=ValueType.INT64_LIST),
Feature(name="int32_list_feature", dtype=ValueType.INT32_LIST),
Feature(name="string_list_feature", dtype=ValueType.STRING_LIST),
Feature(name="bytes_list_feature", dtype=ValueType.BYTES_LIST),
Feature(name="double_list_feature", dtype=ValueType.DOUBLE_LIST),
],
max_age=Duration(seconds=3600),
)
test_client.apply(all_types_fs)
mocker.patch.object(
test_client._core_service_stub,
"GetFeatureSet",
return_value=GetFeatureSetResponse(feature_set=all_types_fs.to_proto()),
)
with patch("feast.client.get_producer"):
test_client.ingest(all_types_fs, dataframe)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_client(self, _mocked_obj):
client = Client(
core_url="localhost:50051",
serving_url="localhost:50052",
serving_secure=True,
core_secure=True,
)
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@mock.patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_serving_url(
self, _mocked_obj,
):
client = Client(core_url="localhost:50051", serving_url="localhost:443")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_serving()
_grpc_mock.assert_called_with(
client.serving_url, _mocked_credentials.return_value
)
@patch("grpc.channel_ready_future")
def test_secure_channel_creation_with_secure_core_url(self, _mocked_obj):
client = Client(core_url="localhost:443", serving_url="localhost:50054")
with mock.patch("grpc.secure_channel") as _grpc_mock, mock.patch(
"grpc.ssl_channel_credentials", MagicMock(return_value="test")
) as _mocked_credentials:
client._connect_core()
_grpc_mock.assert_called_with(
client.core_url, _mocked_credentials.return_value
)
| true | true |
f7f567c58571279038ec52ec7ed0bfaace7f0fb9 | 3,802 | py | Python | code/experiments/baseline_ptbdb.py | nkdnnlr/ECG-Heartbeat-Classification | d75012794b17d0b3b8dd9026874c026445cf05c3 | [
"MIT"
] | null | null | null | code/experiments/baseline_ptbdb.py | nkdnnlr/ECG-Heartbeat-Classification | d75012794b17d0b3b8dd9026874c026445cf05c3 | [
"MIT"
] | null | null | null | code/experiments/baseline_ptbdb.py | nkdnnlr/ECG-Heartbeat-Classification | d75012794b17d0b3b8dd9026874c026445cf05c3 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from keras import optimizers, losses, activations, models
from keras.callbacks import (
ModelCheckpoint,
EarlyStopping,
LearningRateScheduler,
ReduceLROnPlateau,
)
from keras.layers import (
Dense,
Input,
Dropout,
Convolution1D,
MaxPool1D,
GlobalMaxPool1D,
GlobalAveragePooling1D,
concatenate,
)
from keras.metrics import AUC
from sklearn.metrics import (
accuracy_score,
f1_score,
average_precision_score,
roc_auc_score,
)
from sklearn.model_selection import train_test_split
df_1 = pd.read_csv("../../data/ECG_Heartbeat_Classification/ptbdb_normal.csv", header=None)
df_2 = pd.read_csv("../../data/ECG_Heartbeat_Classification/ptbdb_abnormal.csv", header=None)
df = pd.concat([df_1, df_2])
df_train, df_test = train_test_split(
df, test_size=0.2, random_state=1337, stratify=df[187]
)
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
def get_model():
    """Build and compile the baseline 1-D CNN for binary PTBDB classification.

    Returns a compiled Keras model that maps a (187, 1) heartbeat to a single
    sigmoid probability (abnormal vs. normal), optimized with Adam(1e-3) on
    binary cross-entropy.
    """
    inp = Input(shape=(187, 1))

    x = inp
    # Three conv stages: two valid-padded convolutions each, followed by
    # 2x max-pooling and light dropout. Filter/kernel sizes follow the
    # original baseline: (16,5), (32,3), (32,3).
    for n_filters, kernel in ((16, 5), (32, 3), (32, 3)):
        x = Convolution1D(
            n_filters, kernel_size=kernel, activation=activations.relu, padding="valid"
        )(x)
        x = Convolution1D(
            n_filters, kernel_size=kernel, activation=activations.relu, padding="valid"
        )(x)
        x = MaxPool1D(pool_size=2)(x)
        x = Dropout(rate=0.1)(x)

    # Wide final conv stage collapsed by global max pooling.
    x = Convolution1D(
        256, kernel_size=3, activation=activations.relu, padding="valid"
    )(x)
    x = Convolution1D(
        256, kernel_size=3, activation=activations.relu, padding="valid"
    )(x)
    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    # Two-layer classifier head ending in a single sigmoid unit.
    x = Dense(64, activation=activations.relu, name="dense_1")(x)
    x = Dense(64, activation=activations.relu, name="dense_2")(x)
    out = Dense(1, activation=activations.sigmoid, name="dense_3_ptbdb")(x)

    model = models.Model(inputs=inp, outputs=out)
    model.compile(
        optimizer=optimizers.Adam(0.001),
        loss=losses.binary_crossentropy,
        metrics=["acc"],
    )
    model.summary()
    return model
# Build the CNN and configure training callbacks: checkpoint the best
# weights by validation accuracy, stop early after 5 stale epochs, and
# reduce the learning rate after 3.
model = get_model()
file_path = "../../models/baseline_cnn_ptbdb.h5"
checkpoint = ModelCheckpoint(
    file_path, monitor="val_acc", verbose=1, save_best_only=True, mode="max"
)
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat]  # used only when fit() below is enabled
# Training is disabled here; previously trained weights are loaded from disk.
# model.fit(X, Y, epochs=1000, verbose=2, callbacks=callbacks_list, validation_split=0.1)
model.load_weights(file_path)
# Keep the raw sigmoid probabilities separate from the thresholded labels:
# ranking metrics (AUROC/AUPRC) must be computed on scores, not on hard 0/1
# predictions, otherwise both are badly understated.
pred_proba = model.predict(X_test)
pred_test = (pred_proba > 0.5).astype(np.int8)
f1 = f1_score(Y_test, pred_test)
acc = accuracy_score(Y_test, pred_test)
auroc = roc_auc_score(Y_test, pred_proba)
auprc = average_precision_score(Y_test, pred_proba)
print("Test f1 score : %s " % f1)
print("Test accuracy score : %s " % acc)
print("AUROC score : %s " % auroc)
print("AUPRC accuracy score : %s " % auprc)
| 31.683333 | 93 | 0.702262 | import pandas as pd
import numpy as np
from keras import optimizers, losses, activations, models
from keras.callbacks import (
ModelCheckpoint,
EarlyStopping,
LearningRateScheduler,
ReduceLROnPlateau,
)
from keras.layers import (
Dense,
Input,
Dropout,
Convolution1D,
MaxPool1D,
GlobalMaxPool1D,
GlobalAveragePooling1D,
concatenate,
)
from keras.metrics import AUC
from sklearn.metrics import (
accuracy_score,
f1_score,
average_precision_score,
roc_auc_score,
)
from sklearn.model_selection import train_test_split
df_1 = pd.read_csv("../../data/ECG_Heartbeat_Classification/ptbdb_normal.csv", header=None)
df_2 = pd.read_csv("../../data/ECG_Heartbeat_Classification/ptbdb_abnormal.csv", header=None)
df = pd.concat([df_1, df_2])
df_train, df_test = train_test_split(
df, test_size=0.2, random_state=1337, stratify=df[187]
)
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
def get_model():
nclass = 1
inp = Input(shape=(187, 1))
img_1 = Convolution1D(
16, kernel_size=5, activation=activations.relu, padding="valid"
)(inp)
img_1 = Convolution1D(
16, kernel_size=5, activation=activations.relu, padding="valid"
)(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(
32, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = Convolution1D(
32, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(
32, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = Convolution1D(
32, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = MaxPool1D(pool_size=2)(img_1)
img_1 = Dropout(rate=0.1)(img_1)
img_1 = Convolution1D(
256, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = Convolution1D(
256, kernel_size=3, activation=activations.relu, padding="valid"
)(img_1)
img_1 = GlobalMaxPool1D()(img_1)
img_1 = Dropout(rate=0.2)(img_1)
dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
dense_1 = Dense(nclass, activation=activations.sigmoid, name="dense_3_ptbdb")(
dense_1
)
model = models.Model(inputs=inp, outputs=dense_1)
opt = optimizers.Adam(0.001)
model.compile(optimizer=opt, loss=losses.binary_crossentropy, metrics=["acc"])
model.summary()
return model
model = get_model()
file_path = "../../models/baseline_cnn_ptbdb.h5"
checkpoint = ModelCheckpoint(
file_path, monitor="val_acc", verbose=1, save_best_only=True, mode="max"
)
early = EarlyStopping(monitor="val_acc", mode="max", patience=5, verbose=1)
redonplat = ReduceLROnPlateau(monitor="val_acc", mode="max", patience=3, verbose=2)
callbacks_list = [checkpoint, early, redonplat]
model.load_weights(file_path)
pred_test = model.predict(X_test)
pred_test = (pred_test > 0.5).astype(np.int8)
f1 = f1_score(Y_test, pred_test)
acc = accuracy_score(Y_test, pred_test)
auroc = roc_auc_score(Y_test, pred_test)
auprc = average_precision_score(Y_test, pred_test)
print("Test f1 score : %s " % f1)
print("Test accuracy score : %s " % acc)
print("AUROC score : %s " % auroc)
print("AUPRC accuracy score : %s " % auprc)
| true | true |
f7f5685ce14e3d724e7c28cd1d370749677e9c42 | 23,661 | py | Python | Improvements/gnnimprove.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | null | null | null | Improvements/gnnimprove.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | null | null | null | Improvements/gnnimprove.py | Harshitha-Nagapudi/NN_Project | f0df170a33b6b35a00929a0104dc6ee04c5062a9 | [
"MIT"
] | null | null | null | import torch.nn as nn
import torch.nn.functional as F
import math
import torch
import torch.optim as optim
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from deeprobust.graph import utils
from copy import deepcopy
import sys
from scipy import stats
import tensorly as tl
tl.set_backend('pytorch')
from tensorly.decomposition import parafac, tucker, tensor_train, matrix_product_state
import numpy as np
import scipy.sparse as sp
from numba import njit
class GraphConvolution(Module):
    """Basic GCN layer, similar to https://github.com/tkipf/pygcn.

    Computes ``adj @ (input @ weight) [+ bias]`` and additionally returns the
    pre-propagation features ``input @ weight`` ("support").
    """

    def __init__(self, in_features, out_features, with_bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if with_bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register a None parameter so `self.bias` always exists.
            self.register_parameter('bias', None)
        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weight (and bias) uniformly in [-1/sqrt(out), 1/sqrt(out)]."""
        bound = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-bound, bound)
        if self.bias is not None:
            self.bias.data.uniform_(-bound, bound)

    def forward(self, input, adj):
        """Graph-convolution forward pass.

        Returns a ``(output, support)`` pair where ``support = input @ weight``
        and ``output = adj @ support (+ bias)``.
        """
        # Pick the sparse or dense matmul depending on the feature tensor.
        matmul = torch.spmm if input.data.is_sparse else torch.mm
        support = matmul(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is None:
            return output, support
        return output + self.bias, support

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)
class TGNN(nn.Module):
    """Tensor-regularized two-layer GCN.

    Stacks several "views" of the graph -- the original adjacency plus,
    depending on ``pros``, a kNN graph of hidden features, a truncated-SVD
    reconstruction, and a Jaccard-pruned adjacency -- into a 3-way tensor,
    factorizes it (CP / Tucker / TT via tensorly), and learns a low-rank
    correction of the factors jointly with the GCN weights.

    Bug fixes relative to the original:
      * ``predict`` no longer references the undefined name ``scores``
        (which raised NameError on every call, including from ``test``).
      * ``_cosine_similarity`` uses its ``a``/``b`` arguments instead of
        the undefined ``features[n1]``/``features[n2]``.
      * ``_drop_dissimilar_edges`` no longer wraps its loop in ``tqdm``,
        which is never imported in this module.
    """

    def __init__(self, nfeat, nhid, nclass,
                dropout=0.5, lr=0.01, weight_decay=5e-4,
                with_relu=True, with_bias=True,
                format='Tucker', rank=32, pros='knn', euclidean=True,
                svd_rank=200, prune_thd=0.01,
                lambda_t = 1e-4, weight_decay_t=1e-5, topk=32,
                device=None):
        """Build the two GCN layers and store all hyper-parameters.

        Parameters
        ----------
        nfeat, nhid, nclass : int
            Input-feature, hidden and output dimensions.
        format : str
            Tensor-decomposition format: 'CP', 'Tucker' or 'TT'.
        rank : int
            Decomposition rank.
        pros : str
            Comma-separated preprocessing views to stack: any of
            'knn', 'svd', 'prune'.
        euclidean : bool
            If True, the pruning view uses Jaccard similarity on binary
            features; otherwise cosine similarity.
        lambda_t : float
            Weight of the tensor-reconstruction regularizers.
        device : torch.device
            Required; all parameters are placed on it.
        """
        super(TGNN, self).__init__()
        assert device is not None, "Please specify 'device'!"
        self.device = device
        self.nfeat = nfeat
        self.hidden_sizes = [nhid]
        self.nclass = nclass
        self.gc1 = GraphConvolution(nfeat, nhid, with_bias=with_bias).to(device)
        self.gc2 = GraphConvolution(nhid, nclass, with_bias=with_bias).to(device)
        self.dropout = dropout
        self.lr = lr
        # Without ReLU the model is linear; weight decay is disabled then.
        if not with_relu:
            self.weight_decay = 0
        else:
            self.weight_decay = weight_decay
        self.with_relu = with_relu
        self.with_bias = with_bias
        self.output = None
        self.best_model = None  # not used
        self.best_output = None  # not used
        self.best_A = None      # best factorized adjacency found during training
        self.adj_norm = None
        self.features = None
        self.format = format
        self.rank = rank
        self.lamda_t = lambda_t
        self.weight_decay_t = weight_decay_t
        self.topk = topk
        self.pros = pros.split(',')
        self.euclidean = euclidean
        self.svd_rank = svd_rank
        self.prune_thd = prune_thd
        self.acc_lst = []

    def initialize(self):
        """Re-initialize the parameters of both GCN layers."""
        self.gc1.reset_parameters()
        self.gc2.reset_parameters()

    def truncatedSVD(self, data, k=50):
        """Truncated SVD on input data.

        Parameters
        ----------
        data :
            input matrix to be decomposed (scipy sparse or dense ndarray).
        k : int
            number of singular values and vectors to compute.

        Returns
        -------
        numpy.array
            rank-k reconstruction ``U @ diag(S) @ V``.
        """
        print('=== GCN-SVD: rank={} ==='.format(k))
        if sp.issparse(data):
            data = data.asfptype()
            U, S, V = sp.linalg.svds(data, k=k)
            print("rank_after = {}".format(len(S.nonzero()[0])))
            diag_S = np.diag(S)
        else:
            U, S, V = np.linalg.svd(data)
            U = U[:, :k]
            S = S[:k]
            V = V[:k, :]
            print("rank_before = {}".format(len(S.nonzero()[0])))
            diag_S = np.diag(S)
            print("rank_after = {}".format(len(diag_S.nonzero()[0])))
        return U @ diag_S @ V

    def fit(self, features, adj, labels, idx_train, idx_val, train_iters=200,
            initialize=True, verbose=False, normalize=True, patience=500, **kwargs):
        """Precompute the requested graph views, normalize, and train.

        NOTE(review): the 'svd'/'prune' branches call ``adj.size(0)`` /
        ``.cpu()`` before the tensor-conversion below, so they assume ``adj``
        is already a torch.Tensor -- confirm against callers.
        """
        self.device = self.gc1.weight.device
        if initialize:
            self.initialize()
        if 'svd' in self.pros:
            # Low-rank view: SVD reconstruction of A + I, clamped to [0, 1].
            self.svd_adj = (adj+torch.eye(adj.size(0)).to(self.device)).cpu().numpy()
            self.svd_adj = self.truncatedSVD(data=self.svd_adj, k=self.svd_rank)
            self.svd_adj = torch.FloatTensor(self.svd_adj).to(self.device)
            self.svd_adj = torch.clamp(self.svd_adj, 0, 1)
            self.svd_adj = self.svd_adj.unsqueeze(0)
        if 'prune' in self.pros:
            # Pruned view: drop edges joining dissimilar feature vectors.
            self.prune_adj = (adj+torch.eye(adj.size(0)).to(self.device)).cpu().numpy()
            self.prune_adj = self.drop_dissimilar_edges(features=features.cpu().numpy(),
                                                        adj=self.prune_adj)
            self.prune_adj = torch.FloatTensor(self.prune_adj.todense()).to(self.device)
            self.prune_adj = self.prune_adj.unsqueeze(0)
        if type(adj) is not torch.Tensor:
            features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device)
        else:
            features = features.to(self.device)
            adj = adj.to(self.device)
            labels = labels.to(self.device)
        self.adj_org = (adj + torch.eye(adj.size(0)).to(self.device))
        if normalize:
            if utils.is_sparse_tensor(adj):
                adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
            else:
                adj_norm = utils.normalize_adj_tensor(adj)
        else:
            adj_norm = adj
        self.adj_norm = adj_norm
        self.features = features
        self.labels = labels
        self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)

    def forward(self, x, adj, A_bar=None, need_feats=False):
        """Two-layer GCN forward pass.

        If ``A_bar`` is given, its first slice is re-normalized and used as the
        propagation matrix for both layers instead of ``adj``. Returns
        log-softmax logits, plus the per-layer features when ``need_feats``.
        """
        x0 = x
        feats = [x0]
        adj1 = adj if A_bar is None else self.normalize(A_bar[0, :, :])
        x, support1 = self.gc1(x, adj1)
        if self.with_relu:
            x = F.relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        x1 = x
        feats.append(x1)
        adj2 = adj if A_bar is None else self.normalize(A_bar[0, :, :])
        x, support2 = self.gc2(x, adj2)
        x2 = x
        feats.append(x2)
        if need_feats:
            return F.log_softmax(x, dim=1), feats
        return F.log_softmax(x, dim=1)

    def _norm_feat(self, X, p='l2'):
        """Row-normalize X by L1 or L2 norm; p=None is a no-op.

        Returns None for an unrecognized ``p``.
        """
        if p == None:
            return X
        if p == 'l1':
            sum = 1 / (X.norm(p=1, dim=1, keepdim=True) + 1e-9).detach()
            sum[torch.isinf(sum)] = 0.
            X = X * sum
            return X
        if p == 'l2':
            sum = 1 / (X.norm(p=2, dim=1, keepdim=True) + 1e-9).detach()
            sum[torch.isinf(sum)] = 0.
            X = X * sum
            return X
        return None

    def normalize(self, adj):
        """Clamp to [0,1], add self-loops, and symmetrically normalize."""
        adj = torch.clamp(adj, 0, 1)
        normalized_adj = self._normalize(adj + torch.eye(adj.shape[0]).to(self.device))
        return normalized_adj

    def _normalize(self, mx):
        """Symmetric normalization D^{-1/2} M D^{-1/2} of a clamped matrix."""
        mx = torch.clamp(mx, 0, 1)
        rowsum = torch.abs(mx).sum(1)
        r_inv = rowsum.pow(-1/2).flatten()
        r_inv[torch.isinf(r_inv)] = 0.
        r_mat_inv = torch.diag(r_inv).detach()
        mx = r_mat_inv @ mx
        mx = mx @ r_mat_inv
        return mx

    def _knn(self, X, k=None):
        """Binary row-wise top-k mask of score matrix X (k defaults to self.topk)."""
        k = k if k is not None else self.topk
        topk = X.topk(k, dim=1)[1]
        ret = torch.zeros_like(X)
        for i in range(ret.size(0)):
            ret[i, topk[i]] = 1
        return ret

    def _get_knn(self, X, k=None):
        """Build a kNN graph over feature rows of X.

        NOTE(review): ``self.is_binary`` is a *list* here (set in _init_td),
        so this branch is taken whenever the list is non-empty -- i.e. the
        smoothed-Jaccard similarity is always used. Confirm intent.
        """
        if self.is_binary:
            intersection = torch.mm(X, X.t())
            union = X.size(1) - torch.mm((1 - X), (1 - X).t())
            smooth = 1
            S = (intersection + smooth) / (union + smooth)
        else:
            X = self._norm_feat(X, p='l2')
            S = -torch.cdist(X, X)
        Z = self._knn(S, k=k).clone().detach().unsqueeze(0)
        return Z

    def _init_td(self):
        """Stack the graph views into tensor T and initialize its decomposition.

        Registers zero-initialized learnable deltas (f0/f1/f2, core/weights)
        on top of frozen factor copies (of0/..., ocore/oweights) so training
        optimizes a correction of the initial factorization.
        """
        A = torch.stack([self.adj_org for _ in range(1)], dim=0)
        self.OA = A
        _, X = self.forward(self.features, self.adj_norm, A_bar=A, need_feats=True)
        self.T = [A]
        self.is_binary = [1]
        self.reg_name = ['ADJ']
        if 'knn' in self.pros:
            Z = self._get_knn(X[0])
            self.Z = Z
            self.T.append(self.Z)
            self.is_binary.append(1)
            self.reg_name.append('KNN%d'%self.topk)
        if 'svd' in self.pros:
            self.T.append(self.svd_adj)
            self.is_binary.append(0)
            self.reg_name.append('SVD%d'%self.svd_rank)
        if 'prune' in self.pros:
            self.T.append(self.prune_adj)
            self.is_binary.append(1)
            self.reg_name.append('PRUNE%.6f'%self.prune_thd)
        self.T = torch.cat(self.T, dim=0)
        T = self.T
        self.A = T[:1, :, :]
        if self.format == 'CP':
            # CP decomposes T transposed to avoid an SVD OOM (see forward_T).
            weights, factors = parafac(T.transpose(1, 0), self.rank, init='random', normalize_factors=True)
            self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
            self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
            self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
            self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
            self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
            self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))
            self.register_parameter('weights', Parameter(torch.zeros_like(weights).to(self.device), requires_grad=True))
            self.register_parameter('oweights', Parameter((weights).to(self.device), requires_grad=False))
        elif self.format == 'Tucker':
            core, factors = tucker(T, rank=self.rank, init='random')
            self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
            self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
            self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
            self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
            self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
            self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))
            self.register_parameter('core',Parameter(torch.zeros_like(core).to(self.device), requires_grad=True))
            self.register_parameter('ocore', Parameter((core).to(self.device), requires_grad=False))
        elif self.format == 'TT':
            factors = matrix_product_state(T, rank=[1, self.rank, self.rank, 1])
            print([_.size() for _ in factors])
            self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
            self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
            self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
            self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
            self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
            self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))

    def forward_T(self):
        """Reconstruct T_bar from (frozen factors + learnable deltas).

        Side effects: sets ``self.T_bar`` and ``self.A_bar`` (first slice).
        """
        if self.format == 'CP':
            factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
            core = self.weights + self.oweights
            T_bar = tl.cp_to_tensor((core, factors))
            T_bar = T_bar.transpose(1, 0)  # undo the transpose used at init (avoid svd oom)
        elif self.format == 'Tucker':
            factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
            core = self.core + self.ocore
            T_bar = tl.tucker_to_tensor((core, factors))
        elif self.format == 'TT':
            factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
            T_bar = tl.tt_to_tensor(factors)
        self.T_bar = T_bar
        self.A_bar = T_bar[:1]

    def rec_loss(self, X, Y):
        """Mean squared reconstruction error, with X clamped to [0, 1]."""
        X = torch.clamp(X, 0, 1)
        return ((X - Y) ** 2).mean()

    def _gcn_step(self, i, optimizer_G, optimizer_T, idx_train, labels):
        """One joint optimization step of GCN weights and tensor deltas.

        NOTE(review): the per-view losses are already scaled by ``lamda_t``
        inside the loop and the total multiplies by ``lamda_t`` again, so the
        effective regularization weight is ``lamda_t ** 2`` -- confirm intent.
        """
        self.train()
        optimizer_G.zero_grad()
        if optimizer_T is not None:
            optimizer_T.zero_grad()
        self.forward_T()
        output, X = self.forward(self.features, self.adj_norm, A_bar=self.A_bar, need_feats=True)
        loss_cls = F.nll_loss(output[idx_train], labels[idx_train])
        loss_reg, loss_adj = 0, 0
        if optimizer_T is not None:
            # Reconstruction loss of every view; slice 0 is the adjacency.
            for r in range(self.T.size(0)):
                S0_normed = torch.clamp(self.T_bar[r], 0, 1)
                S0_normed_t = torch.clamp(self.T[r].detach(), 0, 1)
                loss = self.rec_loss(S0_normed, S0_normed_t)
                if r == 0:
                    loss_adj = loss_adj + self.lamda_t * loss
                else:
                    loss_reg = loss_reg + self.lamda_t * loss
            sys.stdout.flush()
        loss_train = loss_cls + (loss_reg * self.lamda_t + loss_adj * self.lamda_t)
        loss_train.backward()
        optimizer_G.step()
        if optimizer_T is not None:
            optimizer_T.step()

    def _train_with_val(self, labels, idx_train, idx_val, train_iters, verbose):
        """Train and keep the weights / A_bar of the best validation epoch."""
        if verbose:
            print('=== Initialization ===')
        self.eval()
        self._init_td()
        self.forward_T()
        if verbose:
            print('=== pre-training tensor model ===')
        # Split parameters: GCN layer weights (gc*) vs tensor-factor deltas.
        g_parameters = []
        t_parameters = []
        for name, param in self.named_parameters():
            if name[:2] == 'gc':
                g_parameters.append(param)
            else:
                t_parameters.append(param)
        optimizer_T = optim.Adam(t_parameters, lr=self.lr, weight_decay=self.weight_decay_t) if len(t_parameters) > 0 else None
        optimizer_G = optim.Adam(g_parameters, lr=self.lr, weight_decay=self.weight_decay)
        if verbose:
            print('=== training gcn model ===')
        best_loss_val = 1e10
        best_acc_val = 0
        for i in range(train_iters):
            self._gcn_step(i, optimizer_G, optimizer_T, idx_train, labels)
            self.eval()
            self.forward_T()
            output = self.forward(self.features, self.adj_norm, self.A_bar)
            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = utils.accuracy(output[idx_val], labels[idx_val])
            loss_cls = F.nll_loss(output[idx_train], labels[idx_train])
            acc_train = utils.accuracy(output[idx_train], labels[idx_train])
            # Snapshot on best val loss AND on best val accuracy; the most
            # recent snapshot (accuracy takes precedence within an epoch) wins.
            if best_loss_val > loss_val:
                best_loss_val = loss_val
                self.output = output
                self.best_A = self.A_bar.clone().detach()
                weights = deepcopy(self.state_dict())
            if acc_val > best_acc_val:
                best_acc_val = acc_val
                self.output = output
                self.best_A = self.A_bar.clone().detach()
                weights = deepcopy(self.state_dict())
            print('Epoch: {:04d}'.format(i + 1),
                  'loss_val: {:.4f}'.format(loss_val.item()),
                  'loss_train: {:.4f}'.format(loss_cls.item()),
                  'acc_val: {:.4f}'.format(acc_val.item()),
                  'acc_train: {:.4f}'.format(acc_train.item()))
        if verbose:
            print('=== picking the best model according to the performance on validation ===')
        self.load_state_dict(weights)

    def test(self, idx_test):
        """Evaluate GCN performance on test set.

        Parameters
        ----------
        idx_test :
            node testing indices
        """
        self.eval()
        output = self.predict()
        loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
        acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
        print("Test set results:",
              "loss= {:.4f}".format(loss_test.item()),
              "accuracy= {:.4f}".format(acc_test.item()))
        return acc_test

    def predict(self):
        """Return log-softmax predictions using the best factorized adjacency.

        BUG FIX: the original gated ``self.eval()`` on an undefined name
        ``scores`` (apparently pasted from unrelated attack code), raising
        NameError on every call; always switch to eval mode instead.
        """
        self.eval()
        return self.forward(self.features, self.adj_norm, A_bar=self.best_A)

    def drop_dissimilar_edges(self, features, adj, metric='similarity'):
        """Drop dissimilar edges. (Faster version using numba.)

        Works on the upper triangle of the CSR adjacency and mirrors the
        result back to a symmetric matrix.
        """
        if not sp.issparse(adj):
            adj = sp.csr_matrix(adj)
        adj_triu = sp.triu(adj, format='csr')
        if metric == 'distance':
            removed_cnt = dropedge_dis(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
        else:
            if self.euclidean:
                removed_cnt = dropedge_prune(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
            else:
                removed_cnt = dropedge_cosine(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
        print('removed %s edges in the original graph' % removed_cnt)
        modified_adj = adj_triu + adj_triu.transpose()
        return modified_adj

    def _drop_dissimilar_edges(self, features, adj):
        """Drop dissimilar edges. (Slower pure-Python version.)"""
        if not sp.issparse(adj):
            adj = sp.csr_matrix(adj)
        modified_adj = adj.copy().tolil()
        # preprocessing based on features
        print('=== GCN-Jaccrad ===')
        edges = np.array(modified_adj.nonzero()).T
        removed_cnt = 0
        # BUG FIX: the original wrapped this loop in tqdm(), which is never
        # imported in this module and raised NameError when called.
        for edge in edges:
            n1 = edge[0]
            n2 = edge[1]
            # Only process each undirected edge once (upper triangle).
            if n1 > n2:
                continue
            if self.euclidean:
                J = self._prune_similarity(features[n1], features[n2])
                if J < self.prune_thd:
                    modified_adj[n1, n2] = 0
                    modified_adj[n2, n1] = 0
                    removed_cnt += 1
            else:
                # For not binary feature, use cosine similarity
                C = self._cosine_similarity(features[n1], features[n2])
                if C < self.prune_thd:
                    modified_adj[n1, n2] = 0
                    modified_adj[n2, n1] = 0
                    removed_cnt += 1
        print('removed %s edges in the original graph' % removed_cnt)
        return modified_adj

    # New Updation
    def feature_scores(self):
        """Compute feature scores for all possible feature changes.

        NOTE(review): this method references attributes and helpers that are
        never defined in this class (``cooc_constraint``, ``influencer_nodes``,
        ``X_obs``, ``label_u``, ``compute_logits``, ...) -- it appears to be
        copied from a Nettack-style attack implementation and is dead code
        here; calling it raises AttributeError. Kept as-is pending removal.
        """
        if self.cooc_constraint is None:
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]
        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1
        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T
        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]
        scores = surrogate_loss - grads
        return sorted_ixs[::-1], scores.A1[::-1]

    def _prune_similarity(self, a, b):
        """Jaccard similarity of two sparse binary feature vectors."""
        intersection = a.multiply(b).count_nonzero()
        J = intersection * 1.0 / (a.count_nonzero() + b.count_nonzero() - intersection)
        return J

    def _cosine_similarity(self, a, b):
        """Cosine-style similarity of two dense feature vectors.

        BUG FIX: the original referenced the undefined names ``features[n1]``
        and ``features[n2]`` (NameError); use the ``a``/``b`` arguments.
        NOTE(review): the denominator is sqrt(|a|^2 + |b|^2), matching the
        module-level dropedge_cosine, not the usual |a|*|b| -- confirm intent.
        """
        inner_product = (a * b).sum()
        C = inner_product / np.sqrt(np.square(a).sum() + np.square(b).sum())
        return C
def dropedge_prune(A, iA, jA, features, threshold):
    """Zero out upper-triangular CSR entries whose endpoint features have
    Jaccard similarity below ``threshold``.

    ``A``/``iA``/``jA`` are the data/indptr/indices arrays of a CSR matrix;
    ``A`` is modified in place. Returns the number of removed edges.
    """
    removed = 0
    n_rows = len(iA) - 1
    for src in range(n_rows):
        # Entries iA[src]..iA[src+1] are the edges leaving node `src`.
        for idx in range(iA[src], iA[src + 1]):
            dst = jA[idx]
            fa = features[src]
            fb = features[dst]
            both = np.count_nonzero(np.multiply(fa, fb))
            union = np.count_nonzero(fa) + np.count_nonzero(fb) - both
            jaccard = both * 1.0 / union
            if jaccard < threshold:
                A[idx] = 0
                removed += 1
    return removed
@njit
def dropedge_cosine(A, iA, jA, features, threshold):
    """Zero CSR entries (in place) whose endpoint features have similarity
    below ``threshold``; returns the number of removed edges.

    ``A``/``iA``/``jA`` are the data/indptr/indices arrays of an upper-
    triangular CSR adjacency; numba-jitted, so only nopython-compatible
    constructs are used.
    """
    removed_cnt = 0
    for row in range(len(iA)-1):
        # Entries iA[row]..iA[row+1] are the edges leaving node `row`.
        for i in range(iA[row], iA[row+1]):
            n1 = row
            n2 = jA[i]
            a, b = features[n1], features[n2]
            inner_product = (a * b).sum()
            # NOTE(review): denominator is sqrt(|a|^2 + |b|^2), not the usual
            # |a|*|b| of cosine similarity -- confirm this is intentional.
            C = inner_product / (np.sqrt(np.square(a).sum() + np.square(b).sum())+ 1e-6)
            if C < threshold:
                A[i] = 0
                removed_cnt += 1
    return removed_cnt
@njit
def dropedge_dis(A, iA, jA, features, threshold):
    """Zero CSR entries (in place) whose endpoint features are farther apart
    than ``threshold`` in Euclidean distance; returns the removal count.

    ``A``/``iA``/``jA`` are the data/indptr/indices arrays of an upper-
    triangular CSR adjacency; numba-jitted.
    """
    removed_cnt = 0
    for row in range(len(iA)-1):
        for i in range(iA[row], iA[row+1]):
            n1 = row
            n2 = jA[i]
            C = np.linalg.norm(features[n1] - features[n2])
            if C > threshold:
                A[i] = 0
                removed_cnt += 1
    return removed_cnt
@njit
def dropedge_both(A, iA, jA, features, threshold1=2.5, threshold2=0.01):
    """Zero CSR entries (in place) whose endpoints are dissimilar by EITHER
    criterion: Euclidean distance above ``threshold1`` OR similarity below
    ``threshold2``. Returns the number of removed edges.

    ``A``/``iA``/``jA`` are the data/indptr/indices arrays of an upper-
    triangular CSR adjacency; numba-jitted.
    """
    removed_cnt = 0
    for row in range(len(iA)-1):
        for i in range(iA[row], iA[row+1]):
            n1 = row
            n2 = jA[i]
            C1 = np.linalg.norm(features[n1] - features[n2])
            a, b = features[n1], features[n2]
            inner_product = (a * b).sum()
            # Same sqrt(|a|^2 + |b|^2) denominator as dropedge_cosine above.
            C2 = inner_product / (np.sqrt(np.square(a).sum() + np.square(b).sum())+ 1e-6)
            # BUG FIX: original tested ``threshold2 < 0`` (always False for the
            # default 0.01), so C2 was computed but never used; compare C2 to
            # threshold2 as in deeprobust's reference dropedge_both.
            if C1 > threshold1 or C2 < threshold2:
                A[i] = 0
                removed_cnt += 1
    return removed_cnt
| 37.261417 | 131 | 0.571489 | import torch.nn as nn
import torch.nn.functional as F
import math
import torch
import torch.optim as optim
from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module
from deeprobust.graph import utils
from copy import deepcopy
import sys
from scipy import stats
import tensorly as tl
tl.set_backend('pytorch')
from tensorly.decomposition import parafac, tucker, tensor_train, matrix_product_state
import numpy as np
import scipy.sparse as sp
from numba import njit
class GraphConvolution(Module):
    """Basic GCN layer: computes ``adj @ (input @ weight) [+ bias]`` and also
    returns the pre-propagation features ``input @ weight`` ("support")."""
    def __init__(self, in_features, out_features, with_bias=True):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if with_bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            # Register a None parameter so `self.bias` always exists.
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Uniform init in [-1/sqrt(out_features), 1/sqrt(out_features)].
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    def forward(self, input, adj):
        """Forward pass; returns the pair (output, support)."""
        # Use the sparse matmul when the feature tensor is sparse.
        if input.data.is_sparse:
            support = torch.spmm(input, self.weight)
        else:
            support = torch.mm(input, self.weight)
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias, support
        else:
            return output, support
    def __repr__(self):
        # E.g. "GraphConvolution (1433 -> 16)".
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
class TGNN(nn.Module):
def __init__(self, nfeat, nhid, nclass,
dropout=0.5, lr=0.01, weight_decay=5e-4,
with_relu=True, with_bias=True,
format='Tucker', rank=32, pros='knn', euclidean=True,
svd_rank=200, prune_thd=0.01,
lambda_t = 1e-4, weight_decay_t=1e-5, topk=32,
device=None):
super(TGNN, self).__init__()
assert device is not None, "Please specify 'device'!"
self.device = device
self.nfeat = nfeat
self.hidden_sizes = [nhid]
self.nclass = nclass
self.gc1 = GraphConvolution(nfeat, nhid, with_bias=with_bias).to(device)
self.gc2 = GraphConvolution(nhid, nclass, with_bias=with_bias).to(device)
self.dropout = dropout
self.lr = lr
if not with_relu:
self.weight_decay = 0
else:
self.weight_decay = weight_decay
self.with_relu = with_relu
self.with_bias = with_bias
self.output = None
self.best_model = None
self.best_output = None
self.best_A = None
self.adj_norm = None
self.features = None
self.format = format
self.rank = rank
self.lamda_t = lambda_t
self.weight_decay_t = weight_decay_t
self.topk = topk
self.pros = pros.split(',')
self.euclidean = euclidean
self.svd_rank = svd_rank
self.prune_thd = prune_thd
self.acc_lst = []
def initialize(self):
self.gc1.reset_parameters()
self.gc2.reset_parameters()
def truncatedSVD(self, data, k=50):
print('=== GCN-SVD: rank={} ==='.format(k))
if sp.issparse(data):
data = data.asfptype()
U, S, V = sp.linalg.svds(data, k=k)
print("rank_after = {}".format(len(S.nonzero()[0])))
diag_S = np.diag(S)
else:
U, S, V = np.linalg.svd(data)
U = U[:, :k]
S = S[:k]
V = V[:k, :]
print("rank_before = {}".format(len(S.nonzero()[0])))
diag_S = np.diag(S)
print("rank_after = {}".format(len(diag_S.nonzero()[0])))
return U @ diag_S @ V
def fit(self, features, adj, labels, idx_train, idx_val, train_iters=200,
initialize=True, verbose=False, normalize=True, patience=500, **kwargs):
self.device = self.gc1.weight.device
if initialize:
self.initialize()
if 'svd' in self.pros:
self.svd_adj = (adj+torch.eye(adj.size(0)).to(self.device)).cpu().numpy()
self.svd_adj = self.truncatedSVD(data=self.svd_adj, k=self.svd_rank)
self.svd_adj = torch.FloatTensor(self.svd_adj).to(self.device)
self.svd_adj = torch.clamp(self.svd_adj, 0, 1)
self.svd_adj = self.svd_adj.unsqueeze(0)
if 'prune' in self.pros:
self.prune_adj = (adj+torch.eye(adj.size(0)).to(self.device)).cpu().numpy()
self.prune_adj = self.drop_dissimilar_edges(features=features.cpu().numpy(),
adj=self.prune_adj)
self.prune_adj = torch.FloatTensor(self.prune_adj.todense()).to(self.device)
self.prune_adj = self.prune_adj.unsqueeze(0)
if type(adj) is not torch.Tensor:
features, adj, labels = utils.to_tensor(features, adj, labels, device=self.device)
else:
features = features.to(self.device)
adj = adj.to(self.device)
labels = labels.to(self.device)
self.adj_org = (adj + torch.eye(adj.size(0)).to(self.device))
if normalize:
if utils.is_sparse_tensor(adj):
adj_norm = utils.normalize_adj_tensor(adj, sparse=True)
else:
adj_norm = utils.normalize_adj_tensor(adj)
else:
adj_norm = adj
self.adj_norm = adj_norm
self.features = features
self.labels = labels
self._train_with_val(labels, idx_train, idx_val, train_iters, verbose)
def forward(self, x, adj, A_bar=None, need_feats=False):
x0 = x
feats = [x0]
adj1 = adj if A_bar is None else self.normalize(A_bar[0, :, :])
x, support1 = self.gc1(x, adj1)
if self.with_relu:
x = F.relu(x)
x = F.dropout(x, self.dropout, training=self.training)
x1 = x
feats.append(x1)
adj2 = adj if A_bar is None else self.normalize(A_bar[0, :, :])
x, support2 = self.gc2(x, adj2)
x2 = x
feats.append(x2)
if need_feats:
return F.log_softmax(x, dim=1), feats
return F.log_softmax(x, dim=1)
def _norm_feat(self, X, p='l2'):
if p == None:
return X
if p == 'l1':
sum = 1 / (X.norm(p=1, dim=1, keepdim=True) + 1e-9).detach()
sum[torch.isinf(sum)] = 0.
X = X * sum
return X
if p == 'l2':
sum = 1 / (X.norm(p=2, dim=1, keepdim=True) + 1e-9).detach()
sum[torch.isinf(sum)] = 0.
X = X * sum
return X
return None
def normalize(self, adj):
adj = torch.clamp(adj, 0, 1)
normalized_adj = self._normalize(adj + torch.eye(adj.shape[0]).to(self.device))
return normalized_adj
def _normalize(self, mx):
mx = torch.clamp(mx, 0, 1)
rowsum = torch.abs(mx).sum(1)
r_inv = rowsum.pow(-1/2).flatten()
r_inv[torch.isinf(r_inv)] = 0.
r_mat_inv = torch.diag(r_inv).detach()
mx = r_mat_inv @ mx
mx = mx @ r_mat_inv
return mx
def _knn(self, X, k=None):
k = k if k is not None else self.topk
topk = X.topk(k, dim=1)[1]
ret = torch.zeros_like(X)
for i in range(ret.size(0)):
ret[i, topk[i]] = 1
return ret
def _get_knn(self, X, k=None):
if self.is_binary:
intersection = torch.mm(X, X.t())
union = X.size(1) - torch.mm((1 - X), (1 - X).t())
smooth = 1
S = (intersection + smooth) / (union + smooth)
else:
X = self._norm_feat(X, p='l2')
S = -torch.cdist(X, X)
Z = self._knn(S, k=k).clone().detach().unsqueeze(0)
return Z
def _init_td(self):
A = torch.stack([self.adj_org for _ in range(1)], dim=0)
self.OA = A
_, X = self.forward(self.features, self.adj_norm, A_bar=A, need_feats=True)
self.T = [A]
self.is_binary = [1]
self.reg_name = ['ADJ']
if 'knn' in self.pros:
Z = self._get_knn(X[0])
self.Z = Z
self.T.append(self.Z)
self.is_binary.append(1)
self.reg_name.append('KNN%d'%self.topk)
if 'svd' in self.pros:
self.T.append(self.svd_adj)
self.is_binary.append(0)
self.reg_name.append('SVD%d'%self.svd_rank)
if 'prune' in self.pros:
self.T.append(self.prune_adj)
self.is_binary.append(1)
self.reg_name.append('PRUNE%.6f'%self.prune_thd)
self.T = torch.cat(self.T, dim=0)
T = self.T
self.A = T[:1, :, :]
if self.format == 'CP':
weights, factors = parafac(T.transpose(1, 0), self.rank, init='random', normalize_factors=True)
self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))
self.register_parameter('weights', Parameter(torch.zeros_like(weights).to(self.device), requires_grad=True))
self.register_parameter('oweights', Parameter((weights).to(self.device), requires_grad=False))
elif self.format == 'Tucker':
core, factors = tucker(T, rank=self.rank, init='random')
self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))
self.register_parameter('core',Parameter(torch.zeros_like(core).to(self.device), requires_grad=True))
self.register_parameter('ocore', Parameter((core).to(self.device), requires_grad=False))
elif self.format == 'TT':
factors = matrix_product_state(T, rank=[1, self.rank, self.rank, 1])
print([_.size() for _ in factors])
self.register_parameter('f0', Parameter(torch.zeros_like(factors[0]).to(self.device), requires_grad=True))
self.register_parameter('f1', Parameter(torch.zeros_like(factors[1]).to(self.device), requires_grad=True))
self.register_parameter('f2', Parameter(torch.zeros_like(factors[2]).to(self.device), requires_grad=True))
self.register_parameter('of0', Parameter((factors[0]).to(self.device), requires_grad=False))
self.register_parameter('of1', Parameter((factors[1]).to(self.device), requires_grad=False))
self.register_parameter('of2', Parameter((factors[2]).to(self.device), requires_grad=False))
def forward_T(self):
if self.format == 'CP':
factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
core = self.weights + self.oweights
T_bar = tl.cp_to_tensor((core, factors))
T_bar = T_bar.transpose(1, 0)
elif self.format == 'Tucker':
factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
core = self.core + self.ocore
T_bar = tl.tucker_to_tensor((core, factors))
elif self.format == 'TT':
factors = [self.f0 + self.of0, self.f1 + self.of1, self.f2 + self.of2]
T_bar = tl.tt_to_tensor(factors)
self.T_bar = T_bar
self.A_bar = T_bar[:1]
def rec_loss(self, X, Y):
X = torch.clamp(X, 0, 1)
return ((X - Y) ** 2).mean()
def _gcn_step(self, i, optimizer_G, optimizer_T, idx_train, labels):
self.train()
optimizer_G.zero_grad()
if optimizer_T is not None:
optimizer_T.zero_grad()
self.forward_T()
output, X = self.forward(self.features, self.adj_norm, A_bar=self.A_bar, need_feats=True)
loss_cls = F.nll_loss(output[idx_train], labels[idx_train])
loss_reg, loss_adj = 0, 0
if optimizer_T is not None:
for r in range(self.T.size(0)):
S0_normed = torch.clamp(self.T_bar[r], 0, 1)
S0_normed_t = torch.clamp(self.T[r].detach(), 0, 1)
loss = self.rec_loss(S0_normed, S0_normed_t)
if r == 0:
loss_adj = loss_adj + self.lamda_t * loss
else:
loss_reg = loss_reg + self.lamda_t * loss
sys.stdout.flush()
loss_train = loss_cls + (loss_reg * self.lamda_t + loss_adj * self.lamda_t)
loss_train.backward()
optimizer_G.step()
if optimizer_T is not None:
optimizer_T.step()
def _train_with_val(self, labels, idx_train, idx_val, train_iters, verbose):
if verbose:
print('=== Initialization ===')
self.eval()
self._init_td()
self.forward_T()
if verbose:
print('=== pre-training tensor model ===')
g_parameters = []
t_parameters = []
for name, param in self.named_parameters():
if name[:2] == 'gc':
g_parameters.append(param)
else:
t_parameters.append(param)
optimizer_T = optim.Adam(t_parameters, lr=self.lr, weight_decay=self.weight_decay_t) if len(t_parameters) > 0 else None
optimizer_G = optim.Adam(g_parameters, lr=self.lr, weight_decay=self.weight_decay)
if verbose:
print('=== training gcn model ===')
best_loss_val = 1e10
best_acc_val = 0
for i in range(train_iters):
self._gcn_step(i, optimizer_G, optimizer_T, idx_train, labels)
self.eval()
self.forward_T()
output = self.forward(self.features, self.adj_norm, self.A_bar)
loss_val = F.nll_loss(output[idx_val], labels[idx_val])
acc_val = utils.accuracy(output[idx_val], labels[idx_val])
loss_cls = F.nll_loss(output[idx_train], labels[idx_train])
acc_train = utils.accuracy(output[idx_train], labels[idx_train])
if best_loss_val > loss_val:
best_loss_val = loss_val
self.output = output
self.best_A = self.A_bar.clone().detach()
weights = deepcopy(self.state_dict())
if acc_val > best_acc_val:
best_acc_val = acc_val
self.output = output
self.best_A = self.A_bar.clone().detach()
weights = deepcopy(self.state_dict())
print('Epoch: {:04d}'.format(i + 1),
'loss_val: {:.4f}'.format(loss_val.item()),
'loss_train: {:.4f}'.format(loss_cls.item()),
'acc_val: {:.4f}'.format(acc_val.item()),
'acc_train: {:.4f}'.format(acc_train.item()))
if verbose:
print('=== picking the best model according to the performance on validation ===')
self.load_state_dict(weights)
def test(self, idx_test):
self.eval()
output = self.predict()
loss_test = F.nll_loss(output[idx_test], self.labels[idx_test])
acc_test = utils.accuracy(output[idx_test], self.labels[idx_test])
print("Test set results:",
"loss= {:.4f}".format(loss_test.item()),
"accuracy= {:.4f}".format(acc_test.item()))
return acc_test
def predict(self):
if(scores.A1 ==1):
self.eval()
else:
print("features not considered as they're unnoticeable")
return self.forward(self.features, self.adj_norm, A_bar=self.best_A)
def drop_dissimilar_edges(self, features, adj, metric='similarity'):
if not sp.issparse(adj):
adj = sp.csr_matrix(adj)
adj_triu = sp.triu(adj, format='csr')
if metric == 'distance':
removed_cnt = dropedge_dis(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
else:
if self.euclidean:
removed_cnt = dropedge_prune(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
else:
removed_cnt = dropedge_cosine(adj_triu.data, adj_triu.indptr, adj_triu.indices, features, threshold=self.prune_thd)
print('removed %s edges in the original graph' % removed_cnt)
modified_adj = adj_triu + adj_triu.transpose()
return modified_adj
def _drop_dissimilar_edges(self, features, adj):
if not sp.issparse(adj):
adj = sp.csr_matrix(adj)
modified_adj = adj.copy().tolil()
# preprocessing based on features
print('=== GCN-Jaccrad ===')
edges = np.array(modified_adj.nonzero()).T
removed_cnt = 0
for edge in tqdm(edges):
n1 = edge[0]
n2 = edge[1]
if n1 > n2:
continue
if self.euclidean:
J = self._prune_similarity(features[n1], features[n2])
if J < self.prune_thd:
modified_adj[n1, n2] = 0
modified_adj[n2, n1] = 0
removed_cnt += 1
else:
# For not binary feature, use cosine similarity
C = self._cosine_similarity(features[n1], features[n2])
if C < self.prune_thd:
modified_adj[n1, n2] = 0
modified_adj[n2, n1] = 0
removed_cnt += 1
print('removed %s edges in the original graph' % removed_cnt)
return modified_adj
    # Newly added: scoring of candidate feature perturbations
    def feature_scores(self):
        """Rank candidate (node, feature) flips by their estimated attack impact.

        Returns a tuple ``(sorted_ixs, scores)``: index pairs ordered from
        highest to lowest estimated impact, and the corresponding
        surrogate-loss scores.

        NOTE(review): relies on ``self.cooc_constraint``, ``self.influencer_nodes``,
        ``self.X_obs``, ``self.label_u`` and the gradient/logit helpers being
        initialised elsewhere — confirm callers set these up first.
        """
        if self.cooc_constraint is None:
            # Lazily build the co-occurrence constraint for the influencer nodes.
            self.compute_cooccurrence_constraint(self.influencer_nodes)
        logits = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits)
        # Gradient of the surrogate margin: true class minus best wrong class.
        gradient = self.gradient_wrt_x(self.label_u) - self.gradient_wrt_x(best_wrong_class)
        surrogate_loss = logits[self.label_u] - logits[best_wrong_class]
        # Flip the gradient sign for features that are currently set
        # (flipping such a feature removes it).
        gradients_flipped = (gradient * -1).tolil()
        gradients_flipped[self.X_obs.nonzero()] *= -1
        X_influencers = sp.lil_matrix(self.X_obs.shape)
        X_influencers[self.influencer_nodes] = self.X_obs[self.influencer_nodes]
        # Keep only flips allowed by the co-occurrence constraint or already present.
        gradients_flipped = gradients_flipped.multiply((self.cooc_constraint + X_influencers) > 0)
        nnz_ixs = np.array(gradients_flipped.nonzero()).T
        sorting = np.argsort(gradients_flipped[tuple(nnz_ixs.T)]).A1
        sorted_ixs = nnz_ixs[sorting]
        grads = gradients_flipped[tuple(nnz_ixs[sorting].T)]
        scores = surrogate_loss - grads
        # Reverse so the highest-impact candidates come first.
        return sorted_ixs[::-1], scores.A1[::-1]
def _prune_similarity(self, a, b):
intersection = a.multiply(b).count_nonzero()
J = intersection * 1.0 / (a.count_nonzero() + b.count_nonzero() - intersection)
return J
def _cosine_similarity(self, a, b):
inner_product = (features[n1] * features[n2]).sum()
C = inner_product / np.sqrt(np.square(a).sum() + np.square(b).sum())
return C
def dropedge_prune(A, iA, jA, features, threshold):
    """Zero out CSR entries whose endpoints' Jaccard similarity is below ``threshold``.

    ``A``/``iA``/``jA`` are the CSR data/indptr/indices arrays of the
    (upper-triangular) adjacency matrix. ``A`` is mutated in place; the number
    of removed edges is returned.
    """
    removed_cnt = 0
    n_rows = len(iA) - 1
    for src in range(n_rows):
        # Entries iA[src]..iA[src+1] are the stored columns of row ``src``.
        for k in range(iA[src], iA[src + 1]):
            dst = jA[k]
            a, b = features[src], features[dst]
            common = np.count_nonzero(np.multiply(a, b))
            union = np.count_nonzero(a) + np.count_nonzero(b) - common
            if common * 1.0 / union < threshold:
                A[k] = 0
                removed_cnt += 1
    return removed_cnt
@njit
def dropedge_cosine(A, iA, jA, features, threshold):
    """Zero out CSR entries whose endpoint similarity is below ``threshold``.

    Numba-jitted. ``A``/``iA``/``jA`` are the CSR data/indptr/indices arrays
    of the (upper-triangular) adjacency; ``A`` is mutated in place and the
    number of removed edges is returned.

    NOTE(review): the denominator is sqrt(||a||^2 + ||b||^2) + 1e-6, not the
    standard cosine denominator ||a||*||b|| — confirm this is intended.
    """
    removed_cnt = 0
    for row in range(len(iA)-1):
        # Entries iA[row]..iA[row+1] are the stored columns of this row.
        for i in range(iA[row], iA[row+1]):
            n1 = row
            n2 = jA[i]
            a, b = features[n1], features[n2]
            inner_product = (a * b).sum()
            C = inner_product / (np.sqrt(np.square(a).sum() + np.square(b).sum())+ 1e-6)
            if C < threshold:
                # Zeroed in place; the caller rebuilds the symmetric matrix.
                A[i] = 0
                removed_cnt += 1

    return removed_cnt
@njit
def dropedge_dis(A, iA, jA, features, threshold):
    """Remove CSR edges whose endpoints are farther apart than ``threshold``.

    Euclidean-distance variant of the edge-pruning helpers; mutates ``A`` in
    place and returns the number of removed edges.
    """
    removed_cnt = 0
    for src in range(len(iA) - 1):
        # Entries iA[src]..iA[src+1] are the stored columns of row ``src``.
        for k in range(iA[src], iA[src + 1]):
            dst = jA[k]
            if np.linalg.norm(features[src] - features[dst]) > threshold:
                A[k] = 0
                removed_cnt += 1
    return removed_cnt
@njit
def dropedge_both(A, iA, jA, features, threshold1=2.5, threshold2=0.01):
    """Remove CSR edges failing either the distance or the similarity test.

    An edge is dropped when the Euclidean distance between its endpoint
    features exceeds ``threshold1`` OR their similarity score falls below
    ``threshold2``. Mutates ``A`` in place and returns the removed count.

    Fix: the original condition tested ``threshold2 < 0`` — a constant, which
    made the similarity criterion dead code — instead of ``C2 < threshold2``.
    """
    removed_cnt = 0
    for row in range(len(iA)-1):
        for i in range(iA[row], iA[row+1]):
            n1 = row
            n2 = jA[i]
            C1 = np.linalg.norm(features[n1] - features[n2])
            a, b = features[n1], features[n2]
            inner_product = (a * b).sum()
            C2 = inner_product / (np.sqrt(np.square(a).sum() + np.square(b).sum()) + 1e-6)
            if C1 > threshold1 or C2 < threshold2:
                A[i] = 0
                removed_cnt += 1
    return removed_cnt
| true | true |
f7f568821e38bfb8baf20e3d3953ebea040ae2d4 | 3,346 | py | Python | attack.py | LiYingwei/Regional-Homogeneity | 6b0b521ff6e9d1f4c3f25cb25518968047b5cca0 | [
"MIT"
] | 44 | 2019-04-02T01:56:43.000Z | 2022-03-29T08:48:22.000Z | attack.py | Sunshine352/Regional-Homogeneity | 6b0b521ff6e9d1f4c3f25cb25518968047b5cca0 | [
"MIT"
] | 1 | 2020-02-05T03:43:05.000Z | 2020-03-17T20:06:59.000Z | attack.py | Sunshine352/Regional-Homogeneity | 6b0b521ff6e9d1f4c3f25cb25518968047b5cca0 | [
"MIT"
] | 10 | 2019-04-02T08:39:34.000Z | 2021-04-10T13:56:16.000Z | from config import config as FLAGS
import tensorflow as tf
from tensorpack import (BatchData)
from tqdm import tqdm
import numpy as np
from RHP_ops import conv_with_rn
from data import PNGDataFlow, save_images
from networks import network
from tensorpack.tfutils.tower import TowerContext
class Attacker:
    """Iterative FGSM-style attacker with a Regional Homogeneity Prior (RHP)
    transformation applied to the gradient noise.

    Builds an unrolled ``tf.while_loop`` attack graph against the first
    network in ``FLAGS.attack_networks`` and restores both the network
    weights and the trained RHP variables.

    Fix: ``perturb`` and ``restore`` previously used the module-level ``sess``
    global instead of ``self.sess``, so the class only worked when a global
    session happened to exist.
    """
    def __init__(self, sess):
        self.sess = sess
        # Work in [0, 1] image space; FLAGS values are given in [0, 255].
        self.step_size = FLAGS.step_size / 255.0
        self.max_epsilon = FLAGS.max_epsilon / 255.0

        # Prepare graph
        batch_shape = [FLAGS.batch_size, 299, 299, 3]
        self.x_input = tf.placeholder(tf.float32, shape=batch_shape)
        # L-infinity ball around the clean input, clipped to valid pixel range.
        x_max = tf.clip_by_value(self.x_input + self.max_epsilon, 0., 1.0)
        x_min = tf.clip_by_value(self.x_input - self.max_epsilon, 0., 1.0)
        self.y_input = tf.placeholder(tf.int64, shape=batch_shape[0])
        i = tf.constant(0)
        self.x_adv, _, _, _, _ = tf.while_loop(self.stop, self.graph,
                                               [self.x_input, self.y_input, i, x_max, x_min])
        self.restore()

    def graph(self, x, y, i, x_max, x_min):
        """One attack iteration: gradient (or zeros when universal), RHP
        transform, normalization, and a signed step clipped to the ball."""
        with TowerContext("model_tower", is_training=False):
            logits, _, endpoints = network.model(x, FLAGS.attack_networks[0])
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
        noise = tf.gradients(loss, x)[0] if not FLAGS.universal else tf.zeros_like(x)
        with TowerContext('RHP_tower', is_training=False):
            with tf.variable_scope('RHP'):
                noise = conv_with_rn(noise)
        # Normalize by the mean absolute value to stabilise the step magnitude.
        noise = noise / (tf.reduce_mean(tf.abs(noise), [1, 2, 3], keepdims=True) + 1e-12)
        x = x + self.step_size * tf.sign(noise)
        x = tf.clip_by_value(x, x_min, x_max)
        i = tf.add(i, 1)
        return x, y, i, x_max, x_min

    @staticmethod
    def stop(x, y, i, x_max, x_min):
        """while_loop condition: run exactly ``FLAGS.num_steps`` iterations."""
        return tf.less(i, FLAGS.num_steps)

    def perturb(self, images, labels):
        """Return adversarial versions of ``images``.

        Short batches are zero-padded to ``FLAGS.batch_size`` (the graph has a
        fixed batch dimension) and the padding is stripped from the result.
        """
        batch_size = images.shape[0]
        if batch_size < FLAGS.batch_size:
            pad_num = FLAGS.batch_size - batch_size
            images = np.concatenate([images, np.zeros([pad_num, 299, 299, 3])])
            labels = np.concatenate([labels, np.zeros([pad_num])])
        adv_images = self.sess.run(self.x_adv,
                                   feed_dict={self.x_input: images, self.y_input: labels})
        return adv_images[:batch_size]

    def restore(self):
        """Load the attacked network's weights and the latest RHP checkpoint."""
        network.restore(self.sess, FLAGS.attack_networks[0])
        RHP_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='RHP')
        RHP_variables_saver = tf.train.Saver(RHP_variables)
        ckpt_filename = tf.train.latest_checkpoint(FLAGS.RHP_savepath)
        RHP_variables_saver.restore(self.sess, ckpt_filename)
if __name__ == '__main__':
    # Build the session first: Attacker constructs its graph and restores
    # checkpoints into this session.
    sess = tf.Session()
    model = Attacker(sess)
    # Stream PNG images (with ground-truth labels) in fixed-size batches.
    df = PNGDataFlow(FLAGS.img_dir, FLAGS.test_list_filename, FLAGS.ground_truth_file,
                     result_dir=None, img_num=FLAGS.img_num)
    df = BatchData(df, FLAGS.batch_size, remainder=True)
    df.reset_state()
    total_batch = int((df.ds.img_num - 1) / FLAGS.batch_size) + 1
    for batch_index, (x_batch, y_batch, name_batch) in tqdm(enumerate(df), total=total_batch):
        # Generate adversarial examples for this batch and persist them.
        advs = model.perturb(x_batch, y_batch)
        save_images(advs, name_batch, FLAGS.result_dir)
| 41.308642 | 97 | 0.652122 | from config import config as FLAGS
import tensorflow as tf
from tensorpack import (BatchData)
from tqdm import tqdm
import numpy as np
from RHP_ops import conv_with_rn
from data import PNGDataFlow, save_images
from networks import network
from tensorpack.tfutils.tower import TowerContext
class Attacker:
    """Iterative FGSM-style attacker with a Regional Homogeneity Prior (RHP)
    transformation applied to the gradient noise.

    Builds an unrolled ``tf.while_loop`` attack graph against the first
    network in ``FLAGS.attack_networks`` and restores both the network
    weights and the trained RHP variables.

    Fix: ``perturb`` and ``restore`` previously used the module-level ``sess``
    global instead of ``self.sess``, so the class only worked when a global
    session happened to exist.
    """
    def __init__(self, sess):
        self.sess = sess
        # Work in [0, 1] image space; FLAGS values are given in [0, 255].
        self.step_size = FLAGS.step_size / 255.0
        self.max_epsilon = FLAGS.max_epsilon / 255.0

        # Prepare graph
        batch_shape = [FLAGS.batch_size, 299, 299, 3]
        self.x_input = tf.placeholder(tf.float32, shape=batch_shape)
        # L-infinity ball around the clean input, clipped to valid pixel range.
        x_max = tf.clip_by_value(self.x_input + self.max_epsilon, 0., 1.0)
        x_min = tf.clip_by_value(self.x_input - self.max_epsilon, 0., 1.0)
        self.y_input = tf.placeholder(tf.int64, shape=batch_shape[0])
        i = tf.constant(0)
        self.x_adv, _, _, _, _ = tf.while_loop(self.stop, self.graph,
                                               [self.x_input, self.y_input, i, x_max, x_min])
        self.restore()

    def graph(self, x, y, i, x_max, x_min):
        """One attack iteration: gradient (or zeros when universal), RHP
        transform, normalization, and a signed step clipped to the ball."""
        with TowerContext("model_tower", is_training=False):
            logits, _, endpoints = network.model(x, FLAGS.attack_networks[0])
        loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y)
        noise = tf.gradients(loss, x)[0] if not FLAGS.universal else tf.zeros_like(x)
        with TowerContext('RHP_tower', is_training=False):
            with tf.variable_scope('RHP'):
                noise = conv_with_rn(noise)
        # Normalize by the mean absolute value to stabilise the step magnitude.
        noise = noise / (tf.reduce_mean(tf.abs(noise), [1, 2, 3], keepdims=True) + 1e-12)
        x = x + self.step_size * tf.sign(noise)
        x = tf.clip_by_value(x, x_min, x_max)
        i = tf.add(i, 1)
        return x, y, i, x_max, x_min

    @staticmethod
    def stop(x, y, i, x_max, x_min):
        """while_loop condition: run exactly ``FLAGS.num_steps`` iterations."""
        return tf.less(i, FLAGS.num_steps)

    def perturb(self, images, labels):
        """Return adversarial versions of ``images``.

        Short batches are zero-padded to ``FLAGS.batch_size`` (the graph has a
        fixed batch dimension) and the padding is stripped from the result.
        """
        batch_size = images.shape[0]
        if batch_size < FLAGS.batch_size:
            pad_num = FLAGS.batch_size - batch_size
            images = np.concatenate([images, np.zeros([pad_num, 299, 299, 3])])
            labels = np.concatenate([labels, np.zeros([pad_num])])
        adv_images = self.sess.run(self.x_adv,
                                   feed_dict={self.x_input: images, self.y_input: labels})
        return adv_images[:batch_size]

    def restore(self):
        """Load the attacked network's weights and the latest RHP checkpoint."""
        network.restore(self.sess, FLAGS.attack_networks[0])
        RHP_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='RHP')
        RHP_variables_saver = tf.train.Saver(RHP_variables)
        ckpt_filename = tf.train.latest_checkpoint(FLAGS.RHP_savepath)
        RHP_variables_saver.restore(self.sess, ckpt_filename)
if __name__ == '__main__':
    # Build the session first: Attacker constructs its graph and restores
    # checkpoints into this session.
    sess = tf.Session()
    model = Attacker(sess)
    # Stream PNG images (with ground-truth labels) in fixed-size batches.
    df = PNGDataFlow(FLAGS.img_dir, FLAGS.test_list_filename, FLAGS.ground_truth_file,
                     result_dir=None, img_num=FLAGS.img_num)
    df = BatchData(df, FLAGS.batch_size, remainder=True)
    df.reset_state()
    total_batch = int((df.ds.img_num - 1) / FLAGS.batch_size) + 1
    for batch_index, (x_batch, y_batch, name_batch) in tqdm(enumerate(df), total=total_batch):
        # Generate adversarial examples for this batch and persist them.
        advs = model.perturb(x_batch, y_batch)
        save_images(advs, name_batch, FLAGS.result_dir)
| true | true |
f7f56930f7f0bd97fe1f8c96b85ebd9a317a525e | 15,122 | py | Python | neo3/storage/implementations/leveldb.py | CityOfZion/neo3-python | db4cd53041ce7a2c1d32bee18214fb31bb9f52be | [
"MIT"
] | null | null | null | neo3/storage/implementations/leveldb.py | CityOfZion/neo3-python | db4cd53041ce7a2c1d32bee18214fb31bb9f52be | [
"MIT"
] | 23 | 2020-05-12T15:01:12.000Z | 2020-07-15T09:26:05.000Z | neo3/storage/implementations/leveldb.py | CityOfZion/neo3-python | db4cd53041ce7a2c1d32bee18214fb31bb9f52be | [
"MIT"
] | 1 | 2020-07-01T09:32:46.000Z | 2020-07-01T09:32:46.000Z | from __future__ import annotations
from neo3 import storage
from neo3 import storage_logger as logger
from neo3.core import types, serialization
from neo3.network import payloads
from contextlib import suppress
from copy import deepcopy
level_db_supported = False
with suppress(ModuleNotFoundError):
import plyvel # type: ignore
level_db_supported = True
class DBPrefixes:
    # Single-byte key prefixes namespacing the record types in the shared
    # LevelDB keyspace.
    BLOCKS = b'\x01'  # block hash -> serialized block
    BLOCKS_HEIGHT_MAP = b'\x02'  # block height (LE uint32) -> block hash
    BLOCKS_BEST_HEIGHT = b'\x03'  # singleton: current best block height
    CONTRACTS = b'\x04'  # contract script hash -> contract state
    STORAGES = b'\x05'  # storage key -> storage item
    TRANSACTIONS = b'\x06'  # transaction hash -> serialized transaction
class LevelDB(storage.IDBImplementation):
    """LevelDB backed storage implementation.

    All record types share a single keyspace, namespaced through the 1-byte
    ``DBPrefixes``. Every ``_internal_*`` mutator accepts an optional plyvel
    write batch; when given, the mutation is staged on the batch (used by
    snapshot commits), otherwise it is applied to the database directly.

    Fix: ``_internal_storage_update`` previously dropped its ``batch``
    argument, so CHANGED storage items were written outside the snapshot's
    atomic write batch.
    """
    def __init__(self, options: dict):
        """Open (or create) the database at ``options['path']``."""
        if not level_db_supported:
            raise ModuleNotFoundError("plyvel module not found - try 'pip install plyvel'. "
                                      "Also make sure to have leveldb installed.")

        try:
            self._path = options['path']
            self._real_db = plyvel.DB(self._path, create_if_missing=True, max_open_files=100,
                                      lru_cache_size=10 * 1024 * 1024)
            self._tx_iterator = None
            self._block_iterator = None
            self._contract_iterator = None
            logger.info(f"Created DB at {self._path}")
        except Exception as e:
            raise Exception(f"leveldb exception [ {e} ]")

    def get_snapshotview(self) -> LevelDBSnapshot:
        """Return a consistent snapshot view whose writes are batched."""
        return LevelDBSnapshot(self)

    def _internal_bestblockheight_get(self):
        """Return the persisted best block height; raises KeyError when unset."""
        height_bytes = self._real_db.get(DBPrefixes.BLOCKS_BEST_HEIGHT)
        if height_bytes is None:
            raise KeyError
        return int.from_bytes(height_bytes, 'little')

    def _internal_bestblockheight_put(self, height: int, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.BLOCKS_BEST_HEIGHT, height.to_bytes(4, 'little'))

    def _internal_bestblockheight_update(self, height: int, batch=None):
        self._internal_bestblockheight_put(height, batch)

    def _internal_block_put(self, block: payloads.Block, batch=None):
        """Store a block under its hash, maintain the height->hash map and
        advance the best-height marker when the block extends the chain."""
        db = batch if batch else self._real_db
        block_height_bytes = block.index.to_bytes(4, 'little')
        db.put(DBPrefixes.BLOCKS + block.hash().to_array(), block.to_array())
        db.put(DBPrefixes.BLOCKS_HEIGHT_MAP + block_height_bytes, block.hash().to_array())
        # Only reached via the raw view or a snapshot commit, so it is safe to
        # persist the best height here.
        stored_value = -1
        with suppress(KeyError):
            stored_value = self._internal_bestblockheight_get()

        if block.index > stored_value:
            db.put(DBPrefixes.BLOCKS_BEST_HEIGHT, block_height_bytes)

    def _internal_block_update(self, block: payloads.Block, batch=None):
        self._internal_block_put(block, batch)

    def _internal_block_delete(self, hash: types.UInt256, batch=None):
        """Delete a block and its height->hash mapping (no-op when absent)."""
        db = batch if batch else self._real_db
        block_hash_bytes = hash.to_array()
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + block_hash_bytes)
        if block_bytes is not None:
            # Avoid full block deserialization (merkle tree calculation etc.);
            # the 4-byte block.index is read from its fixed offset in the
            # serialized header.
            start_idx = 4 + 32 + 32 + 8
            block_height_bytes = block_bytes[start_idx:start_idx + 4]
            db.delete(DBPrefixes.BLOCKS + block_hash_bytes)
            db.delete(DBPrefixes.BLOCKS_HEIGHT_MAP + block_height_bytes)

    def _internal_block_get(self, hash: types.UInt256):
        """Return the block stored under ``hash``; raises KeyError when missing."""
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + hash.to_array())
        if block_bytes is None:
            raise KeyError

        return payloads.Block.deserialize_from_bytes(block_bytes)

    def _internal_block_get_by_height(self, height: int):
        """Return the block at ``height`` via the height->hash map."""
        block_hash_bytes = self._real_db.get(DBPrefixes.BLOCKS_HEIGHT_MAP + height.to_bytes(4, 'little'))
        if block_hash_bytes is None:
            raise KeyError
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + block_hash_bytes)
        if block_bytes is None:
            # Unreachable unless _internal_block_put/delete are inconsistent.
            raise KeyError

        return payloads.Block.deserialize_from_bytes(block_bytes)

    def _internal_block_all(self):
        """Yield deep copies of all stored blocks."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.BLOCKS, include_key=False, include_value=True) as it:
            for value in it:
                res.append(payloads.Block.deserialize_from_bytes(value))
        # Yield outside the iterator context so the LevelDB handle is released.
        for block in res:
            yield deepcopy(block)

    def _internal_contract_put(self, contract: storage.ContractState, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.CONTRACTS + contract.script_hash().to_array(), contract.to_array())

    def _internal_contract_update(self, contract: storage.ContractState, batch=None):
        self._internal_contract_put(contract, batch)

    def _internal_contract_delete(self, script_hash: types.UInt160, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.CONTRACTS + script_hash.to_array())

    def _internal_contract_get(self, script_hash: types.UInt160):
        """Return the contract state for ``script_hash``; raises KeyError when missing."""
        contract_bytes = self._real_db.get(DBPrefixes.CONTRACTS + script_hash.to_array())
        if contract_bytes is None:
            raise KeyError

        return storage.ContractState.deserialize_from_bytes(contract_bytes)

    def _internal_contract_all(self):
        """Yield deep copies of all stored contract states."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.CONTRACTS, include_key=False, include_value=True) as it:
            for value in it:
                res.append(storage.ContractState.deserialize_from_bytes(value))
        # Yield outside the iterator context so the LevelDB handle is released.
        for contract in res:
            yield deepcopy(contract)

    def _internal_storage_put(self, key: storage.StorageKey, value: storage.StorageItem, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.STORAGES + key.to_array(), value.to_array())

    def _internal_storage_update(self, key: storage.StorageKey, value: storage.StorageItem, batch=None):
        # Fix: forward ``batch`` (it was silently dropped before, so snapshot
        # commits of CHANGED storage items bypassed the write batch).
        self._internal_storage_put(key, value, batch)

    def _internal_storage_delete(self, key: storage.StorageKey, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.STORAGES + key.to_array())

    def _internal_storage_get(self, key: storage.StorageKey):
        """Return the storage item for ``key``; raises KeyError when missing."""
        storage_bytes = self._real_db.get(DBPrefixes.STORAGES + key.to_array())
        if storage_bytes is None:
            raise KeyError

        return storage.StorageItem.deserialize_from_bytes(storage_bytes)

    def _internal_storage_all(self, contract_script_hash: types.UInt160 = None):
        """Yield deep-copied (key, item) pairs, optionally limited to one contract."""
        prefix = DBPrefixes.STORAGES
        if contract_script_hash is not None:
            prefix = DBPrefixes.STORAGES + contract_script_hash.to_array()

        res = {}
        with self._real_db.iterator(prefix=prefix, include_key=True, include_value=True) as it:
            for key, value in it:
                # Strip the 1-byte table prefix off the stored key.
                k = storage.StorageKey.deserialize_from_bytes(key[1:])
                v = storage.StorageItem.deserialize_from_bytes(value)
                res[k] = v
        # Yield outside the iterator context so the LevelDB handle is released.
        for k, v in res.items():
            yield deepcopy(k), deepcopy(v)

    def _internal_storage_find(self, contract_script_hash: types.UInt160, key_prefix: bytes):
        """Yield (key, item) pairs of one contract whose keys start with ``key_prefix``.

        NOTE(review): unlike ``_internal_storage_all`` the results are not
        deep-copied — confirm callers do not mutate them.
        """
        prefix = DBPrefixes.STORAGES + contract_script_hash.to_array() + key_prefix
        res = {}
        with self._real_db.iterator(prefix=prefix, include_key=True, include_value=True) as it:
            for key, value in it:
                # Strip the 1-byte table prefix off the stored key.
                k = storage.StorageKey.deserialize_from_bytes(key[1:])
                v = storage.StorageItem.deserialize_from_bytes(value)
                res[k] = v
        for k, v in res.items():
            yield k, v

    def _internal_transaction_put(self, transaction: payloads.Transaction, batch=None):
        db = batch if batch else self._real_db
        with serialization.BinaryWriter() as bw:
            transaction.serialize_special(bw)
            serialized_tx = bw.to_array()
        db.put(DBPrefixes.TRANSACTIONS + transaction.hash().to_array(), serialized_tx)

    def _internal_transaction_update(self, transaction, batch=None):
        self._internal_transaction_put(transaction, batch)

    def _internal_transaction_delete(self, hash: types.UInt256, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.TRANSACTIONS + hash.to_array())

    def _internal_transaction_get(self, hash: types.UInt256):
        """Return the transaction stored under ``hash``; raises KeyError when missing."""
        tx_bytes = self._real_db.get(DBPrefixes.TRANSACTIONS + hash.to_array())
        if tx_bytes is None:
            # KeyError is part of the "not found" contract for this backend.
            raise KeyError

        with serialization.BinaryReader(tx_bytes) as br:
            tx = payloads.Transaction()
            tx.deserialize_special(br)
            return tx

    def _internal_transaction_all(self):
        """Yield deep copies of all stored transactions."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.TRANSACTIONS, include_key=False, include_value=True) as it:
            for value in it:
                with serialization.BinaryReader(value) as br:
                    v = payloads.Transaction()
                    v.deserialize_special(br)
                    res.append(v)
        # Yield outside the iterator context so the LevelDB handle is released.
        for tx in res:
            yield deepcopy(tx)

    def close(self):
        """Close the underlying LevelDB handle."""
        self._real_db.close()
class LevelDBSnapshot(storage.Snapshot):
    # Snapshot view over a LevelDB backend: reads come from a consistent
    # LevelDB snapshot, writes are staged in a write batch and persisted
    # atomically by commit().
    def __init__(self, db: LevelDB):
        super(LevelDBSnapshot, self).__init__()
        self._db = db
        # Consistent read view + batched writes, committed together.
        self._snapshot = db._real_db.snapshot()
        self._batch = db._real_db.write_batch()
        self._block_cache = LevelDBCachedBlockAccess(db, self._batch)
        self._contract_cache = LevelDBCachedContractAccess(db, self._batch)
        self._storage_cache = LevelDBCachedStorageAccess(db, self._batch)
        self._tx_cache = LevelDBCachedTXAccess(db, self._batch)
        self._block_height_cache = LevelDBBestBlockHeightAttribute(db, self._batch)
    def commit(self) -> None:
        # Flush all caches into the batch (super), then write the batch to disk.
        super(LevelDBSnapshot, self).commit()
        self._batch.write()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the LevelDB snapshot handle.
        self._snapshot.close()
class LevelDBBestBlockHeightAttribute(storage.AttributeCache):
    # Attribute cache for the single "best block height" value; reads go to
    # the database, writes are staged on the snapshot's write batch.
    def __init__(self, db, batch):
        super(LevelDBBestBlockHeightAttribute, self).__init__()
        self._db = db
        self._batch = batch
    def _get_internal(self):
        return self._db._internal_bestblockheight_get()
    def _update_internal(self, value):
        # Stage the new height on the snapshot's write batch.
        self._db._internal_bestblockheight_update(value, self._batch)
class LevelDBCachedBlockAccess(storage.CachedBlockAccess):
    """Block cache whose commit() stages tracked changes into the write batch."""
    def __init__(self, db, batch):
        super(LevelDBCachedBlockAccess, self).__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Stage every ADDED/CHANGED/DELETED block onto the LevelDB batch."""
        for tracked in self._dictionary.values():
            state = tracked.state
            if state == storage.TrackState.ADDED:
                self._db._internal_block_put(tracked.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_block_update(tracked.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_block_delete(tracked.item.hash(), self._batch)

    def create_snapshot(self):
        """Return a clone cache layered on top of this one."""
        return storage.CloneBlockCache(self._db, self)
class LevelDBCachedContractAccess(storage.CachedContractAccess):
    """Contract cache whose commit() stages tracked changes into the write batch."""
    def __init__(self, db, batch):
        super(LevelDBCachedContractAccess, self).__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Stage every ADDED/CHANGED/DELETED contract onto the LevelDB batch."""
        for tracked in self._dictionary.values():
            state = tracked.state
            if state == storage.TrackState.ADDED:
                self._db._internal_contract_put(tracked.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_contract_update(tracked.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_contract_delete(tracked.item.script_hash(), self._batch)

    def create_snapshot(self):
        """Return a clone cache layered on top of this one."""
        return storage.CloneContractCache(self._db, self)
class LevelDBCachedStorageAccess(storage.CachedStorageAccess):
    """Storage-item cache whose commit() stages tracked changes into the write batch."""
    def __init__(self, db, batch):
        super(LevelDBCachedStorageAccess, self).__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Stage every ADDED/CHANGED/DELETED storage pair onto the LevelDB batch."""
        for tracked in self._dictionary.values():
            state = tracked.state
            if state == storage.TrackState.ADDED:
                self._db._internal_storage_put(tracked.key, tracked.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_storage_update(tracked.key, tracked.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_storage_delete(tracked.key, self._batch)

    def create_snapshot(self):
        """Return a clone cache layered on top of this one."""
        return storage.CloneStorageCache(self._db, self)
class LevelDBCachedTXAccess(storage.CachedTXAccess):
    """Transaction cache whose commit() stages tracked changes into the write batch."""
    def __init__(self, db, batch):
        super(LevelDBCachedTXAccess, self).__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Stage every ADDED/CHANGED/DELETED transaction onto the LevelDB batch."""
        for tracked in self._dictionary.values():
            state = tracked.state
            if state == storage.TrackState.ADDED:
                self._db._internal_transaction_put(tracked.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_transaction_update(tracked.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_transaction_delete(tracked.item.hash(), self._batch)

    def create_snapshot(self):
        """Return a clone cache layered on top of this one."""
        return storage.CloneTXCache(self._db, self)
| 39.483029 | 120 | 0.661156 | from __future__ import annotations
from neo3 import storage
from neo3 import storage_logger as logger
from neo3.core import types, serialization
from neo3.network import payloads
from contextlib import suppress
from copy import deepcopy
level_db_supported = False
with suppress(ModuleNotFoundError):
import plyvel
level_db_supported = True
class DBPrefixes:
    # Single-byte key prefixes namespacing the record types in the shared
    # LevelDB keyspace.
    BLOCKS = b'\x01'  # block hash -> serialized block
    BLOCKS_HEIGHT_MAP = b'\x02'  # block height (LE uint32) -> block hash
    BLOCKS_BEST_HEIGHT = b'\x03'  # singleton: current best block height
    CONTRACTS = b'\x04'  # contract script hash -> contract state
    STORAGES = b'\x05'  # storage key -> storage item
    TRANSACTIONS = b'\x06'  # transaction hash -> serialized transaction
class LevelDB(storage.IDBImplementation):
    """LevelDB backed storage implementation.

    All record types share a single keyspace, namespaced through the 1-byte
    ``DBPrefixes``. Every ``_internal_*`` mutator accepts an optional plyvel
    write batch; when given, the mutation is staged on the batch (used by
    snapshot commits), otherwise it is applied to the database directly.

    Fix: ``_internal_storage_update`` previously dropped its ``batch``
    argument, so CHANGED storage items were written outside the snapshot's
    atomic write batch.
    """
    def __init__(self, options: dict):
        """Open (or create) the database at ``options['path']``."""
        if not level_db_supported:
            raise ModuleNotFoundError("plyvel module not found - try 'pip install plyvel'. "
                                      "Also make sure to have leveldb installed.")

        try:
            self._path = options['path']
            self._real_db = plyvel.DB(self._path, create_if_missing=True, max_open_files=100,
                                      lru_cache_size=10 * 1024 * 1024)
            self._tx_iterator = None
            self._block_iterator = None
            self._contract_iterator = None
            logger.info(f"Created DB at {self._path}")
        except Exception as e:
            raise Exception(f"leveldb exception [ {e} ]")

    def get_snapshotview(self) -> LevelDBSnapshot:
        """Return a consistent snapshot view whose writes are batched."""
        return LevelDBSnapshot(self)

    def _internal_bestblockheight_get(self):
        """Return the persisted best block height; raises KeyError when unset."""
        height_bytes = self._real_db.get(DBPrefixes.BLOCKS_BEST_HEIGHT)
        if height_bytes is None:
            raise KeyError
        return int.from_bytes(height_bytes, 'little')

    def _internal_bestblockheight_put(self, height: int, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.BLOCKS_BEST_HEIGHT, height.to_bytes(4, 'little'))

    def _internal_bestblockheight_update(self, height: int, batch=None):
        self._internal_bestblockheight_put(height, batch)

    def _internal_block_put(self, block: payloads.Block, batch=None):
        """Store a block under its hash, maintain the height->hash map and
        advance the best-height marker when the block extends the chain."""
        db = batch if batch else self._real_db
        block_height_bytes = block.index.to_bytes(4, 'little')
        db.put(DBPrefixes.BLOCKS + block.hash().to_array(), block.to_array())
        db.put(DBPrefixes.BLOCKS_HEIGHT_MAP + block_height_bytes, block.hash().to_array())
        # Only reached via the raw view or a snapshot commit, so it is safe to
        # persist the best height here.
        stored_value = -1
        with suppress(KeyError):
            stored_value = self._internal_bestblockheight_get()

        if block.index > stored_value:
            db.put(DBPrefixes.BLOCKS_BEST_HEIGHT, block_height_bytes)

    def _internal_block_update(self, block: payloads.Block, batch=None):
        self._internal_block_put(block, batch)

    def _internal_block_delete(self, hash: types.UInt256, batch=None):
        """Delete a block and its height->hash mapping (no-op when absent)."""
        db = batch if batch else self._real_db
        block_hash_bytes = hash.to_array()
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + block_hash_bytes)
        if block_bytes is not None:
            # Avoid full block deserialization (merkle tree calculation etc.);
            # the 4-byte block.index is read from its fixed offset in the
            # serialized header.
            start_idx = 4 + 32 + 32 + 8
            block_height_bytes = block_bytes[start_idx:start_idx + 4]
            db.delete(DBPrefixes.BLOCKS + block_hash_bytes)
            db.delete(DBPrefixes.BLOCKS_HEIGHT_MAP + block_height_bytes)

    def _internal_block_get(self, hash: types.UInt256):
        """Return the block stored under ``hash``; raises KeyError when missing."""
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + hash.to_array())
        if block_bytes is None:
            raise KeyError

        return payloads.Block.deserialize_from_bytes(block_bytes)

    def _internal_block_get_by_height(self, height: int):
        """Return the block at ``height`` via the height->hash map."""
        block_hash_bytes = self._real_db.get(DBPrefixes.BLOCKS_HEIGHT_MAP + height.to_bytes(4, 'little'))
        if block_hash_bytes is None:
            raise KeyError
        block_bytes = self._real_db.get(DBPrefixes.BLOCKS + block_hash_bytes)
        if block_bytes is None:
            # Unreachable unless _internal_block_put/delete are inconsistent.
            raise KeyError

        return payloads.Block.deserialize_from_bytes(block_bytes)

    def _internal_block_all(self):
        """Yield deep copies of all stored blocks."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.BLOCKS, include_key=False, include_value=True) as it:
            for value in it:
                res.append(payloads.Block.deserialize_from_bytes(value))
        # Yield outside the iterator context so the LevelDB handle is released.
        for block in res:
            yield deepcopy(block)

    def _internal_contract_put(self, contract: storage.ContractState, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.CONTRACTS + contract.script_hash().to_array(), contract.to_array())

    def _internal_contract_update(self, contract: storage.ContractState, batch=None):
        self._internal_contract_put(contract, batch)

    def _internal_contract_delete(self, script_hash: types.UInt160, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.CONTRACTS + script_hash.to_array())

    def _internal_contract_get(self, script_hash: types.UInt160):
        """Return the contract state for ``script_hash``; raises KeyError when missing."""
        contract_bytes = self._real_db.get(DBPrefixes.CONTRACTS + script_hash.to_array())
        if contract_bytes is None:
            raise KeyError

        return storage.ContractState.deserialize_from_bytes(contract_bytes)

    def _internal_contract_all(self):
        """Yield deep copies of all stored contract states."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.CONTRACTS, include_key=False, include_value=True) as it:
            for value in it:
                res.append(storage.ContractState.deserialize_from_bytes(value))
        # Yield outside the iterator context so the LevelDB handle is released.
        for contract in res:
            yield deepcopy(contract)

    def _internal_storage_put(self, key: storage.StorageKey, value: storage.StorageItem, batch=None):
        db = batch if batch else self._real_db
        db.put(DBPrefixes.STORAGES + key.to_array(), value.to_array())

    def _internal_storage_update(self, key: storage.StorageKey, value: storage.StorageItem, batch=None):
        # Fix: forward ``batch`` (it was silently dropped before, so snapshot
        # commits of CHANGED storage items bypassed the write batch).
        self._internal_storage_put(key, value, batch)

    def _internal_storage_delete(self, key: storage.StorageKey, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.STORAGES + key.to_array())

    def _internal_storage_get(self, key: storage.StorageKey):
        """Return the storage item for ``key``; raises KeyError when missing."""
        storage_bytes = self._real_db.get(DBPrefixes.STORAGES + key.to_array())
        if storage_bytes is None:
            raise KeyError

        return storage.StorageItem.deserialize_from_bytes(storage_bytes)

    def _internal_storage_all(self, contract_script_hash: types.UInt160 = None):
        """Yield deep-copied (key, item) pairs, optionally limited to one contract."""
        prefix = DBPrefixes.STORAGES
        if contract_script_hash is not None:
            prefix = DBPrefixes.STORAGES + contract_script_hash.to_array()

        res = {}
        with self._real_db.iterator(prefix=prefix, include_key=True, include_value=True) as it:
            for key, value in it:
                # Strip the 1-byte table prefix off the stored key.
                k = storage.StorageKey.deserialize_from_bytes(key[1:])
                v = storage.StorageItem.deserialize_from_bytes(value)
                res[k] = v
        # Yield outside the iterator context so the LevelDB handle is released.
        for k, v in res.items():
            yield deepcopy(k), deepcopy(v)

    def _internal_storage_find(self, contract_script_hash: types.UInt160, key_prefix: bytes):
        """Yield (key, item) pairs of one contract whose keys start with ``key_prefix``.

        NOTE(review): unlike ``_internal_storage_all`` the results are not
        deep-copied — confirm callers do not mutate them.
        """
        prefix = DBPrefixes.STORAGES + contract_script_hash.to_array() + key_prefix
        res = {}
        with self._real_db.iterator(prefix=prefix, include_key=True, include_value=True) as it:
            for key, value in it:
                # Strip the 1-byte table prefix off the stored key.
                k = storage.StorageKey.deserialize_from_bytes(key[1:])
                v = storage.StorageItem.deserialize_from_bytes(value)
                res[k] = v
        for k, v in res.items():
            yield k, v

    def _internal_transaction_put(self, transaction: payloads.Transaction, batch=None):
        db = batch if batch else self._real_db
        with serialization.BinaryWriter() as bw:
            transaction.serialize_special(bw)
            serialized_tx = bw.to_array()
        db.put(DBPrefixes.TRANSACTIONS + transaction.hash().to_array(), serialized_tx)

    def _internal_transaction_update(self, transaction, batch=None):
        self._internal_transaction_put(transaction, batch)

    def _internal_transaction_delete(self, hash: types.UInt256, batch=None):
        db = batch if batch else self._real_db
        db.delete(DBPrefixes.TRANSACTIONS + hash.to_array())

    def _internal_transaction_get(self, hash: types.UInt256):
        """Return the transaction stored under ``hash``; raises KeyError when missing."""
        tx_bytes = self._real_db.get(DBPrefixes.TRANSACTIONS + hash.to_array())
        if tx_bytes is None:
            # KeyError is part of the "not found" contract for this backend.
            raise KeyError

        with serialization.BinaryReader(tx_bytes) as br:
            tx = payloads.Transaction()
            tx.deserialize_special(br)
            return tx

    def _internal_transaction_all(self):
        """Yield deep copies of all stored transactions."""
        res = []
        with self._real_db.iterator(prefix=DBPrefixes.TRANSACTIONS, include_key=False, include_value=True) as it:
            for value in it:
                with serialization.BinaryReader(value) as br:
                    v = payloads.Transaction()
                    v.deserialize_special(br)
                    res.append(v)
        # Yield outside the iterator context so the LevelDB handle is released.
        for tx in res:
            yield deepcopy(tx)

    def close(self):
        """Close the underlying LevelDB handle."""
        self._real_db.close()
class LevelDBSnapshot(storage.Snapshot):
    # Snapshot view over a LevelDB backend: reads come from a consistent
    # LevelDB snapshot, writes are staged in a write batch and persisted
    # atomically by commit().
    def __init__(self, db: LevelDB):
        super(LevelDBSnapshot, self).__init__()
        self._db = db
        # Consistent read view + batched writes, committed together.
        self._snapshot = db._real_db.snapshot()
        self._batch = db._real_db.write_batch()
        self._block_cache = LevelDBCachedBlockAccess(db, self._batch)
        self._contract_cache = LevelDBCachedContractAccess(db, self._batch)
        self._storage_cache = LevelDBCachedStorageAccess(db, self._batch)
        self._tx_cache = LevelDBCachedTXAccess(db, self._batch)
        self._block_height_cache = LevelDBBestBlockHeightAttribute(db, self._batch)
    def commit(self) -> None:
        # Flush all caches into the batch (super), then write the batch to disk.
        super(LevelDBSnapshot, self).commit()
        self._batch.write()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Release the LevelDB snapshot handle.
        self._snapshot.close()
class LevelDBBestBlockHeightAttribute(storage.AttributeCache):
    # Attribute cache for the single "best block height" value; reads go to
    # the database, writes are staged on the snapshot's write batch.
    def __init__(self, db, batch):
        super(LevelDBBestBlockHeightAttribute, self).__init__()
        self._db = db
        self._batch = batch
    def _get_internal(self):
        return self._db._internal_bestblockheight_get()
    def _update_internal(self, value):
        # Stage the new height on the snapshot's write batch.
        self._db._internal_bestblockheight_update(value, self._batch)
class LevelDBCachedBlockAccess(storage.CachedBlockAccess):
    """Block cache whose commit() stages tracked changes into the write batch."""
    def __init__(self, db, batch):
        super(LevelDBCachedBlockAccess, self).__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Stage every ADDED/CHANGED/DELETED block onto the LevelDB batch."""
        for tracked in self._dictionary.values():
            state = tracked.state
            if state == storage.TrackState.ADDED:
                self._db._internal_block_put(tracked.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_block_update(tracked.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_block_delete(tracked.item.hash(), self._batch)

    def create_snapshot(self):
        """Return a clone cache layered on top of this one."""
        return storage.CloneBlockCache(self._db, self)
class LevelDBCachedContractAccess(storage.CachedContractAccess):
    """Contract cache that stages its changes on a shared write batch."""

    def __init__(self, db, batch):
        super().__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Replay every tracked change into the write batch."""
        for trackable in self._dictionary.values():
            state = trackable.state
            if state == storage.TrackState.ADDED:
                self._db._internal_contract_put(trackable.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_contract_update(trackable.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_contract_delete(trackable.item.script_hash(), self._batch)

    def create_snapshot(self):
        return storage.CloneContractCache(self._db, self)
class LevelDBCachedStorageAccess(storage.CachedStorageAccess):
    """Storage-item cache that stages its changes on a shared write batch."""

    def __init__(self, db, batch):
        super().__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Replay every tracked key/value change into the write batch."""
        for trackable in self._dictionary.values():
            state = trackable.state
            if state == storage.TrackState.ADDED:
                self._db._internal_storage_put(trackable.key, trackable.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_storage_update(trackable.key, trackable.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_storage_delete(trackable.key, self._batch)

    def create_snapshot(self):
        return storage.CloneStorageCache(self._db, self)
class LevelDBCachedTXAccess(storage.CachedTXAccess):
    """Transaction cache that stages its changes on a shared write batch."""

    def __init__(self, db, batch):
        super().__init__(db)
        self._batch = batch

    def commit(self) -> None:
        """Replay every tracked change into the write batch."""
        for trackable in self._dictionary.values():
            state = trackable.state
            if state == storage.TrackState.ADDED:
                self._db._internal_transaction_put(trackable.item, self._batch)
            elif state == storage.TrackState.CHANGED:
                self._db._internal_transaction_update(trackable.item, self._batch)
            elif state == storage.TrackState.DELETED:
                self._db._internal_transaction_delete(trackable.item.hash(), self._batch)

    def create_snapshot(self):
        return storage.CloneTXCache(self._db, self)
| true | true |
f7f56a0c755ea0ebc5e458d374bfd1d15e2715e8 | 2,783 | py | Python | examples/flow_routing/simple_sp_driver_wlakes.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | null | null | null | examples/flow_routing/simple_sp_driver_wlakes.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
] | 1 | 2016-03-16T02:34:08.000Z | 2016-04-20T19:31:30.000Z | examples/flow_routing/simple_sp_driver_wlakes.py | cctrunz/landlab | 4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939 | [
"MIT"
"""
simple_sp_driver.py

A simple driver implementing Braun-Willett flow routing and then a
(non-fastscape) stream power component.

DEJH, 09/15/14
"""
from __future__ import print_function

import time

import numpy
import pylab

from landlab import ModelParameterDictionary, RasterModelGrid
from landlab.components.flow_routing import DepressionFinderAndRouter, FlowAccumulator
from landlab.components.stream_power import FastscapeEroder, StreamPowerEroder
from landlab.plot.imshow import imshow_grid

# Read run parameters (grid size, spacing, time step, ...) from the input
# file shared by all the drive_sp drivers.
inputs = ModelParameterDictionary("./drive_sp_params.txt")
nrows = inputs.read_int("nrows")
ncols = inputs.read_int("ncols")
dx = inputs.read_float("dx")
dt = inputs.read_float("dt")
time_to_run = inputs.read_float("run_time")
# nt needs defining
# NOTE(review): uplift rate is scaled by 10x relative to the value in the
# parameter file — presumably intentional for this "with lakes" variant.
uplift = 10. * inputs.read_float("uplift_rate")
init_elev = inputs.read_float("init_elev")

mg = RasterModelGrid((nrows, ncols), xy_spacing=dx)

# create the fields in the grid
mg.add_zeros("topographic__elevation", at="node")
z = mg.zeros(at="node") + init_elev
# z += mg.node_x*0.001
# Small random roughness so flow routing has no perfectly flat surface.
mg["node"]["topographic__elevation"] = z + numpy.random.rand(len(z)) / 1000.

# make some K values in a field to test (spatially variable erodibility)
mg.at_node["K_values"] = 0.1 + numpy.random.rand(nrows * ncols) / 10.

print("Running ...")
time_on = time.time()

# instantiate the components:
fr = FlowAccumulator(mg, flow_director="D8")
sp = StreamPowerEroder(mg, "./drive_sp_params.txt")
lf = DepressionFinderAndRouter(mg)
# load the Fastscape module too, to allow direct comparison
fsp = FastscapeEroder(mg, "./drive_sp_params.txt")

# perform the loop:
elapsed_time = 0.  # total time in simulation
while elapsed_time < time_to_run:
    # for i in range(10):
    print(elapsed_time)
    if elapsed_time + dt > time_to_run:
        # Shrink the final step so we land exactly on run_time.
        print("Short step!")
        dt = time_to_run - elapsed_time
    # NOTE(review): uses the old route_flow()/erode() component API — this
    # script targets an older landlab release; confirm before upgrading.
    mg = fr.route_flow(method="D8")
    lf.map_depressions()
    # print 'Area: ', numpy.max(mg.at_node['drainage_area'])
    # mg = fsp.erode(mg)
    mg, _, _ = sp.erode(
        mg,
        dt,
        node_drainage_areas="drainage_area",
        slopes_at_nodes="topographic__steepest_slope",
        K_if_used="K_values",
    )
    # add uplift (boundary nodes are held fixed)
    mg.at_node["topographic__elevation"][mg.core_nodes] += uplift * dt
    elapsed_time += dt

time_off = time.time()
print("Elapsed time: ", time_off - time_on)

# Finalize and plot
elev = mg["node"]["topographic__elevation"]
elev_r = mg.node_vector_to_raster(elev)

# Clear previous plots
pylab.figure(1)
pylab.close()

# Plot topography
pylab.figure(1)
im = imshow_grid(mg, "topographic__elevation")  # display a colored image
print(elev_r)

pylab.figure(2)
im = pylab.plot(
    dx * numpy.arange(nrows), elev_r[:, int(ncols // 2)]
)  # display a colored image
pylab.title("Vertical cross section")
pylab.show()

print("Done.")
| 28.111111 | 86 | 0.722961 | from __future__ import print_function
import time
import numpy
import pylab
from landlab import ModelParameterDictionary, RasterModelGrid
from landlab.components.flow_routing import DepressionFinderAndRouter, FlowAccumulator
from landlab.components.stream_power import FastscapeEroder, StreamPowerEroder
from landlab.plot.imshow import imshow_grid
inputs = ModelParameterDictionary("./drive_sp_params.txt")
nrows = inputs.read_int("nrows")
ncols = inputs.read_int("ncols")
dx = inputs.read_float("dx")
dt = inputs.read_float("dt")
time_to_run = inputs.read_float("run_time")
uplift = 10. * inputs.read_float("uplift_rate")
init_elev = inputs.read_float("init_elev")
mg = RasterModelGrid((nrows, ncols), xy_spacing=dx)
mg.add_zeros("topographic__elevation", at="node")
z = mg.zeros(at="node") + init_elev
mg["node"]["topographic__elevation"] = z + numpy.random.rand(len(z)) / 1000.
mg.at_node["K_values"] = 0.1 + numpy.random.rand(nrows * ncols) / 10.
print("Running ...")
time_on = time.time()
fr = FlowAccumulator(mg, flow_director="D8")
sp = StreamPowerEroder(mg, "./drive_sp_params.txt")
lf = DepressionFinderAndRouter(mg)
fsp = FastscapeEroder(mg, "./drive_sp_params.txt")
elapsed_time = 0.
while elapsed_time < time_to_run:
print(elapsed_time)
if elapsed_time + dt > time_to_run:
print("Short step!")
dt = time_to_run - elapsed_time
mg = fr.route_flow(method="D8")
lf.map_depressions()
mg, _, _ = sp.erode(
mg,
dt,
node_drainage_areas="drainage_area",
slopes_at_nodes="topographic__steepest_slope",
K_if_used="K_values",
)
mg.at_node["topographic__elevation"][mg.core_nodes] += uplift * dt
elapsed_time += dt
time_off = time.time()
print("Elapsed time: ", time_off - time_on)
elev = mg["node"]["topographic__elevation"]
elev_r = mg.node_vector_to_raster(elev)
pylab.figure(1)
pylab.close()
pylab.figure(1)
im = imshow_grid(mg, "topographic__elevation")
print(elev_r)
pylab.figure(2)
im = pylab.plot(
dx * numpy.arange(nrows), elev_r[:, int(ncols // 2)]
)
pylab.title("Vertical cross section")
pylab.show()
print("Done.")
| true | true |
f7f56a289e503d16b10223c8580fb77729b270d6 | 990 | py | Python | app/db_manager/tests/test_utils.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | null | null | null | app/db_manager/tests/test_utils.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | null | null | null | app/db_manager/tests/test_utils.py | PragmaticCoder/Linkedin-Analytics | a990b5cae02f0d758bc3123bde643d13a439efa3 | [
"MIT"
] | null | null | null | from django.test import TestCase
from core.models import Company
from db_manager.utils import database_upload
class DatabaseUploadTests(TestCase):
    """Tests for ``db_manager.utils.database_upload``."""

    def setUp(self):
        # One fully-populated company plus one created with all defaults.
        self.company = Company.objects.create(
            name="Test",
            companyType="test",
            employeeCountRange="12",
            foundedYear="test",
            industries="test",
            numFollowers="12",
            specialities="test",
            squareLogoUrl="test.jpg",
            websiteUrl="www.test.com",
            group="test",
        )
        self.empty_company = Company.objects.create()

    def tearDown(self):
        for obj in (self.company, self.empty_company):
            obj.delete()

    def test_function_deletes_previous_companies(self):
        """Test that db is cleaned before the upload"""
        database_upload()
        for name in ("Test", "Undisclosed"):
            self.assertEqual(Company.objects.filter(name=name).count(), 0)
| 30 | 79 | 0.620202 | from django.test import TestCase
from core.models import Company
from db_manager.utils import database_upload
class DatabaseUploadTests(TestCase):
def setUp(self):
self.company = Company.objects.create(
name="Test",
companyType="test",
employeeCountRange="12",
foundedYear="test",
industries="test",
numFollowers="12",
specialities="test",
squareLogoUrl="test.jpg",
websiteUrl="www.test.com",
group="test",
)
self.empty_company = Company.objects.create()
def tearDown(self):
self.company.delete()
self.empty_company.delete()
def test_function_deletes_previous_companies(self):
database_upload()
self.assertEqual(Company.objects.filter(name="Test").count(), 0)
self.assertEqual(Company.objects.filter(name="Undisclosed").count(), 0)
| true | true |
f7f56c8c79c97d94f4af07571b6af1adae4f2e57 | 7,206 | py | Python | robustRL-master/robustRL/samplers.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | robustRL-master/robustRL/samplers.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | robustRL-master/robustRL/samplers.py | kyuhoJeong11/GrewRL | a514698df8d38df34de0bd1667d99927f0aa3885 | [
"MIT"
] | null | null | null | """
Sampler functions to be used with the policy search algorithms
Aravind Rajeswaran, 08/04/16
"""
import numpy as np
import copy
import multiprocessing as mp
from rllab.misc import tensor_utils
from MDP_funcs import *
# above MDP_funs is local copy
# =======================================================================================
# Functions for sampling paths
def sample_paths(N,
                 policy,
                 baseline,
                 env_mode='train',
                 T=1e6,
                 gamma=1,
                 mujoco_env=True,
                 normalized_env=False,
                 env=None):
    """Roll out *policy* for *N* episodes and return a list of path dicts.

    Each path dict holds stacked observations/actions/rewards plus
    baseline-corrected, normalized advantages and discounted returns.
    When ``mujoco_env`` is True the MuJoCo qpos/qvel state trajectory is
    recorded as well (layout differs for normalized envs).
    """
    # Directly specifying env works only when sampling in series
    # set random seed (needed for multiprocessing)
    np.random.seed()
    if env == None:
        env = get_environment(env_mode)
    # Clamp horizon to the env's own limit, but keep at least one step.
    T = min(T, env.horizon)
    T = max(1, T)

    # sometimes, env is not initialized correctly in multiprocessing
    # this is just a sanity check and step size should essentially be zero.
    print("####### Worker started #######")

    paths = []

    for ep in range(N):
        observations=[]
        actions=[]
        rewards=[]
        agent_infos = []
        env_infos = []
        qpos = []
        qvel = []

        o = env.reset()
        if mujoco_env == True:
            # Record the initial simulator state before the first action.
            if normalized_env:
                qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))
                qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))
            else:
                qpos.append(env.env.model.data.qpos.reshape(-1))
                qvel.append(env.env.model.data.qvel.reshape(-1))
        done = False
        t = 0

        while t < T and done != True:
            a, agent_info = policy.get_action(o)
            next_o, r, done, env_info = env.step(a)
            observations.append(env.observation_space.flatten(o))
            actions.append(env.action_space.flatten(a))
            rewards.append(r)
            agent_infos.append(agent_info)
            env_infos.append(env_info)
            if mujoco_env == True:
                if normalized_env:
                    qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))
                    qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))
                else:
                    qpos.append(env.env.model.data.qpos.reshape(-1))
                    qvel.append(env.env.model.data.qvel.reshape(-1))
            o = next_o
            t += 1

        # make a path dictionary
        # Also store the path belief and env data used in the trajectory
        #try:
        #    path_belief = env.env.belief
        #except Exception as e:
        #    path_belief = str(e)
        # path_model = env.env

        qpos_flat = tensor_utils.stack_tensor_list(qpos)
        qvel_flat = tensor_utils.stack_tensor_list(qvel)

        path = dict(
            observations=tensor_utils.stack_tensor_list(observations),
            actions=tensor_utils.stack_tensor_list(actions),
            rewards=tensor_utils.stack_tensor_list(rewards),
            agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
            env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
            qpos=qpos_flat,
            qvel=qvel_flat,
            #path_belief=path_belief,
            #path_model=path_model,
        )
        # TODO: Storing the path model is too space inefficient. Need to find alternative

        # compute returns using the path
        path_baseline = baseline.predict(path)
        advantages = []
        returns = []
        return_so_far = 0
        # Walk the episode backwards to accumulate discounted returns.
        for t in range(len(rewards) - 1, -1, -1):
            return_so_far = rewards[t] + gamma * return_so_far
            returns.append(return_so_far)
            advantage = return_so_far - path_baseline[t]
            advantages.append(advantage)
        # advantages and returns are stored backward in time
        advantages = np.array(advantages[::-1])
        returns = np.array(returns[::-1])
        # normalize advantages
        advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-8)
        path["advantages"] = advantages
        path["returns"] = returns
        paths.append(path)

    #print "Env body_mass : ", env.env.model.body_mass[1]
    print("====== Worker finished ======")

    return paths
def _sample_paths_star(args_list):
    """Unpack *args_list* into positional arguments for ``sample_paths``.

    Exists so ``multiprocessing`` pools can dispatch over a callable that
    takes a single argument.
    """
    return sample_paths(*args_list)
def sample_paths_parallel(N,
                          policy,
                          baseline,
                          env_mode='train',
                          T=1e6, gamma=1,
                          num_cpu=None,
                          max_process_time=120,
                          max_timeouts=4,
                          mujoco_env=True,
                          normalized_env=False):
    """Sample roughly *N* trajectories, splitting the work across processes.

    Each of ``num_cpu`` workers samples ``ceil(N / num_cpu)`` paths via
    ``sample_paths``; with ``num_cpu == 1`` sampling runs in-process.
    Returns the flattened list of path dicts (empty if every retry of the
    worker pool timed out).
    """
    if num_cpu is None or num_cpu == 'max':
        num_cpu = mp.cpu_count()
    elif num_cpu == 1:
        # Bug fix: this branch previously referenced the misspelled name
        # ``evn_mode`` and raised NameError whenever num_cpu == 1.
        return sample_paths(N, policy, baseline, env_mode, T, gamma,
                            mujoco_env, normalized_env)
    else:
        num_cpu = min(mp.cpu_count(), num_cpu)

    paths_per_cpu = int(np.ceil(N / num_cpu))
    args_list = [paths_per_cpu, policy, baseline, env_mode, T, gamma,
                 mujoco_env, normalized_env]
    results = _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts)

    paths = []
    # _try_multiprocess returns None once its retry budget is exhausted;
    # previously that crashed the loop below with a TypeError.
    if results is not None:
        for result in results:
            paths.extend(result)
    return paths
def _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts):
    """Fan ``_sample_paths_star`` out to ``num_cpu`` worker processes.

    Retries recursively when any worker exceeds ``max_process_time``
    seconds; gives up and returns None after ``max_timeouts`` attempts.
    On success returns one result (a list of paths) per worker.
    """
    if max_timeouts == 0:
        # Retry budget exhausted.
        return None

    pool = mp.Pool(processes=num_cpu, maxtasksperchild=1)
    pending = [pool.apply_async(_sample_paths_star, args=(args_list,))
               for _ in range(num_cpu)]
    try:
        gathered = [job.get(timeout=max_process_time) for job in pending]
    except Exception as err:
        print(str(err))
        print("Timeout Error raised... Trying again")
        pool.close()
        pool.terminate()
        pool.join()
        return _try_multiprocess(args_list, num_cpu,
                                 max_process_time, max_timeouts - 1)

    pool.close()
    pool.terminate()
    pool.join()
    return gathered
# =======================================================================================
# Functions for performance evaluation
def policy_evaluation(policy,
                      env_mode='train',
                      num_episodes=10,
                      horizon=1e6,
                      visual=False,
                      gamma=1):
    """Roll out *policy* for ``num_episodes`` episodes and summarize returns.

    Returns a 7-tuple of statistics over the discounted episode returns:
    (mean, std, min, max, 15th percentile, 85th percentile, num_episodes).
    """
    # TODO: Add functionality to sample parallel paths and evaluate policy
    env = get_environment(env_mode)
    horizon = min(env.horizon, horizon)
    episode_returns = np.zeros(num_episodes)

    for episode in range(num_episodes):
        obs = env.reset()
        step = 0
        finished = False
        while step < horizon and finished != True:
            if visual == True:
                env.render()
            action = policy.get_action(obs)[0]
            obs, reward, finished, _ = env.step(action)
            episode_returns[episode] += (gamma ** step) * reward
            step += 1

    return (np.mean(episode_returns),
            np.std(episode_returns),
            np.amin(episode_returns),
            np.amax(episode_returns),
            np.percentile(episode_returns, 15),
            np.percentile(episode_returns, 85),
            num_episodes)
| 30.66383 | 101 | 0.592978 |
import numpy as np
import copy
import multiprocessing as mp
from rllab.misc import tensor_utils
from MDP_funcs import *
def sample_paths(N,
policy,
baseline,
env_mode='train',
T=1e6,
gamma=1,
mujoco_env=True,
normalized_env=False,
env=None):
np.random.seed()
if env == None:
env = get_environment(env_mode)
T = min(T, env.horizon)
T = max(1, T)
print("####### Worker started #######")
paths = []
for ep in range(N):
observations=[]
actions=[]
rewards=[]
agent_infos = []
env_infos = []
qpos = []
qvel = []
o = env.reset()
if mujoco_env == True:
if normalized_env:
qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))
qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))
else:
qpos.append(env.env.model.data.qpos.reshape(-1))
qvel.append(env.env.model.data.qvel.reshape(-1))
done = False
t = 0
while t < T and done != True:
a, agent_info = policy.get_action(o)
next_o, r, done, env_info = env.step(a)
observations.append(env.observation_space.flatten(o))
actions.append(env.action_space.flatten(a))
rewards.append(r)
agent_infos.append(agent_info)
env_infos.append(env_info)
if mujoco_env == True:
if normalized_env:
qpos.append(env.wrapped_env.env.model.data.qpos.reshape(-1))
qvel.append(env.wrapped_env.env.model.data.qvel.reshape(-1))
else:
qpos.append(env.env.model.data.qpos.reshape(-1))
qvel.append(env.env.model.data.qvel.reshape(-1))
o = next_o
t += 1
qpos_flat = tensor_utils.stack_tensor_list(qpos)
qvel_flat = tensor_utils.stack_tensor_list(qvel)
path = dict(
observations=tensor_utils.stack_tensor_list(observations),
actions=tensor_utils.stack_tensor_list(actions),
rewards=tensor_utils.stack_tensor_list(rewards),
agent_infos=tensor_utils.stack_tensor_dict_list(agent_infos),
env_infos=tensor_utils.stack_tensor_dict_list(env_infos),
qpos=qpos_flat,
qvel=qvel_flat,
)
path_baseline = baseline.predict(path)
advantages = []
returns = []
return_so_far = 0
for t in range(len(rewards) - 1, -1, -1):
return_so_far = rewards[t] + gamma * return_so_far
returns.append(return_so_far)
advantage = return_so_far - path_baseline[t]
advantages.append(advantage)
advantages = np.array(advantages[::-1])
returns = np.array(returns[::-1])
advantages = (advantages - np.mean(advantages)) / (np.std(advantages) + 1e-8)
path["advantages"] = advantages
path["returns"] = returns
paths.append(path)
print("====== Worker finished ======")
return paths
def _sample_paths_star(args_list):
return sample_paths(*args_list)
def sample_paths_parallel(N,
policy,
baseline,
env_mode='train',
T=1e6, gamma=1,
num_cpu=None,
max_process_time=120,
max_timeouts=4,
mujoco_env=True,
normalized_env=False):
if num_cpu == None or num_cpu == 'max':
num_cpu = mp.cpu_count()
elif num_cpu == 1:
return sample_paths(N, policy, baseline, evn_mode, T, gamma, mujoco_env, normalized_env)
else:
num_cpu = min(mp.cpu_count(), num_cpu)
paths_per_cpu = int(np.ceil(N/num_cpu))
args_list = [paths_per_cpu, policy, baseline, env_mode, T, gamma, mujoco_env, normalized_env]
results = _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts)
paths = []
for result in results:
for path in result:
paths.append(path)
return paths
def _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts):
if max_timeouts == 0:
return None
pool = mp.Pool(processes=num_cpu, maxtasksperchild=1)
parallel_runs = [pool.apply_async(_sample_paths_star, args=(args_list,)) for _ in range(num_cpu)]
try:
results = [p.get(timeout=max_process_time) for p in parallel_runs]
except Exception as e:
print(str(e))
print("Timeout Error raised... Trying again")
pool.close()
pool.terminate()
pool.join()
return _try_multiprocess(args_list, num_cpu, max_process_time, max_timeouts-1)
pool.close()
pool.terminate()
pool.join()
return results
def policy_evaluation(policy,
env_mode='train',
num_episodes=10,
horizon=1e6,
visual=False,
gamma=1):
env = get_environment(env_mode)
horizon = min(env.horizon, horizon)
ep_returns = np.zeros(num_episodes)
for ep in range(num_episodes):
o = env.reset()
t = 0
done = False
while t < horizon and done != True:
if visual == True:
env.render()
a = policy.get_action(o)[0]
o, r, done, _ = env.step(a)
ep_returns[ep] += (gamma ** t) * r
t += 1
mean_eval = np.mean(ep_returns)
std_eval = np.std(ep_returns)
min_eval = np.amin(ep_returns)
max_eval = np.amax(ep_returns)
soft_min = np.percentile(ep_returns,15)
soft_max = np.percentile(ep_returns,85)
return (mean_eval, std_eval, min_eval, max_eval, soft_min, soft_max, num_episodes)
| true | true |
f7f56dd149c048edb1a9a8025869171f526daf81 | 49 | py | Python | allpoetryapi/__init__.py | jmbhughes/allpoetryapi | aee522addb154711325bc8f5f90fc54170aeec66 | [
"MIT"
] | null | null | null | allpoetryapi/__init__.py | jmbhughes/allpoetryapi | aee522addb154711325bc8f5f90fc54170aeec66 | [
"MIT"
] | 2 | 2018-08-19T22:26:38.000Z | 2018-08-22T19:11:19.000Z | allpoetryapi/__init__.py | jmbhughes/allpoetryapi | aee522addb154711325bc8f5f90fc54170aeec66 | [
"MIT"
] | null | null | null | from .api import Poem
from .api import AllPoetry
| 16.333333 | 26 | 0.795918 | from .api import Poem
from .api import AllPoetry
| true | true |
f7f56e06634521e6d705dd886d50b6fea1796ccd | 2,733 | py | Python | config.py | mwiemarc/lightpack-audio-reactive | 331a1ff408d885da9a85d0bdfa06f2f370139fea | [
"MIT"
] | 1 | 2019-01-25T23:11:11.000Z | 2019-01-25T23:11:11.000Z | config.py | mwiemarc/lightpack-audio-reactive | 331a1ff408d885da9a85d0bdfa06f2f370139fea | [
"MIT"
] | null | null | null | config.py | mwiemarc/lightpack-audio-reactive | 331a1ff408d885da9a85d0bdfa06f2f370139fea | [
"MIT"
] | null | null | null | """Settings for audio reactive LED strip"""
from __future__ import print_function
from __future__ import division
import os
LIGHTPACK_HOST = '127.0.0.1'
"""Lightpack server host"""
LIGHTPACK_PORT = 3636
"""Lightpack server port"""
LIGHTPACK_APIKEY = ''
"""API Key for authentication"""
NUM_LEDS = 45
"""Number of leds"""
CENTER_OFFSET = 16
"""LED Strip center offset"""
USE_GUI = True
"""Whether or not to display a PyQtGraph GUI plot of visualization"""
DISPLAY_FPS = False
"""Whether to display the FPS when running (can reduce performance)"""
MIC_RATE = 44100
"""Sampling frequency of the microphone in Hz"""
SOFTWARE_GAMMA_CORRECTION = False
"""True for no hardware dithering available"""
GAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy')
"""Location of the gamma correction table"""
FPS = 60
"""Desired refresh rate of the visualization (frames per second)
FPS indicates the desired refresh rate, or frames-per-second, of the audio
visualization. The actual refresh rate may be lower if the computer cannot keep
up with desired FPS value.
Higher framerates improve "responsiveness" and reduce the latency of the
visualization but are more computationally expensive.
Low framerates are less computationally expensive, but the visualization may
appear "sluggish" or out of sync with the audio being played if it is too low.
The FPS should not exceed the maximum refresh rate of the LED strip, which
depends on how long the LED strip is.
"""
N_PIXELS = NUM_LEDS if NUM_LEDS % 2 == 0 else NUM_LEDS - 1
_max_led_FPS = int(((N_PIXELS * 30e-6) + 50e-6)**-1.0)
assert FPS <= _max_led_FPS, 'FPS must be <= {}'.format(_max_led_FPS)
MIN_FREQUENCY = 500
#MIN_FREQUENCY = 200
"""Frequencies below this value will be removed during audio processing"""
MAX_FREQUENCY = 8000
#MAX_FREQUENCY = 12000
"""Frequencies above this value will be removed during audio processing"""
N_FFT_BINS = 24
"""Number of frequency bins to use when transforming audio to frequency domain
Fast Fourier transforms are used to transform time-domain audio data to the
frequency domain. The frequencies present in the audio signal are assigned
to their respective frequency bins. This value indicates the number of
frequency bins to use.
A small number of bins reduces the frequency resolution of the visualization
but improves amplitude resolution. The opposite is true when using a large
number of bins. More bins is not always better!
There is no point using more bins than there are pixels on the LED strip.
"""
N_ROLLING_HISTORY = 2
"""Number of past audio frames to include in the rolling window"""
MIN_VOLUME_THRESHOLD = 1e-7
"""No music visualization displayed if recorded audio volume below threshold"""
| 30.707865 | 79 | 0.76985 | from __future__ import print_function
from __future__ import division
import os
LIGHTPACK_HOST = '127.0.0.1'
LIGHTPACK_PORT = 3636
LIGHTPACK_APIKEY = ''
NUM_LEDS = 45
CENTER_OFFSET = 16
USE_GUI = True
DISPLAY_FPS = False
MIC_RATE = 44100
SOFTWARE_GAMMA_CORRECTION = False
GAMMA_TABLE_PATH = os.path.join(os.path.dirname(__file__), 'gamma_table.npy')
FPS = 60
N_PIXELS = NUM_LEDS if NUM_LEDS % 2 == 0 else NUM_LEDS - 1
_max_led_FPS = int(((N_PIXELS * 30e-6) + 50e-6)**-1.0)
assert FPS <= _max_led_FPS, 'FPS must be <= {}'.format(_max_led_FPS)
MIN_FREQUENCY = 500
MAX_FREQUENCY = 8000
N_FFT_BINS = 24
N_ROLLING_HISTORY = 2
MIN_VOLUME_THRESHOLD = 1e-7
| true | true |
f7f56e690d673f4161e70656c83d2a143dfed68d | 145 | py | Python | BOJ/21000~21999/21300~21399/21360.py | shinkeonkim/today-ps | f3e5e38c5215f19579bb0422f303a9c18c626afa | [
"Apache-2.0"
] | 2 | 2020-01-29T06:54:41.000Z | 2021-11-07T13:23:27.000Z | BOJ/21000~21999/21300~21399/21360.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | BOJ/21000~21999/21300~21399/21360.py | shinkeonkim/Today_PS | bb0cda0ee1b9c57e1cfa38355e29d0f1c6167a44 | [
"Apache-2.0"
] | null | null | null | input()
l = [i for i,j in enumerate(input()) if j =='.']
ans = 11111111
for i in range(len(l)-1):
ans = min(ans, l[i+1] - l[i])
print(ans - 1) | 20.714286 | 48 | 0.558621 | input()
l = [i for i,j in enumerate(input()) if j =='.']
ans = 11111111
for i in range(len(l)-1):
ans = min(ans, l[i+1] - l[i])
print(ans - 1) | true | true |
f7f56f16a4888d35d64d917bb917686bb9558bbd | 2,212 | py | Python | tracker/migrations/0004_auto_20160806_2353.py | giantas/elibrary | b1abe74dcae036051764d5ad8bdf74673c8a2861 | [
"MIT"
] | null | null | null | tracker/migrations/0004_auto_20160806_2353.py | giantas/elibrary | b1abe74dcae036051764d5ad8bdf74673c8a2861 | [
"MIT"
] | null | null | null | tracker/migrations/0004_auto_20160806_2353.py | giantas/elibrary | b1abe74dcae036051764d5ad8bdf74673c8a2861 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-06 20:53
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Replace the old ``Issues``/``IssueHandle`` models with one ``Issue`` model.

    Auto-generated by Django 1.9.8; the new model folds the issue data and
    its handler assignment into a single table.
    """

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('tracker', '0003_auto_20160806_2311'),
    ]

    operations = [
        # Consolidated issue model: raiser (`user`) and assignee (`handler`)
        # both reference the configured user model.
        migrations.CreateModel(
            name='Issue',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('issue_name', models.CharField(max_length=25)),
                ('description', models.CharField(max_length=100)),
                ('time_raised', models.DateTimeField(default=django.utils.timezone.now)),
                ('priority', models.CharField(choices=[('low', 'Low'), ('medium', 'Medium'), ('high', 'High')], default='low', max_length=7)),
                ('status', models.CharField(choices=[('in_progress', 'In Progress'), ('resolved', 'Resolved')], default='in-progress', max_length=12)),
                ('time_resolved', models.DateTimeField(default=django.utils.timezone.now)),
                ('comments', models.CharField(blank=True, default='', max_length=255)),
                ('is_seen', models.BooleanField(default=False)),
                ('handler', models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='issue_handler', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issue_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # Drop the superseded models (FKs first, then the models themselves).
        migrations.RemoveField(
            model_name='issuehandle',
            name='handler',
        ),
        migrations.RemoveField(
            model_name='issuehandle',
            name='issue',
        ),
        migrations.RemoveField(
            model_name='issues',
            name='user',
        ),
        migrations.DeleteModel(
            name='IssueHandle',
        ),
        migrations.DeleteModel(
            name='Issues',
        ),
    ]
| 40.962963 | 162 | 0.606239 |
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('tracker', '0003_auto_20160806_2311'),
]
operations = [
migrations.CreateModel(
name='Issue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('issue_name', models.CharField(max_length=25)),
('description', models.CharField(max_length=100)),
('time_raised', models.DateTimeField(default=django.utils.timezone.now)),
('priority', models.CharField(choices=[('low', 'Low'), ('medium', 'Medium'), ('high', 'High')], default='low', max_length=7)),
('status', models.CharField(choices=[('in_progress', 'In Progress'), ('resolved', 'Resolved')], default='in-progress', max_length=12)),
('time_resolved', models.DateTimeField(default=django.utils.timezone.now)),
('comments', models.CharField(blank=True, default='', max_length=255)),
('is_seen', models.BooleanField(default=False)),
('handler', models.ForeignKey(default=2, on_delete=django.db.models.deletion.CASCADE, related_name='issue_handler', to=settings.AUTH_USER_MODEL)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='issue_user', to=settings.AUTH_USER_MODEL)),
],
),
migrations.RemoveField(
model_name='issuehandle',
name='handler',
),
migrations.RemoveField(
model_name='issuehandle',
name='issue',
),
migrations.RemoveField(
model_name='issues',
name='user',
),
migrations.DeleteModel(
name='IssueHandle',
),
migrations.DeleteModel(
name='Issues',
),
]
| true | true |
f7f56f3a1c2b881cfc3efb61992d50e0592a6bbd | 409 | bzl | Python | cmd/image.bzl | ShotaKitazawa/pipe | 690a390e4364fc7d51c3a31e9921bda0300f6528 | [
"Apache-2.0"
] | null | null | null | cmd/image.bzl | ShotaKitazawa/pipe | 690a390e4364fc7d51c3a31e9921bda0300f6528 | [
"Apache-2.0"
] | null | null | null | cmd/image.bzl | ShotaKitazawa/pipe | 690a390e4364fc7d51c3a31e9921bda0300f6528 | [
"Apache-2.0"
] | null | null | null | def all_images():
cmds = {
"piped": "piped",
"pipecd": "pipecd",
"pipectl": "pipectl",
"helloworld": "helloworld",
}
images = {}
for cmd, repo in cmds.items():
images["$(DOCKER_REGISTRY)/%s:{STABLE_VERSION}" % repo] = "//cmd/%s:image" % cmd
images["$(DOCKER_REGISTRY)/%s:{STABLE_GIT_COMMIT}" % repo] = "//cmd/%s:image" % cmd
return images
| 27.266667 | 91 | 0.535452 | def all_images():
cmds = {
"piped": "piped",
"pipecd": "pipecd",
"pipectl": "pipectl",
"helloworld": "helloworld",
}
images = {}
for cmd, repo in cmds.items():
images["$(DOCKER_REGISTRY)/%s:{STABLE_VERSION}" % repo] = "//cmd/%s:image" % cmd
images["$(DOCKER_REGISTRY)/%s:{STABLE_GIT_COMMIT}" % repo] = "//cmd/%s:image" % cmd
return images
| true | true |
f7f5701d28225fe3218193f9f16fd161bac9bc89 | 755 | py | Python | MITx/6.00.1x/Week 2/Lecture_4/charSearchRecursive.py | dvpramodkumar/edX_moocs | 6e006cea8db9ac0784716a6f6143aeb3519e64c1 | [
"MIT"
] | null | null | null | MITx/6.00.1x/Week 2/Lecture_4/charSearchRecursive.py | dvpramodkumar/edX_moocs | 6e006cea8db9ac0784716a6f6143aeb3519e64c1 | [
"MIT"
] | null | null | null | MITx/6.00.1x/Week 2/Lecture_4/charSearchRecursive.py | dvpramodkumar/edX_moocs | 6e006cea8db9ac0784716a6f6143aeb3519e64c1 | [
"MIT"
] | null | null | null |
def isIn(char, aStr):
    '''
    char: a single character
    aStr: an alphabetized string

    returns: True if char is in aStr; False otherwise
    '''
    # Recursive binary search over the sorted string.
    if(len(aStr) == 0):
        return False
    elif(len(aStr) == 1):
        return aStr[0] == char
    else:
        low = 0
        high = len(aStr) - 1
        mid = int((low + high) / 2)
        if(char < aStr[mid]):
            # Bug fix: was aStr[:mid-1], which dropped the character at
            # index mid-1 and made e.g. isIn('a', 'abcd') return False.
            return isIn(char, aStr[:mid])
        elif(char > aStr[mid]):
            return isIn(char, aStr[mid+1:])
        else:
            return True
# Quick manual checks of isIn on a few sample inputs.
for probe, text in (('z', 'abcdefgh'), ('x', 'egmmmorstvy'), ('a', ''), ('f', 'b')):
    print(isIn(probe, text))
| 18.875 | 53 | 0.491391 |
def isIn(char, aStr):
    '''Return True when ``char`` occurs in the sorted string ``aStr``.

    Implemented as a recursive binary search, so ``aStr`` must be
    alphabetized for the answer to be meaningful.
    '''
    if len(aStr) == 0:
        return False
    if len(aStr) == 1:
        return aStr[0] == char
    mid = (len(aStr) - 1) // 2
    pivot = aStr[mid]
    if char == pivot:
        return True
    if char < pivot:
        # BUG FIX: recurse on aStr[:mid]; the original used aStr[:mid-1],
        # which dropped the character immediately before the pivot and
        # produced false negatives (e.g. isIn('b', 'bcd')).
        return isIn(char, aStr[:mid])
    return isIn(char, aStr[mid+1:])
# Manual checks; none of the probed characters is present in its string,
# so the expected output is False for all four calls.
print(isIn('z', 'abcdefgh'))
print(isIn('x', 'egmmmorstvy'))
print(isIn('a', ''))
print(isIn('f', 'b'))
| true | true |
f7f570c0d6d3315584a7819683cca2dc8657a8a0 | 1,504 | py | Python | recursion/104. Maximum Depth of Binary Tree_easy.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | recursion/104. Maximum Depth of Binary Tree_easy.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | recursion/104. Maximum Depth of Binary Tree_easy.py | JunzhongLin/leetcode_practice | 47b2f5cc3c87de004ae21a94024e751b40b8f559 | [
"MIT"
] | null | null | null | '''
Given the root of a binary tree, return its maximum depth.
A binary tree's maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
'''
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    """LeetCode 104 -- maximum depth of a binary tree, three variants.

    A tree's maximum depth is the number of nodes on the longest
    root-to-leaf path.  Nodes are expected to expose ``.left`` and
    ``.right`` attributes (the usual LeetCode ``TreeNode`` shape).
    """

    def maxDepth_iter(self, root: "Optional[TreeNode]") -> int:
        """Iterative BFS: count how many levels the tree has.

        The annotation is quoted because ``Optional``/``TreeNode`` are not
        imported in this file (the TreeNode definition exists only as a
        template comment); the original unquoted annotation raised
        NameError at class-creation time.
        """
        if root is None:
            return 0
        depth = 0
        level = [root]              # every node on the current level
        while level:
            depth += 1
            nxt = []
            for node in level:      # collect the next level; avoids O(n) pop(0)
                if node.left:
                    nxt.append(node.left)
                if node.right:
                    nxt.append(node.right)
            level = nxt
        return depth

    def __init__(self):
        # Running maximum used by maxDepth_bu.
        self.ans = 0

    def maxDepth_bu(self, root):
        """Top-down recursion carrying the current depth."""
        # BUG FIX: reset the accumulator so repeated calls on the same
        # Solution instance cannot return a stale (larger) maximum.
        self.ans = 0

        def descend(node, depth):
            if node is None:
                return
            self.ans = max(self.ans, depth)
            descend(node.left, depth + 1)
            descend(node.right, depth + 1)

        descend(root, 1)
        return self.ans

    def maxDepth(self, root):
        """Bottom-up recursion: depth(node) = 1 + max(depth of children)."""
        def helper(node):
            if not node:
                return 0
            return max(helper(node.left), helper(node.right)) + 1
        return helper(root)
class Solution:
    """Maximum depth of a binary tree (LeetCode 104), three implementations.

    Depth is the node count on the longest root-to-leaf path; nodes must
    expose ``.left`` and ``.right``.
    """

    def maxDepth_iter(self, root: "Optional[TreeNode]") -> int:
        # Level-order walk.  The annotation is quoted: Optional/TreeNode are
        # never imported in this file, so the original unquoted annotation
        # raised NameError as soon as the class body was executed.
        if root is None:
            return 0
        levels = 0
        frontier = [root]
        while frontier:
            levels += 1
            frontier = [child
                        for node in frontier
                        for child in (node.left, node.right)
                        if child]
        return levels

    def __init__(self):
        self.ans = 0    # scratch maximum for maxDepth_bu

    def maxDepth_bu(self, root):
        # Top-down recursion.  BUG FIX: reset self.ans first, otherwise a
        # second call on the same instance reports a stale maximum.
        self.ans = 0

        def visit(node, depth):
            if node is None:
                return
            if depth > self.ans:
                self.ans = depth
            visit(node.left, depth + 1)
            visit(node.right, depth + 1)

        visit(root, 1)
        return self.ans

    def maxDepth(self, root):
        # Bottom-up: a node's depth is one more than its deeper child's.
        def depth_of(node):
            if not node:
                return 0
            return 1 + max(depth_of(node.left), depth_of(node.right))
        return depth_of(root)
f7f570fffa8abab0de8cd0d8ed935490299d1194 | 189 | py | Python | project_euler/#1/Python/index.py | bored-user/b7web | 80785303d0e6a9e28b7ae5bdcf7fa20b90774a85 | [
"MIT"
] | null | null | null | project_euler/#1/Python/index.py | bored-user/b7web | 80785303d0e6a9e28b7ae5bdcf7fa20b90774a85 | [
"MIT"
] | 1 | 2021-04-29T10:29:05.000Z | 2021-04-29T10:29:05.000Z | project_euler/#1/Python/index.py | bored-user/exercises | 80785303d0e6a9e28b7ae5bdcf7fa20b90774a85 | [
"MIT"
def main(limit=1000):
    """Project Euler #1: sum of all natural numbers below ``limit``
    that are multiples of 3 or 5.

    ``limit`` generalizes the original hard-coded 1000; the default
    preserves the historical behavior (233168).
    """
    return sum(i for i in range(1, limit) if i % 3 == 0 or i % 5 == 0)


if __name__ == '__main__':
    print(main())
f7f5719b934e772adc8f4dc7311c47585c1c45f2 | 3,499 | py | Python | itchatmp/server.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | [
"MIT"
] | 1,504 | 2016-10-06T05:40:59.000Z | 2022-03-18T02:46:48.000Z | itchatmp/server.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | [
"MIT"
] | 51 | 2016-12-11T14:21:33.000Z | 2020-01-16T09:00:40.000Z | itchatmp/server.py | yf-ftd/itchatmp | 3c554f4f3e175a66f51edf4b8ff7982d0af378a0 | [
"MIT"
] | 370 | 2016-10-15T02:22:58.000Z | 2022-03-14T08:50:13.000Z | import os, logging
from base64 import b64decode

import tornado

from .content import NORMAL, SAFE
from .controllers import (
    Application, Chat, Common, CustomerService,
    Menu, Messages, Oauth2, Statistics,
    TemplateMsgs, Users, Utils, Wrapped)
from .components import load_register
from .exceptions import ParameterError
from .models.common import TestStorage
logger = logging.getLogger('itchatmp')
class WechatConfig(object):
    ''' config storing class
     * if copId is set, appId will be ignored
    '''
    def __init__(self, token='', copId='', appId='', appSecret='',
            encryptMode=NORMAL, encodingAesKey=''):
        self.token = token
        self.copId, self.appId, self.appSecret = copId, appId, appSecret
        self.encryptMode = encryptMode
        self.encodingAesKey = encodingAesKey
        try:
            # '=' restores base64 padding (the key is expected to be one
            # character short of a padded base64 string -- TODO confirm 43 chars)
            self._encodingAesKey = b64decode(
                self.encodingAesKey.encode('utf8') + b'=')
        except Exception:
            # BUG FIX: SAFE was referenced here without ever being imported,
            # so a malformed key in SAFE mode raised NameError instead of
            # ParameterError; SAFE is now imported from .content with NORMAL.
            if self.encryptMode == SAFE:
                raise ParameterError('Wrong AES Key format')
            else:
                # non-SAFE modes tolerate a missing/bad key
                self._encodingAesKey = ''
    def verify(self):
        # trivially accepts; no validation is performed here
        return True
class WechatServer(object):
    ''' Wechat server: holds config/storages and exposes the api surface. '''
    def __init__(self, config, atStorage, userStorage,
            filterRequest=False, threadPoolNumber=None):
        # configuration & storages
        self.config = config
        self.atStorage = atStorage or TestStorage()
        self.userStorage = userStorage
        self.filterRequest = filterRequest
        if threadPoolNumber:
            self.threadPoolNumber = threadPoolNumber
        else:
            # five workers per detected core, falling back to a single core
            coreCount = os.cpu_count() if hasattr(os, 'cpu_count') else None
            self.threadPoolNumber = (coreCount or 1) * 5
        try:
            self.ioLoop = tornado.ioloop.IOLoop.current()
        except:
            self.ioLoop = None
        self.isWsgi = True
        self.debug = True
        self._replyFnDict = {}
        # one controller instance per api area, each bound back to this server
        for attrName, controllerClass in (
                ('application', Application),
                ('chat', Chat),
                ('common', Common),
                ('customerservice', CustomerService),
                ('menu', Menu),
                ('messages', Messages),
                ('oauth2', Oauth2),
                ('statistics', Statistics),
                ('templatemsgs', TemplateMsgs),
                ('users', Users),
                ('utils', Utils),
                ('wrapped', Wrapped)):
            setattr(self, attrName, controllerClass(self))
    def update_config(self, config=None, atStorage=None, userStorage=None,
            filterRequest=None, threadPoolNumber=None):
        ''' stub; the real implementation is attached in components/register '''
        raise NotImplementedError()
    def run(self, isWsgi=False, debug=True, port=80):
        ''' stub; the real implementation is attached in components/register '''
        raise NotImplementedError()
    def msg_register(self, msgType):
        ''' stub; the real implementation is attached in components/register '''
        raise NotImplementedError()
    def upload(self, fileType, fileDir, additionalDict={}, permanent=False):
        ''' delegate media upload to the messages controller '''
        return self.messages.upload(
            fileType, fileDir, additionalDict, permanent)
    def send(self, msg, toUserName, mediaId=None):
        ''' delegate message sending to the wrapped controller '''
        return self.wrapped.send(msg, toUserName, mediaId)
    def filter_request(self, request):
        ''' internal: delegate request filtering to the common controller '''
        return self.common.filter_request(request)
    def access_token(self, fn):
        ''' delegate access-token decoration to the common controller '''
        return self.common.access_token(fn)
    def clear_quota(self):
        ''' delegate quota clearing to the common controller '''
        return self.common.clear_quota()
# Attach the real update_config/run/msg_register implementations (declared
# above as NotImplementedError stubs, "defined in components/register").
load_register(WechatServer)
| 38.032609 | 82 | 0.627894 | import os, logging
from base64 import b64decode
import tornado
from .content import NORMAL
from .controllers import (
Application, Chat, Common, CustomerService,
Menu, Messages, Oauth2, Statistics,
TemplateMsgs, Users, Utils, Wrapped)
from .components import load_register
from .exceptions import ParameterError
from .models.common import TestStorage
logger = logging.getLogger('itchatmp')
class WechatConfig(object):
    '''Configuration holder; if copId is set, appId is ignored.'''
    def __init__(self, token='', copId='', appId='', appSecret='',
            encryptMode=NORMAL, encodingAesKey=''):
        self.token = token
        self.copId, self.appId, self.appSecret = copId, appId, appSecret
        self.encryptMode = encryptMode
        self.encodingAesKey = encodingAesKey
        try:
            # '=' restores base64 padding on the configured AES key
            self._encodingAesKey = b64decode(
                self.encodingAesKey.encode('utf8') + b'=')
        except Exception:
            # BUG FIX: SAFE was used without being imported in this module,
            # so a bad key in SAFE mode raised NameError instead of
            # ParameterError; a local import keeps this fix self-contained.
            from .content import SAFE
            if self.encryptMode == SAFE:
                raise ParameterError('Wrong AES Key format')
            # non-SAFE modes tolerate a missing/bad key
            self._encodingAesKey = ''
    def verify(self):
        # trivially accepts; no validation is performed here
        return True
class WechatServer(object):
    '''Wechat server object wiring config, storages and api controllers.'''
    def __init__(self, config, atStorage, userStorage,
            filterRequest=False, threadPoolNumber=None):
        self.config = config
        self.atStorage = atStorage or TestStorage()
        self.userStorage = userStorage
        self.filterRequest = filterRequest
        if threadPoolNumber:
            self.threadPoolNumber = threadPoolNumber
        else:
            # size the pool as 5 x available cores (at least one core)
            detected = os.cpu_count() if hasattr(os, 'cpu_count') else None
            self.threadPoolNumber = (detected or 1) * 5
        try:
            self.ioLoop = tornado.ioloop.IOLoop.current()
        except:
            self.ioLoop = None
        self.isWsgi = True
        self.debug = True
        self._replyFnDict = {}
        # instantiate every api controller against this server instance
        wiring = (
            ('application', Application),
            ('chat', Chat),
            ('common', Common),
            ('customerservice', CustomerService),
            ('menu', Menu),
            ('messages', Messages),
            ('oauth2', Oauth2),
            ('statistics', Statistics),
            ('templatemsgs', TemplateMsgs),
            ('users', Users),
            ('utils', Utils),
            ('wrapped', Wrapped))
        for name, cls in wiring:
            setattr(self, name, cls(self))
    def update_config(self, config=None, atStorage=None, userStorage=None,
            filterRequest=None, threadPoolNumber=None):
        '''stub replaced via components/register'''
        raise NotImplementedError()
    def run(self, isWsgi=False, debug=True, port=80):
        '''stub replaced via components/register'''
        raise NotImplementedError()
    def msg_register(self, msgType):
        '''stub replaced via components/register'''
        raise NotImplementedError()
    def upload(self, fileType, fileDir, additionalDict={}, permanent=False):
        '''forward to the messages controller'''
        return self.messages.upload(
            fileType, fileDir, additionalDict, permanent)
    def send(self, msg, toUserName, mediaId=None):
        '''forward to the wrapped controller'''
        return self.wrapped.send(msg, toUserName, mediaId)
    def filter_request(self, request):
        '''internal helper forwarding to the common controller'''
        return self.common.filter_request(request)
    def access_token(self, fn):
        '''forward to the common controller'''
        return self.common.access_token(fn)
    def clear_quota(self):
        '''forward to the common controller'''
        return self.common.clear_quota()
# components.load_register replaces the NotImplementedError stubs
# (update_config/run/msg_register) with their real implementations.
load_register(WechatServer)
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.